idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
23,500
/**
 * Determines whether the given exception indicates a transient failure that may be retried.
 *
 * @param ex The exception to inspect; it is unwrapped before being examined.
 * @return True if a retry may succeed, false otherwise.
 */
private static boolean isRetryable(Throwable ex) {
    Throwable unwrapped = Exceptions.unwrap(ex);
    return unwrapped instanceof WriteFailureException
            || unwrapped instanceof DataLogNotAvailableException;
}
Determines whether the given exception can be retried .
23,501
/**
 * Updates the Log Metadata as a result of adding a new Ledger, then persists it.
 * If persisting fails, the newly created ledger is deleted (best-effort) before rethrowing,
 * so it is not leaked.
 *
 * @param currentMetadata   The current LogMetadata, or null if this is a brand-new log.
 * @param newLedger         The Ledger that was just created and must be recorded.
 * @param clearEmptyLedgers If true (and the log already existed), empty ledgers beyond the
 *                          minimum fence count are removed from the metadata.
 * @return The updated (and persisted) LogMetadata.
 * @throws DurableDataLogException If the metadata could not be persisted.
 */
private LogMetadata updateMetadata(LogMetadata currentMetadata, LedgerHandle newLedger, boolean clearEmptyLedgers) throws DurableDataLogException {
    boolean isNewLog = currentMetadata == null;
    if (isNewLog) {
        // First ledger for this log.
        currentMetadata = new LogMetadata(newLedger.getId());
    } else {
        currentMetadata = currentMetadata.addLedger(newLedger.getId());
        if (clearEmptyLedgers) {
            currentMetadata = currentMetadata.removeEmptyLedgers(Ledgers.MIN_FENCE_LEDGER_COUNT);
        }
    }

    try {
        persistMetadata(currentMetadata, isNewLog);
    } catch (DurableDataLogException ex) {
        // Persisting failed: try to clean up the ledger we just created.
        try {
            Ledgers.delete(newLedger.getId(), this.bookKeeper);
        } catch (Exception deleteEx) {
            log.warn("{}: Unable to delete newly created ledger {}.", this.traceObjectId, newLedger.getId(), deleteEx);
            ex.addSuppressed(deleteEx);
        }
        throw ex;
    }

    log.info("{} Metadata updated ({}).", this.traceObjectId, currentMetadata);
    return currentMetadata;
}
Updates the metadata and persists it as a result of adding a new Ledger .
23,502
/**
 * Persists the given metadata into ZooKeeper, either creating the ZNode (create == true) or
 * conditionally updating it using the metadata's current update version.
 *
 * @param metadata The LogMetadata to persist. Its update version is bumped on success.
 * @param create   True to create the ZNode, false to conditionally update it.
 * @throws DataLogWriterNotPrimaryException If the node already exists or the version check
 *                                          fails (another writer holds the lock).
 * @throws DataLogInitializationException   For any other ZooKeeper failure.
 */
private void persistMetadata(LogMetadata metadata, boolean create) throws DurableDataLogException {
    try {
        byte[] serializedMetadata = LogMetadata.SERIALIZER.serialize(metadata).getCopy();
        if (create) {
            this.zkClient.create().creatingParentsIfNeeded().forPath(this.logNodePath, serializedMetadata);
            metadata.withUpdateVersion(0);
        } else {
            // Conditional update: fails with BadVersionException if someone else changed the node.
            this.zkClient.setData().withVersion(metadata.getUpdateVersion()).forPath(this.logNodePath, serializedMetadata);
            metadata.withUpdateVersion(metadata.getUpdateVersion() + 1);
        }
    } catch (KeeperException.NodeExistsException | KeeperException.BadVersionException keeperEx) {
        // We are not (or are no longer) the primary writer for this log.
        throw new DataLogWriterNotPrimaryException(
                String.format("Unable to acquire exclusive write lock for log (path = '%s%s').", this.zkClient.getNamespace(), this.logNodePath),
                keeperEx);
    } catch (Exception generalEx) {
        throw new DataLogInitializationException(
                String.format("Unable to update ZNode for path '%s%s'.", this.zkClient.getNamespace(), this.logNodePath),
                generalEx);
    }

    log.info("{} Metadata persisted ({}).", this.traceObjectId, metadata);
}
Persists the given metadata into ZooKeeper .
23,503
/**
 * Computes which Ledger Ids from the previous metadata are no longer referenced by the current
 * metadata and are therefore safe to delete from BookKeeper.
 *
 * @param oldMetadata     The metadata prior to the update (may be null).
 * @param currentMetadata The metadata after the update.
 * @return Ledger Ids present in oldMetadata but absent from currentMetadata.
 */
@GuardedBy("lock")
private List<Long> getLedgerIdsToDelete(LogMetadata oldMetadata, LogMetadata currentMetadata) {
    if (oldMetadata == null) {
        // Brand-new log: there is nothing old to delete.
        return Collections.emptyList();
    }

    // Ids still referenced by the current metadata must be kept.
    val retainedIds = currentMetadata.getLedgers().stream()
            .map(LedgerMetadata::getLedgerId)
            .collect(Collectors.toSet());
    return oldMetadata.getLedgers().stream()
            .map(LedgerMetadata::getLedgerId)
            .filter(id -> !retainedIds.contains(id))
            .collect(Collectors.toList());
}
Determines which Ledger Ids are safe to delete from BookKeeper .
23,504
/**
 * Sets the Storage Length for this segment. The value must be non-negative and may never
 * decrease from its previous value.
 *
 * @param value The new Storage Length.
 */
public synchronized void setStorageLength(long value) {
    Exceptions.checkArgument(value >= 0, "value", "Storage Length must be a non-negative number.");
    Exceptions.checkArgument(value >= this.storageLength, "value", "New Storage Length cannot be smaller than the previous one.");

    log.trace("{}: StorageLength changed from {} to {}.", this.traceObjectId, this.storageLength, value);
    this.storageLength = value;
}
region UpdateableSegmentMetadata Implementation
23,505
/**
 * Evicts Extended Attributes whose lastUsed value precedes the given cutoff, oldest first,
 * but only while the total attribute count still exceeds the given maximum.
 *
 * @param maximumAttributeCount Maximum number of attributes allowed to remain.
 * @param lastUsedCutoff        Attributes last used strictly before this value are candidates.
 * @return The number of attributes evicted.
 */
synchronized int cleanupAttributes(int maximumAttributeCount, long lastUsedCutoff) {
    if (this.extendedAttributes.size() <= maximumAttributeCount) {
        // Already within bounds; nothing to evict.
        return 0;
    }

    // Oldest-first list of attributes stale enough to be evicted.
    val evictionCandidates = this.extendedAttributes.entrySet().stream()
            .filter(e -> e.getValue().lastUsed < lastUsedCutoff)
            .sorted(Comparator.comparingLong(e -> e.getValue().lastUsed))
            .collect(Collectors.toList());

    int removedCount = 0;
    for (val candidate : evictionCandidates) {
        if (this.extendedAttributes.size() <= maximumAttributeCount) {
            // Dropped back under the limit; stop evicting.
            break;
        }
        this.extendedAttributes.remove(candidate.getKey());
        removedCount++;
    }

    log.debug("{}: Evicted {} attribute(s).", this.traceObjectId, removedCount);
    return removedCount;
}
Evicts those Extended Attributes from memory that have a LastUsed value prior to the given cutoff .
23,506
/**
 * Creates a new instance of the DebugRecoveryProcessor class with the given arguments, wiring
 * up an in-memory Storage, a no-op cache and a started CacheManager for it.
 *
 * @param containerId     Id of the container to recover.
 * @param durableDataLog  The DurableDataLog to recover from.
 * @param config          Container configuration.
 * @param readIndexConfig Read Index configuration.
 * @param executor        Executor for async work (also drives the CacheManager).
 * @param callbacks       Callbacks invoked during recovery.
 * @return A new DebugRecoveryProcessor.
 * @throws NullPointerException If any argument is null.
 */
public static DebugRecoveryProcessor create(int containerId, DurableDataLog durableDataLog, ContainerConfig config,
                                            ReadIndexConfig readIndexConfig, ScheduledExecutorService executor,
                                            OperationCallbacks callbacks) {
    Preconditions.checkNotNull(durableDataLog, "durableDataLog");
    Preconditions.checkNotNull(config, "config");
    Preconditions.checkNotNull(readIndexConfig, "readIndexConfig");
    Preconditions.checkNotNull(executor, "executor");
    // Fix: the error-message argument was the 'callbacks' object itself (uninformative, and
    // calls toString() on it just to build a message); use the parameter name, consistent
    // with the checks above.
    Preconditions.checkNotNull(callbacks, "callbacks");

    StreamSegmentContainerMetadata metadata = new StreamSegmentContainerMetadata(containerId, config.getMaxActiveSegmentCount());
    CacheManager cacheManager = new CacheManager(new CachePolicy(Long.MAX_VALUE, Duration.ofHours(10), Duration.ofHours(1)), executor);
    cacheManager.startAsync().awaitRunning();
    ContainerReadIndexFactory rf = new ContainerReadIndexFactory(readIndexConfig, new NoOpCacheFactory(), cacheManager, executor);
    Storage s = new InMemoryStorageFactory(executor).createStorageAdapter();
    return new DebugRecoveryProcessor(metadata, durableDataLog, rf, s, cacheManager, callbacks);
}
Creates a new instance of the DebugRecoveryProcessor class with the given arguments .
23,507
/**
 * Recovers a single Operation, surrounding the base-class recovery with the registered
 * begin/failed/success callbacks (each of which is optional and invoked safely).
 *
 * @param dataFrameRecord The record being recovered.
 * @param metadataUpdater The OperationMetadataUpdater to apply the operation to.
 * @throws DataCorruptionException If the base recovery fails with data corruption.
 */
protected void recoverOperation(DataFrameRecord<Operation> dataFrameRecord, OperationMetadataUpdater metadataUpdater) throws DataCorruptionException {
    if (this.callbacks.beginRecoverOperation != null) {
        Callbacks.invokeSafely(this.callbacks.beginRecoverOperation, dataFrameRecord.getItem(), dataFrameRecord.getFrameEntries(), null);
    }

    try {
        super.recoverOperation(dataFrameRecord, metadataUpdater);
    } catch (Throwable ex) {
        // Report the failure to the callback, then propagate the original exception.
        if (this.callbacks.operationFailed != null) {
            Callbacks.invokeSafely(this.callbacks.operationFailed, dataFrameRecord.getItem(), ex, null);
        }
        throw ex;
    }

    if (this.callbacks.operationSuccess != null) {
        Callbacks.invokeSafely(this.callbacks.operationSuccess, dataFrameRecord.getItem(), null);
    }
}
region RecoveryProcessor Overrides
23,508
/**
 * Opens a new DataFrame Entry, first closing any currently open entry.
 *
 * @param firstRecordEntry True if this entry is the first one for its record.
 * @return True if the entry was opened, false if the frame lacks room for a minimal entry.
 * @throws IllegalStateException If the DataFrame is sealed.
 */
boolean startNewEntry(boolean firstRecordEntry) {
    Preconditions.checkState(!this.sealed, "DataFrame is sealed and cannot accept any more entries.");
    // Close out whatever entry is currently open before starting a new one.
    endEntry(true);

    if (getAvailableLength() < MIN_ENTRY_LENGTH_NEEDED) {
        // Not enough room left in this frame for even a minimal entry.
        return false;
    }

    this.writeEntryStartIndex = this.writePosition;
    this.writeEntryHeader = new WriteEntryHeader(this.contents.subSegment(this.writePosition, WriteEntryHeader.HEADER_SIZE));
    this.writeEntryHeader.setFirstRecordEntry(firstRecordEntry);
    this.writePosition += WriteEntryHeader.HEADER_SIZE;
    return true;
}
Indicates that a new DataFrame Entry should be opened .
23,509
/**
 * Ends the currently open DataFrame Entry (if any): records its length, marks whether it ends
 * its record, serializes the header and clears the open-entry state.
 *
 * @param endOfRecord True if this entry is the last one for its record.
 * @return True if the frame still has room for another minimal entry, false otherwise.
 */
boolean endEntry(boolean endOfRecord) {
    if (this.writeEntryStartIndex >= 0) {
        int entryLength = this.writePosition - this.writeEntryStartIndex - WriteEntryHeader.HEADER_SIZE;
        assert entryLength >= 0 : "entryLength is negative.";

        this.writeEntryHeader.setEntryLength(entryLength);
        this.writeEntryHeader.setLastRecordEntry(endOfRecord);
        this.writeEntryHeader.serialize();

        // Reset the open-entry markers.
        this.writeEntryHeader = null;
        this.writeEntryStartIndex = -1;
    }

    return getAvailableLength() >= MIN_ENTRY_LENGTH_NEEDED;
}
Indicates that the currently open DataFrame Entry can be ended .
23,510
/**
 * Appends as much of the given ByteArraySegment as fits into the remaining frame space.
 *
 * @param data The data to append.
 * @return The number of bytes actually copied (may be 0 if the frame is full).
 */
int append(ByteArraySegment data) {
    ensureAppendConditions();
    // Cap the copy at whatever space remains in the frame.
    int copyLength = Math.min(data.getLength(), getAvailableLength());
    if (copyLength > 0) {
        this.contents.copyFrom(data, writePosition, copyLength);
        writePosition += copyLength;
    }
    return copyLength;
}
Appends the contents of the ByteArraySegment to the DataFrame .
23,511
/**
 * Seals the frame for writing: commits the content length to the header and marks the frame
 * sealed. No effect if the frame is read-only or already sealed.
 *
 * @throws IllegalStateException If an entry is still open.
 */
void seal() {
    if (this.sealed || this.contents.isReadOnly()) {
        // Nothing to do: read-only or already sealed.
        return;
    }
    Preconditions.checkState(writeEntryStartIndex < 0, "An open entry exists. Any open entries must be closed prior to sealing.");
    this.header.setContentLength(writePosition);
    this.header.commit();
    this.sealed = true;
}
Seals the frame for writing. After this method returns, no more modifications are allowed on this DataFrame. This method has no effect if the Frame is read-only or if it is already sealed.
23,512
/**
 * Interprets the given InputStream as a serialized DataFrame and returns an iterator over the
 * entries it contains.
 *
 * @param source  The InputStream to read from.
 * @param length  Number of bytes available for this frame.
 * @param address The LogAddress the frame was read from.
 * @return A DataFrameEntryIterator over the frame's contents.
 * @throws SerializationException If length is smaller than the header plus declared contents.
 * @throws IOException            If the stream cannot be read.
 */
public static DataFrameEntryIterator read(InputStream source, int length, LogAddress address) throws IOException {
    ReadFrameHeader header = new ReadFrameHeader(source);
    if (length < ReadFrameHeader.SERIALIZATION_LENGTH + header.getContentLength()) {
        throw new SerializationException(String.format(
                "Given buffer has insufficient number of bytes for this DataFrame. Expected %d, actual %d.",
                ReadFrameHeader.SERIALIZATION_LENGTH + header.getContentLength(), length));
    }

    // Bound the stream to exactly this frame's contents so the iterator cannot over-read.
    BoundedInputStream contents = new BoundedInputStream(source, header.getContentLength());
    return new DataFrameEntryIterator(contents, address, ReadFrameHeader.SERIALIZATION_LENGTH);
}
Interprets the given InputStream as a DataFrame and returns a DataFrameEntryIterator for the entries serialized in it .
23,513
/**
 * Creates a new Flow with a freshly generated id. Ids wrap back to 0 after Integer.MAX_VALUE.
 */
public static Flow create() {
    int nextId = ID_GENERATOR.updateAndGet(previous -> previous == Integer.MAX_VALUE ? 0 : previous + 1);
    return new Flow(nextId, 0);
}
Create a new Flow .
23,514
/**
 * Reports the number of Segment Store hosts and the Container count per host from newMapping.
 * Additionally, any host present in oldMapping but missing from newMapping is reported as a
 * failure, together with failovers for its Containers.
 *
 * @param oldMapping Previous host-to-containers assignment (may be null; failures skipped).
 * @param newMapping Current host-to-containers assignment (null means no-op).
 */
public void updateHostContainerMetrics(Map<Host, Set<Integer>> oldMapping, Map<Host, Set<Integer>> newMapping) {
    if (newMapping == null) {
        return;
    }

    // Current host count and container count per host.
    DYNAMIC_LOGGER.reportGaugeValue(SEGMENT_STORE_HOST_NUMBER, newMapping.keySet().size());
    for (Host host : newMapping.keySet()) {
        reportContainerCountPerHost(host, newMapping.get(host));
    }

    if (oldMapping == null) {
        return;
    }

    // Hosts present in both mappings; retainAll() returns true only if some host disappeared.
    Set<Host> survivingHosts = new HashSet<>(oldMapping.keySet());
    boolean anyHostRemoved = survivingHosts.retainAll(newMapping.keySet());
    if (anyHostRemoved) {
        oldMapping.keySet().stream()
                .filter(host -> !survivingHosts.contains(host))
                .forEach(failedHost -> {
                    reportHostFailures(failedHost);
                    reportContainerFailovers(oldMapping.get(failedHost));
                });
    }
}
This method reports the number of available Segment Store hosts managing Containers as well as the number of Containers assigned to each host . Moreover this method also reports failures for hosts and Containers ; we consider a failure the situation in which a host is present in the oldMapping but not present in newMapping .
23,515
/**
 * Replaces the underlying Metric instance with the given one, closing the previous instance
 * (if any, and if different from the new one).
 *
 * @param instance The new instance; must not be null.
 */
void updateInstance(T instance) {
    T previous = this.instance.getAndSet(Preconditions.checkNotNull(instance, "instance"));
    if (previous != null && previous != instance) {
        previous.close();
    }
}
Updates the underlying Metric instance with the given one and closes out the previous one .
23,516
/**
 * Returns a new List containing every item of the given collection that is not present in the
 * exclusion collection. Neither input is modified.
 *
 * @param collection The items to filter.
 * @param toExclude  The items to leave out.
 * @return A new collection with the retained items, in iteration order.
 */
public static <T> Collection<T> filterOut(Collection<T> collection, Collection<T> toExclude) {
    return collection.stream()
            .filter(item -> !toExclude.contains(item))
            .collect(Collectors.toList());
}
Returns a new collection which contains all the items in the given collection that are not to be excluded .
23,517
/**
 * Returns an unmodifiable Set view made up of the two given Sets. No data is copied: any
 * changes to either underlying Set are reflected in the returned view.
 *
 * @param set1 The first Set.
 * @param set2 The second Set.
 * @return A read-only view backed by both Sets.
 */
public static < T > Set < T > joinSets ( Set < T > set1 , Set < T > set2 ) { return new NonConvertedSetView < > ( set1 , set2 ) ; }
Returns an unmodifiable Set View made up of the given Sets . The returned Set View does not copy any of the data from any of the given Sets therefore any changes in the two Sets will be reflected in the View .
23,518
/**
 * Returns an unmodifiable Set view made up of the two given Sets, translating each element
 * into a common output type via the supplied converters. No data is copied: any changes to
 * either underlying Set are reflected in the returned view.
 *
 * @param set1       The first Set.
 * @param converter1 Maps elements of set1 to the output type.
 * @param set2       The second Set.
 * @param converter2 Maps elements of set2 to the output type.
 * @return A read-only, converting view backed by both Sets.
 */
public static < OutputType , Type1 , Type2 > Set < OutputType > joinSets ( Set < Type1 > set1 , Function < Type1 , OutputType > converter1 , Set < Type2 > set2 , Function < Type2 , OutputType > converter2 ) { return new ConvertedSetView < > ( set1 , converter1 , set2 , converter2 ) ; }
Returns an unmodifiable Set View made up of the given Sets while translating the items into a common type . The returned Set View does not copy any of the data from any of the given Sets therefore any changes in the two Sets will be reflected in the View .
23,519
/**
 * Returns an unmodifiable Collection view made up of the two given Collections, translating
 * each element into a common output type via the supplied converters. No data is copied: any
 * changes to either underlying Collection are reflected in the returned view.
 * NOTE(review): this delegates to ConvertedSetView even though the inputs are general
 * Collections — verify that the view does not impose Set (deduplication) semantics here.
 *
 * @param c1         The first Collection.
 * @param converter1 Maps elements of c1 to the output type.
 * @param c2         The second Collection.
 * @param converter2 Maps elements of c2 to the output type.
 * @return A read-only, converting view backed by both Collections.
 */
public static < OutputType , Type1 , Type2 > Collection < OutputType > joinCollections ( Collection < Type1 > c1 , Function < Type1 , OutputType > converter1 , Collection < Type2 > c2 , Function < Type2 , OutputType > converter2 ) { return new ConvertedSetView < > ( c1 , converter1 , c2 , converter2 ) ; }
Returns an unmodifiable Collection View made up of the given Collections while translating the items into a common type . The returned Collection View does not copy any of the data from any of the given Collections therefore any changes in the two Collections will be reflected in the View .
23,520
/**
 * Processes the next work item for the given key. Callers guarantee the queue is non-empty,
 * and poll() on the work queue happens only here. On completion, the work is either postponed
 * (retryable failure) or finalized by completing its result and signalling completion.
 *
 * @param key       The key whose work queue is being drained.
 * @param workQueue The (non-empty) queue of pending work for the key.
 */
private void run(String key, ConcurrentLinkedQueue<Work> workQueue) {
    Work work = workQueue.poll();

    CompletableFuture<Void> future;
    try {
        future = processEvent(work.getEvent());
    } catch (Exception e) {
        // Normalize synchronous failures into a failed future so there is one completion path.
        future = Futures.failedFuture(e);
    }

    future.whenComplete((r, e) -> {
        if (e != null && toPostpone(work.getEvent(), work.getPickupTime(), e)) {
            handleWorkPostpone(key, workQueue, work);
        } else {
            if (e != null) {
                work.getResult().completeExceptionally(e);
            } else {
                work.getResult().complete(r);
            }
            handleWorkComplete(key, workQueue, work);
        }
    });
}
The run method is called only if the work queue is not empty, so we can safely call workQueue.poll(). WorkQueue.poll() should only happen in the run method and nowhere else.
23,521
/**
 * Adds a new item to the queue. If a take() request is pending, it is completed with all
 * currently available items.
 *
 * @param item The item to add.
 * @throws ObjectClosedException If the queue has been closed.
 */
public void add(T item) {
    CompletableFuture<Queue<T>> waiter;
    Queue<T> fetched = null;
    synchronized (this.contents) {
        Exceptions.checkNotClosed(this.closed, this);
        this.contents.addLast(item);

        // If a take() is blocked waiting for data, satisfy it with everything we have.
        waiter = this.pendingTake;
        this.pendingTake = null;
        if (waiter != null) {
            fetched = fetch(this.contents.size());
        }
    }

    // Complete the waiter outside the lock so its callbacks do not run while we hold it.
    if (waiter != null) {
        waiter.complete(fetched);
    }
}
Adds a new item to the queue .
23,522
/**
 * Returns up to maxCount items from the queue without blocking.
 *
 * @param maxCount Maximum number of items to return.
 * @return The fetched items (possibly empty).
 * @throws IllegalStateException If a take() request is currently pending.
 * @throws ObjectClosedException If the queue has been closed.
 */
public Queue<T> poll(int maxCount) {
    synchronized (this.contents) {
        Exceptions.checkNotClosed(this.closed, this);
        Preconditions.checkState(this.pendingTake == null, "Cannot call poll() when there is a pending take() request.");
        return fetch(maxCount);
    }
}
Returns the next items from the queue if any .
23,523
/**
 * Returns up to maxCount items from the queue. If the queue is empty, the returned future
 * completes once at least one item has been added.
 *
 * @param maxCount Maximum number of items to return.
 * @return A future with the fetched items; completed immediately if items are available.
 * @throws IllegalStateException If another take() request is already pending.
 * @throws ObjectClosedException If the queue has been closed.
 */
public CompletableFuture<Queue<T>> take(int maxCount) {
    synchronized (this.contents) {
        Exceptions.checkNotClosed(this.closed, this);
        Preconditions.checkState(this.pendingTake == null, "Cannot have more than one concurrent pending take() request.");

        Queue<T> available = fetch(maxCount);
        if (available.size() > 0) {
            return CompletableFuture.completedFuture(available);
        }

        // Nothing available: register a pending take to be completed by a future add().
        this.pendingTake = new CompletableFuture<>();
        return this.pendingTake;
    }
}
Returns the next items from the queue . If the queue is empty it blocks the call until at least one item is added .
23,524
/**
 * Records a successful append: updates global write counters always, and per-segment counters
 * plus rolling aggregates for non-transaction segments. If the aggregate update signals that a
 * report is due, the segment's statistics are reported.
 *
 * @param streamSegmentName Name of the segment appended to.
 * @param dataLength        Number of bytes appended.
 * @param numOfEvents       Number of events appended.
 * @param elapsed           Time taken by the append.
 */
public void recordAppend(String streamSegmentName, long dataLength, int numOfEvents, Duration elapsed) {
    getWriteStreamSegment().reportSuccessEvent(elapsed);
    DynamicLogger dl = getDynamicLogger();
    dl.incCounterValue(globalMetricName(SEGMENT_WRITE_BYTES), dataLength);
    dl.incCounterValue(globalMetricName(SEGMENT_WRITE_EVENTS), numOfEvents);

    if (StreamSegmentNameUtils.isTransactionSegment(streamSegmentName)) {
        // Per-segment metrics and aggregates are only tracked for non-transaction segments.
        return;
    }

    dl.incCounterValue(SEGMENT_WRITE_BYTES, dataLength, segmentTags(streamSegmentName));
    dl.incCounterValue(SEGMENT_WRITE_EVENTS, numOfEvents, segmentTags(streamSegmentName));
    try {
        SegmentAggregates aggregates = getSegmentAggregate(streamSegmentName);
        if (aggregates != null && aggregates.update(dataLength, numOfEvents)) {
            report(streamSegmentName, aggregates);
        }
    } catch (Exception e) {
        // Statistics are best-effort; never let them fail the append path.
        log.warn("Record statistic for {} for data: {} and events:{} threw exception", streamSegmentName, dataLength, numOfEvents, e);
    }
}
Updates segment specific aggregates . Then if two minutes have elapsed between last report of aggregates for this segment send a new update to the monitor . This update to the monitor is processed by monitor asynchronously .
23,525
/**
 * Records the statistics of a committed transaction into the target segment's counters and
 * aggregates, reporting the aggregates if the update signals a report is due.
 *
 * @param streamSegmentName Name of the target segment.
 * @param dataLength        Number of bytes merged.
 * @param numOfEvents       Number of events merged.
 * @param txnCreationTime   Creation time of the transaction.
 */
public void merge(String streamSegmentName, long dataLength, int numOfEvents, long txnCreationTime) {
    DynamicLogger dl = getDynamicLogger();
    dl.incCounterValue(SEGMENT_WRITE_BYTES, dataLength, segmentTags(streamSegmentName));
    dl.incCounterValue(SEGMENT_WRITE_EVENTS, numOfEvents, segmentTags(streamSegmentName));

    SegmentAggregates aggregates = getSegmentAggregate(streamSegmentName);
    if (aggregates != null && aggregates.updateTx(dataLength, numOfEvents, txnCreationTime)) {
        report(streamSegmentName, aggregates);
    }
}
Method called with txn stats whenever a txn is committed .
23,526
/**
 * Sets the Sequence Number for this operation. May only be called once, and only with a
 * non-negative value.
 *
 * @param value The Sequence Number to set.
 * @throws IllegalStateException    If a Sequence Number was already set.
 * @throws IllegalArgumentException If value is negative.
 */
public void setSequenceNumber(long value) {
    Preconditions.checkState(this.sequenceNumber < 0, "Sequence Number has been previously set for this entry. Cannot set a new one.");
    Exceptions.checkArgument(value >= 0, "value", "Sequence Number must be a non-negative number.");
    this.sequenceNumber = value;
}
Sets the Sequence Number for this operation if not already set .
23,527
/**
 * Blocks (uninterruptibly) until the internal state reaches STARTING, then returns the current
 * ControllerServiceStarter.
 *
 * @return The current ControllerServiceStarter.
 * @throws IllegalStateException If the state observed after wakeup is not STARTING.
 */
public ControllerServiceStarter awaitServiceStarting() {
    monitor.enterWhenUninterruptibly(hasReachedStarting);
    try {
        if (serviceState != ServiceState.STARTING) {
            throw new IllegalStateException("Expected state=" + ServiceState.STARTING + ", but actual state=" + serviceState);
        }
        return this.starter;
    } finally {
        monitor.leave();
    }
}
Awaits until the internal state changes to STARTING and returns the reference of current ControllerServiceStarter .
23,528
/**
 * Blocks (uninterruptibly) until the internal state reaches PAUSING, then returns the current
 * ControllerServiceStarter.
 *
 * @return The current ControllerServiceStarter.
 * @throws IllegalStateException If the state observed after wakeup is not PAUSING.
 */
public ControllerServiceStarter awaitServicePausing() {
    monitor.enterWhenUninterruptibly(hasReachedPausing);
    try {
        if (serviceState != ServiceState.PAUSING) {
            throw new IllegalStateException("Expected state=" + ServiceState.PAUSING + ", but actual state=" + serviceState);
        }
        return this.starter;
    } finally {
        monitor.leave();
    }
}
Awaits until the internal state changes to PAUSING and returns the reference of current ControllerServiceStarter .
23,529
/**
 * Positions the stream at the beginning of the next record, skipping over the remainder of the
 * current record if one is open (and the current entry was not prefetched).
 *
 * @return True if a new record is available, false if there are no more records.
 * @throws IOException             If the underlying stream cannot be read.
 * @throws DurableDataLogException If the underlying log fails.
 */
boolean beginRecord() throws IOException, DurableDataLogException {
    try {
        if (this.currentEntry != null && !this.prefetchedEntry) {
            // Mid-record: skip to the end of the current record first.
            endRecord();
        }
        fetchNextEntry();
        return true;
    } catch (NoMoreRecordsException ex) {
        // Clean end of data.
        return false;
    }
}
Indicates that a new record is to be expected . When invoked if in the middle of a record it will be skipped over and the DataFrameInputStream will be positioned at the beginning of the next record .
23,530
/**
 * Removes all items from the index, resetting it to its empty state and invalidating any
 * in-flight iterators (via modCount).
 */
public void clear() {
    this.size = 0;
    this.root = null;
    this.first = null;
    this.last = null;
    this.modCount++;
}
region SortedIndex Implementation
23,531
/**
 * Recursively inserts an item into the AVL subtree rooted at the given node.
 * If a node with the same key already exists, its item is replaced and the previous item is
 * recorded in UpdateResult.updatedItem; otherwise a new leaf is created and size/modCount are
 * incremented. On the way back up, each visited subtree is rebalanced and its (possibly new)
 * root is returned via UpdateResult.node.
 */
private UpdateResult insert ( V item , Node node ) { UpdateResult result ; if ( node == null ) { result = new UpdateResult ( ) ; result . node = new Node ( item ) ; this . size ++ ; this . modCount ++ ; } else { long itemKey = item . key ( ) ; long nodeKey = node . item . key ( ) ; if ( itemKey < nodeKey ) { result = insert ( item , node . left ) ; node . left = result . node ; } else if ( itemKey > nodeKey ) { result = insert ( item , node . right ) ; node . right = result . node ; } else { result = new UpdateResult ( ) ; result . updatedItem = node . item ; node . item = item ; } result . node = balance ( node ) ; } return result ; }
Inserts an item into a subtree .
23,532
/**
 * Recursively removes the item with the given key from the AVL subtree rooted at the given
 * node. If found, the removed item is recorded in UpdateResult.updatedItem. A node with two
 * children is replaced by its in-order successor (smallest item of the right subtree), which
 * is then deleted from that subtree; otherwise the node is spliced out and size/modCount are
 * updated. Each visited subtree is rebalanced on the way back up, with its (possibly new)
 * root returned via UpdateResult.node.
 */
private UpdateResult delete ( long key , Node node ) { UpdateResult result ; if ( node == null ) { result = new UpdateResult ( ) ; } else { long itemKey = node . item . key ( ) ; if ( key < itemKey ) { result = delete ( key , node . left ) ; node . left = result . node ; } else if ( key > itemKey ) { result = delete ( key , node . right ) ; node . right = result . node ; } else { result = new UpdateResult ( ) ; result . updatedItem = node . item ; if ( node . left != null && node . right != null ) { node . item = findSmallest ( node . right ) ; node . right = delete ( node . item . key ( ) , node . right ) . node ; } else { node = ( node . left != null ) ? node . left : node . right ; this . size -- ; this . modCount ++ ; } } result . node = balance ( node ) ; } return result ; }
Removes an item with given key from a subtree .
23,533
/**
 * Rebalances the subtree rooted at the given node if its left/right height difference exceeds
 * MAX_IMBALANCE, applying a single rotation (or a double rotation when the heavy child leans
 * the other way); otherwise it simply refreshes the node's cached height.
 * NOTE(review): the rotateLeft/rotateRight naming convention follows this class's own helpers
 * (defined elsewhere); here rotateLeft is applied to the left-heavy case — confirm against
 * those helper implementations rather than textbook naming.
 *
 * @return The (possibly new) root of the rebalanced subtree, or null if node is null.
 */
private Node balance ( Node node ) { if ( node == null ) { return null ; } int imbalance = getHeight ( node . left ) - getHeight ( node . right ) ; if ( imbalance > MAX_IMBALANCE ) { if ( getHeight ( node . left . left ) < getHeight ( node . left . right ) ) { node . left = rotateRight ( node . left ) ; } return rotateLeft ( node ) ; } else if ( - imbalance > MAX_IMBALANCE ) { if ( getHeight ( node . right . right ) < getHeight ( node . right . left ) ) { node . right = rotateLeft ( node . right ) ; } return rotateRight ( node ) ; } else { node . height = calculateHeight ( getHeight ( node . left ) , getHeight ( node . right ) ) ; return node ; } }
Rebalances the subtree with given root node if necessary .
23,534
/**
 * Finds the smallest item in the subtree rooted at the given node (its leftmost descendant).
 *
 * @param node The subtree root (may be null).
 * @return The smallest item, or null if the subtree is empty.
 */
private V findSmallest(Node node) {
    if (node == null) {
        return null;
    }

    Node current = node;
    while (current.left != null) {
        current = current.left;
    }
    return current.item;
}
Finds the smallest item in a subtree .
23,535
/**
 * Finds the largest item in the subtree rooted at the given node (its rightmost descendant).
 *
 * @param node The subtree root (may be null).
 * @return The largest item, or null if the subtree is empty.
 */
private V findLargest(Node node) {
    if (node == null) {
        return null;
    }

    Node current = node;
    while (current.right != null) {
        current = current.right;
    }
    return current.item;
}
Finds the largest item in a subtree .
23,536
/**
 * Executes the given operation on the given resource under this host's task framework:
 * registers a (tag, resource) child under the host id, executes the task, and finally removes
 * the child, completing the returned future with the task's outcome. If the processor is in
 * index-only mode, only the indexes are created and no task is executed. Fails immediately if
 * the processor is not yet ready.
 * NOTE(review): removeChild runs regardless of task success/failure, and a failure of
 * removeChild itself is not propagated into the result — confirm this is intentional.
 */
public < T > CompletableFuture < T > execute ( final Resource resource , final Serializable [ ] parameters , final FutureOperation < T > operation ) { if ( ! ready ) { return Futures . failedFuture ( new IllegalStateException ( getClass ( ) . getName ( ) + " not yet ready" ) ) ; } final String tag = UUID . randomUUID ( ) . toString ( ) ; final TaskData taskData = getTaskData ( parameters ) ; final CompletableFuture < T > result = new CompletableFuture < > ( ) ; final TaggedResource taggedResource = new TaggedResource ( tag , resource ) ; log . debug ( "Host={}, Tag={} starting to execute task {}-{} on resource {}" , context . hostId , tag , taskData . getMethodName ( ) , taskData . getMethodVersion ( ) , resource ) ; if ( createIndexOnlyMode ) { return createIndexes ( taggedResource , taskData ) ; } taskMetadataStore . putChild ( context . hostId , taggedResource ) . thenComposeAsync ( x -> executeTask ( resource , taskData , tag , operation ) , executor ) . whenCompleteAsync ( ( value , e ) -> taskMetadataStore . removeChild ( context . hostId , taggedResource , true ) . whenCompleteAsync ( ( innerValue , innerE ) -> { if ( e != null ) { result . completeExceptionally ( e ) ; } else { result . complete ( value ) ; } } , executor ) , executor ) ; return result ; }
Wrapper method that initially obtains lock then executes the passed method and finally releases lock .
23,537
/**
 * Converts a 16-byte (128-bit) array into a UUID: the first 8 bytes form the most significant
 * bits and the last 8 bytes the least significant bits, big-endian.
 *
 * @param data A 16-byte array (asserted).
 * @return The corresponding UUID.
 */
static UUID bytesToUUID(byte[] data) {
    assert data.length == 16 : "data must be 16 bytes in length";

    long msb = 0;
    for (int i = 0; i < 8; i++) {
        msb = (msb << 8) | (data[i] & 0xff);
    }

    long lsb = 0;
    for (int i = 8; i < 16; i++) {
        lsb = (lsb << 8) | (data[i] & 0xff);
    }

    return new UUID(msb, lsb);
}
Converts a 128-bit (16-byte) array into a UUID. Copied from UUID's private constructor.
23,538
/**
 * Turns the leading bits of the given long into a double in [0, 1) by splicing them into the
 * mantissa of a double in [1, 2) and subtracting 1.
 * NOTE(review): the exact number of bits used depends on the MASK and LEADING_BITS constants
 * defined elsewhere in this class (the (value >> 12) shift keeps the top 52 bits) — the prose
 * description says 54; confirm against those constants.
 */
static double longToDoubleFraction ( long value ) { long shifted = ( value >> 12 ) & MASK ; return Double . longBitsToDouble ( LEADING_BITS + shifted ) - 1 ; }
Turns the leading 54 bits of a long into a double between 0 and 1 .
23,539
/**
 * Creates an AsyncReadResultProcessor for the given ReadResult/handler pair and starts
 * processing it on the given Executor.
 *
 * @param readResult   The ReadResult to process.
 * @param entryHandler The handler invoked for each result entry.
 * @param executor     The Executor to process on; must not be null.
 * @return The started AsyncReadResultProcessor.
 */
public static AsyncReadResultProcessor process(ReadResult readResult, AsyncReadResultHandler entryHandler, Executor executor) {
    Preconditions.checkNotNull(executor, "executor");
    AsyncReadResultProcessor resultProcessor = new AsyncReadResultProcessor(readResult, entryHandler);
    resultProcessor.processResult(executor);
    return resultProcessor;
}
Processes the given ReadResult using the given AsyncReadResultHandler .
23,540
/**
 * Aggregates cache statistics across all per-segment caches: the total size plus the oldest
 * and newest generation observed.
 * NOTE(review): minGen starts at 0, so the reported oldest generation can never exceed 0 even
 * if every cache's oldest generation is higher — verify this floor is intended (same pattern
 * as the other getCacheStatus() in this file).
 */
public CacheManager . CacheStatus getCacheStatus ( ) { int minGen = 0 ; int maxGen = 0 ; long size = 0 ; synchronized ( this . segmentCaches ) { for ( SegmentKeyCache e : this . segmentCaches . values ( ) ) { if ( e != null ) { val cs = e . getCacheStatus ( ) ; minGen = Math . min ( minGen , cs . getOldestGeneration ( ) ) ; maxGen = Math . max ( maxGen , cs . getNewestGeneration ( ) ) ; size += cs . getSize ( ) ; } } } return new CacheManager . CacheStatus ( size , minGen , maxGen ) ; }
region CacheManager . Client Implementation
23,541
/**
 * Looks up the cached bucket offset for the given Segment and Key Hash.
 *
 * @param segmentId Id of the segment.
 * @param keyHash   The Key Hash to look up.
 * @return The cached offset, or null if the segment has no cache (or no entry).
 */
CacheBucketOffset get(long segmentId, UUID keyHash) {
    SegmentKeyCache segmentCache;
    int generation;
    synchronized (this.segmentCaches) {
        // Snapshot the generation together with the cache reference, then query outside the lock.
        generation = this.currentCacheGeneration;
        segmentCache = this.segmentCaches.get(segmentId);
    }

    return segmentCache == null ? null : segmentCache.get(keyHash, generation);
}
Looks up a cached offset for the given Segment and Key Hash .
23,542
/**
 * Gets the not-yet-indexed (tail) Key Hashes for the given segment, mapped to their latest
 * bucket offsets. Returns an empty map if the segment has no cache.
 */
Map < UUID , CacheBucketOffset > getTailHashes ( long segmentId ) { return forSegmentCache ( segmentId , SegmentKeyCache :: getTailBucketOffsets , Collections . emptyMap ( ) ) ; }
Gets the unindexed Key Hashes mapped to their latest offsets .
23,543
/**
 * Records a reference to the given stream under this scope at the given stream position, by
 * creating the corresponding ZNode if it does not already exist.
 *
 * @param name           Name of the stream.
 * @param streamPosition Position assigned to the stream within the scope.
 * @return A future that completes when the ZNode exists.
 */
CompletableFuture<Void> addStreamToScope(String name, int streamPosition) {
    String streamNodePath = getPathForStreamPosition(name, streamPosition);
    return Futures.toVoid(store.createZNodeIfNotExist(streamNodePath));
}
Streams are ordered under the scope in the order of stream position . The metadata store first calls getNextStreamPosition method to generate a new position . This position is assigned to the stream and a reference to this stream is stored under the scope at the said position .
23,544
/**
 * Generates the next stream position by creating an ephemeral sequential ZNode under the
 * counter node and parsing the numeric suffix ZooKeeper appended to the path.
 *
 * @return A future with the newly generated position.
 */
CompletableFuture<Integer> getNextStreamPosition() {
    return store.createEphemeralSequentialZNode(counterPath)
            // The returned full path is counterPath plus a numeric suffix; strip the base to get it.
            .thenApply(fullPath -> Integer.parseInt(fullPath.replace(counterPath, "")));
}
When a new stream is created under a scope, we first get a new counter value by creating a sequential znode under a counter node. This is a 10-digit integer which the store passes to the zkscope object as the position.
23,545
/**
 * Deserializes the given byte array into a RollingSegmentHandle for the given header handle.
 * The serialization is a SEPARATOR-delimited list of key/value entries:
 * - KEY_POLICY_MAX_SIZE: the rolling policy's max length (only the first occurrence is used);
 * - KEY_CONCAT: a concat marker whose parsed (key, value) pair is fed to the OffsetAdjuster;
 * - otherwise the key is a numeric chunk start offset (adjusted via the OffsetAdjuster) and
 *   the value is the chunk name; chunk offsets must appear in non-decreasing order.
 * The resulting handle's header length is set to the serialization's length.
 *
 * @throws IllegalArgumentException if the serialization is empty, malformed, or out of order.
 */
static RollingSegmentHandle deserialize ( byte [ ] serialization , SegmentHandle headerHandle ) { StringTokenizer st = new StringTokenizer ( new String ( serialization , ENCODING ) , SEPARATOR , false ) ; Preconditions . checkArgument ( st . hasMoreTokens ( ) , "No separators in serialization." ) ; SegmentRollingPolicy policy = null ; OffsetAdjuster om = new OffsetAdjuster ( ) ; long lastOffset = 0 ; ArrayList < SegmentChunk > segmentChunks = new ArrayList < > ( ) ; while ( st . hasMoreTokens ( ) ) { val entry = parse ( st . nextToken ( ) ) ; if ( entry . getKey ( ) . equalsIgnoreCase ( KEY_POLICY_MAX_SIZE ) ) { if ( policy == null ) { Preconditions . checkArgument ( isValidLong ( entry . getValue ( ) ) , "Invalid entry value for '%s'." , entry ) ; policy = new SegmentRollingPolicy ( Long . parseLong ( entry . getValue ( ) ) ) ; } } else if ( entry . getKey ( ) . equalsIgnoreCase ( KEY_CONCAT ) ) { val concatInfo = parseConcat ( entry . getValue ( ) ) ; om . set ( concatInfo . getKey ( ) , concatInfo . getValue ( ) ) ; } else { Preconditions . checkArgument ( isValidLong ( entry . getKey ( ) ) , "Invalid key value for '%s'." , entry ) ; long offset = om . adjustOffset ( Long . parseLong ( entry . getKey ( ) ) ) ; SegmentChunk s = new SegmentChunk ( entry . getValue ( ) , offset ) ; Preconditions . checkArgument ( lastOffset <= s . getStartOffset ( ) , "SegmentChunk Entry '%s' has out-of-order offset (previous=%s)." , s , lastOffset ) ; segmentChunks . add ( s ) ; lastOffset = s . getStartOffset ( ) ; } } RollingSegmentHandle h = new RollingSegmentHandle ( headerHandle , policy , segmentChunks ) ; h . setHeaderLength ( serialization . length ) ; return h ; }
Deserializes the given byte array into a RollingSegmentHandle .
23,546
/**
 * Serializes an entire RollingSegmentHandle into a new ByteArraySegment: the rolling policy
 * entry first, then one entry per chunk, in order.
 *
 * @param handle The handle to serialize.
 * @return A ByteArraySegment with the serialization.
 */
@SneakyThrows(IOException.class)
static ByteArraySegment serialize(RollingSegmentHandle handle) {
    try (EnhancedByteArrayOutputStream os = new EnhancedByteArrayOutputStream()) {
        os.write(combine(KEY_POLICY_MAX_SIZE, Long.toString(handle.getRollingPolicy().getMaxLength())));
        handle.chunks().forEach(chunk -> os.write(serializeChunk(chunk)));
        return os.getData();
    }
}
Serializes an entire RollingSegmentHandle into a new ByteArraySegment .
23,547
/**
 * Serializes a single SegmentChunk as a (start offset, name) entry.
 */
static byte[] serializeChunk(SegmentChunk segmentChunk) {
    String offsetToken = Long.toString(segmentChunk.getStartOffset());
    return combine(offsetToken, segmentChunk.getName());
}
Serializes a single SegmentChunk .
23,548
/**
 * Determines if Table Compaction is required on a Table Segment: there must be more than one
 * full compaction batch of indexed data beyond the compaction start, and the index utilization
 * (percentage of entries still active) must fall below the configured threshold.
 *
 * @param info The Segment's properties.
 * @return True if compaction is required, false otherwise.
 */
boolean isCompactionRequired(SegmentProperties info) {
    long compactionStart = getCompactionStartOffset(info);
    long lastIndexedOffset = this.indexReader.getLastIndexedOffset(info);
    if (compactionStart + this.connector.getMaxCompactionSize() >= lastIndexedOffset) {
        // Not enough indexed data beyond the compaction start to fill one compaction batch.
        return false;
    }

    long totalEntryCount = this.indexReader.getTotalEntryCount(info);
    long activeEntryCount = this.indexReader.getEntryCount(info);
    // Utilization clamped to [0, 100]; an empty index counts as fully utilized.
    long utilization = totalEntryCount == 0
            ? 100
            : MathHelpers.minMax(Math.round(100.0 * activeEntryCount / totalEntryCount), 0, 100);
    long utilizationThreshold = (int) MathHelpers.minMax(this.indexReader.getCompactionUtilizationThreshold(info), 0, 100);
    return utilization < utilizationThreshold;
}
Determines if Table Compaction is required on a Table Segment .
23,549
/**
 * Calculates the offset in the Segment up to which it is safe to truncate, based on the
 * highest offset copied during an index update or, if nothing was copied and the segment is
 * fully indexed, the compaction offset.
 *
 * @param info                The Segment's properties.
 * @param highestCopiedOffset Highest offset copied during the index update (0 if none).
 * @return The truncation offset, or -1 if no truncation is warranted.
 */
long calculateTruncationOffset(SegmentProperties info, long highestCopiedOffset) {
    long candidate = -1;
    if (highestCopiedOffset > 0) {
        // Everything up to the highest copied offset has been safely moved.
        candidate = highestCopiedOffset;
    } else if (this.indexReader.getLastIndexedOffset(info) >= info.getLength()) {
        // Fully indexed segment: we may truncate up to the compaction offset.
        candidate = this.indexReader.getCompactionOffset(info);
    }

    // Truncating at or before the current start offset would be a no-op; signal "nothing to do".
    return candidate <= info.getStartOffset() ? -1 : candidate;
}
Calculates the offset in the Segment where it is safe to truncate based on the current state of the Segment and the highest copied offset encountered during an index update .
23,550
/**
 * Opens the given segment for reading and returns a read-only handle for it.
 *
 * @param streamSegmentName Name of the segment to open.
 * @return A read-only ExtendedS3SegmentHandle for the segment.
 */
private SegmentHandle doOpenRead(String streamSegmentName) {
    long traceId = LoggerHelpers.traceEnter(log, "openRead", streamSegmentName);

    // Result is unused; presumably invoked for its existence check / side effects — TODO confirm.
    doGetStreamSegmentInfo(streamSegmentName);

    ExtendedS3SegmentHandle readHandle = ExtendedS3SegmentHandle.getReadHandle(streamSegmentName);
    LoggerHelpers.traceLeave(log, "openRead", traceId, streamSegmentName);
    return readHandle;
}
region private sync implementation
23,551
/**
 * Gets (or lazily creates) the Cache with the given id. A newly created cache is registered
 * under the lock but initialized outside it.
 *
 * @param id Id of the Cache to retrieve.
 * @return The (initialized) Cache.
 * @throws ObjectClosedException If this factory has been closed.
 */
public Cache getCache(String id) {
    Exceptions.checkNotClosed(this.closed.get(), this);

    RocksDBCache cache;
    boolean createdHere = false;
    synchronized (this.caches) {
        cache = this.caches.get(id);
        if (cache == null) {
            cache = new RocksDBCache(id, this.config, this::cacheClosed);
            this.caches.put(id, cache);
            createdHere = true;
        }
    }

    // Initialize outside the lock so other lookups are not blocked by the initialization.
    if (createdHere) {
        cache.initialize();
    }
    return cache;
}
region CacheFactory Implementation
23,552
/**
 * Initializes the SegmentAttributeIndex: opens the Attribute Segment for writing (creating it
 * first if it does not exist), then initializes the underlying index.
 *
 * @param timeout Overall timeout for the operation.
 * @return A future that completes when initialization is done.
 * @throws IllegalStateException If the index is already initialized.
 */
CompletableFuture<Void> initialize(Duration timeout) {
    TimeoutTimer timer = new TimeoutTimer(timeout);
    Preconditions.checkState(!this.index.isInitialized(), "SegmentAttributeIndex is already initialized.");
    String attributeSegmentName = StreamSegmentNameUtils.getAttributeSegmentName(this.segmentMetadata.getName());

    // Try to open the attribute segment; if it is missing, create it and retain its handle.
    return Futures
            .exceptionallyComposeExpecting(
                    this.storage.openWrite(attributeSegmentName).thenAccept(this.handle::set),
                    ex -> ex instanceof StreamSegmentNotExistsException,
                    () -> this.storage.create(attributeSegmentName, this.config.getAttributeSegmentRollingPolicy(), timer.getRemaining())
                                      .thenAccept(this.handle::set))
            .thenComposeAsync(v -> this.index.initialize(timer.getRemaining()), this.executor)
            .thenRun(() -> log.debug("{}: Initialized.", this.traceObjectId))
            .exceptionally(this::handleIndexOperationException);
}
Initializes the SegmentAttributeIndex by inspecting the AttributeSegmentFile and creating it if needed .
23,553
/**
 * Deletes the Attribute Segment associated with the given Segment. A non-existent Attribute
 * Segment is treated as a successful (no-op) deletion.
 *
 * @param segmentName Name of the Segment whose attribute data should be deleted.
 * @param storage     Storage adapter to use.
 * @param timeout     Timeout for the operation.
 * @return A CompletableFuture that completes when the deletion is done.
 */
static CompletableFuture<Void> delete(String segmentName, Storage storage, Duration timeout) {
    TimeoutTimer timer = new TimeoutTimer(timeout);
    String attributeSegment = StreamSegmentNameUtils.getAttributeSegmentName(segmentName);
    CompletableFuture<Void> deletion = storage.openWrite(attributeSegment)
            .thenCompose(handle -> storage.delete(handle, timer.getRemaining()));
    // Nothing to delete if the attribute segment never existed; swallow just that exception.
    return Futures.exceptionallyExpecting(deletion, ex -> ex instanceof StreamSegmentNotExistsException, null);
}
Deletes all the Attribute data associated with the given Segment .
23,554
/**
 * Closes this object, optionally removing all of its cache entries. Close is idempotent: only the
 * first call has any effect.
 *
 * @param cleanCache If true, cache entries are removed asynchronously on the executor.
 */
void close(boolean cleanCache) {
    if (this.closed.getAndSet(true)) {
        return; // Already closed.
    }
    if (!cleanCache) {
        log.info("{}: Closed (no cache cleanup).", this.traceObjectId);
        return;
    }
    // Perform the cache cleanup on the executor rather than the caller's thread.
    this.executor.execute(() -> {
        removeAllCacheEntries();
        log.info("{}: Closed.", this.traceObjectId);
    });
}
Closes the SegmentAttributeIndex and optionally cleans the cache .
23,555
/**
 * Gets a snapshot of this cache's status: total entry size and the [min, max] range of entry generations.
 * NOTE(review): minGen is initialized to 0 and only ever lowered via Math.min, so the reported minimum
 * generation can never exceed 0 even if every entry has a higher generation — confirm whether this is
 * intentional or whether minGen should start at the first entry's generation.
 */
public CacheManager . CacheStatus getCacheStatus ( ) { int minGen = 0 ; int maxGen = 0 ; long size = 0 ; synchronized ( this . cacheEntries ) { for ( CacheEntry e : this . cacheEntries . values ( ) ) { if ( e != null ) { int g = e . getGeneration ( ) ; minGen = Math . min ( minGen , g ) ; maxGen = Math . max ( maxGen , g ) ; size += e . getSize ( ) ; } } } return new CacheManager . CacheStatus ( size , minGen , maxGen ) ; }
region CacheManager . Client implementation
23,556
/**
 * Executes the given index operation with retries (UPDATE_RETRY policy); retries apply to
 * conditional-update failures surfaced by executeConditionallyOnce. Terminal failures are routed
 * through handleIndexOperationException.
 *
 * @param indexOperation Operation to run; receives the remaining timeout for each attempt.
 * @param timeout        Total timeout shared across all attempts.
 * @return A CompletableFuture that completes when the operation (including retries) finishes.
 */
private CompletableFuture < Void > executeConditionally ( Function < Duration , CompletableFuture < Long > > indexOperation , Duration timeout ) { TimeoutTimer timer = new TimeoutTimer ( timeout ) ; return UPDATE_RETRY . runAsync ( ( ) -> executeConditionallyOnce ( indexOperation , timer ) , this . executor ) . exceptionally ( this :: handleIndexOperationException ) . thenAccept ( Callbacks :: doNothing ) ; }
Executes the given Index Operation with retries . Retries are only performed in case of conditional update failures represented by BadOffsetException .
23,557
/**
 * Records that a commit of the given length was processed. When the minimum commit count has been
 * reached AND either the commit-count or accumulated-length threshold is exceeded, the counters are
 * reset and a checkpoint is scheduled on the executor.
 *
 * @param commitLength Length of the commit; must be non-negative.
 */
public synchronized void recordCommit(int commitLength) {
    Preconditions.checkArgument(commitLength >= 0, "commitLength must be a non-negative number.");
    this.commitCount++;
    this.accumulatedLength += commitLength;
    boolean minCountReached = this.commitCount >= this.config.getCheckpointMinCommitCount();
    boolean countExceeded = this.commitCount >= this.config.getCheckpointCommitCountThreshold();
    boolean lengthExceeded = this.accumulatedLength >= this.config.getCheckpointTotalCommitLengthThreshold();
    if (minCountReached && (countExceeded || lengthExceeded)) {
        // Reset and schedule a checkpoint.
        this.commitCount = 0;
        this.accumulatedLength = 0;
        this.executor.execute(this.createCheckpointCallback);
    }
}
Records that an operation with the given data length has been processed .
23,558
/**
 * Converts the given TxnId message into its UUID representation (high bits / low bits).
 *
 * @param txnId TxnId to convert; must be non-null.
 * @return The equivalent UUID.
 */
public static final UUID encode ( final TxnId txnId ) { Preconditions . checkNotNull ( txnId , "txnId" ) ; return new UUID ( txnId . getHighBits ( ) , txnId . getLowBits ( ) ) ; }
Returns UUID of transaction with given TxnId .
23,559
/**
 * Converts the given SegmentId message into a client-side Segment object (scope, stream, segment id).
 *
 * @param segment SegmentId to convert; must be non-null.
 * @return The equivalent Segment.
 */
public static final Segment encode ( final SegmentId segment ) { Preconditions . checkNotNull ( segment , "segment" ) ; return new Segment ( segment . getStreamInfo ( ) . getScope ( ) , segment . getStreamInfo ( ) . getStream ( ) , segment . getSegmentId ( ) ) ; }
Helper to convert Segment Id into Segment object .
23,560
/**
 * Converts an RPC RetentionPolicy into its internal representation.
 *
 * @param policy RPC policy; may be null.
 * @return The internal RetentionPolicy, or null if the input is null or its type is UNKNOWN.
 */
public static final RetentionPolicy encode(final Controller.RetentionPolicy policy) {
    // Treat absent or UNKNOWN-typed policies as "no policy".
    if (policy == null || policy.getRetentionType() == Controller.RetentionPolicy.RetentionPolicyType.UNKNOWN) {
        return null;
    }
    return RetentionPolicy.builder()
            .retentionType(RetentionPolicy.RetentionType.valueOf(policy.getRetentionType().name()))
            .retentionParam(policy.getRetentionParam())
            .build();
}
Helper to convert retention policy from RPC call to internal representation .
23,561
/**
 * Converts an RPC StreamConfig into a client-side StreamConfiguration.
 *
 * @param config StreamConfig to convert; must be non-null.
 * @return The equivalent StreamConfiguration.
 */
public static final StreamConfiguration encode(final StreamConfig config) {
    Preconditions.checkNotNull(config, "config");
    ScalingPolicy scalingPolicy = encode(config.getScalingPolicy());
    RetentionPolicy retentionPolicy = encode(config.getRetentionPolicy());
    return StreamConfiguration.builder()
            .scalingPolicy(scalingPolicy)
            .retentionPolicy(retentionPolicy)
            .build();
}
Helper to convert StreamConfig into Stream Configuration Impl .
23,562
/**
 * Converts the given NodeUri message into a PravegaNodeUri (endpoint + port).
 *
 * @param uri NodeUri to convert; must be non-null.
 * @return The equivalent PravegaNodeUri.
 */
public static final PravegaNodeUri encode ( final NodeUri uri ) { Preconditions . checkNotNull ( uri , "uri" ) ; return new PravegaNodeUri ( uri . getEndpoint ( ) , uri . getPort ( ) ) ; }
Helper to convert NodeURI into PravegaNodeURI .
23,563
/**
 * Converts the given key-range map into a list of (minKey, maxKey) entries, preserving the map's
 * iteration order.
 *
 * @param keyRanges Key ranges to convert; must be non-null.
 * @return A list with one entry per map entry.
 */
public static final List<Map.Entry<Double, Double>> encode(final Map<Double, Double> keyRanges) {
    Preconditions.checkNotNull(keyRanges, "keyRanges");
    List<Map.Entry<Double, Double>> result = new ArrayList<>();
    for (Map.Entry<Double, Double> range : keyRanges.entrySet()) {
        result.add(new AbstractMap.SimpleEntry<>(range.getKey(), range.getValue()));
    }
    return result;
}
Return list of key ranges available .
23,564
/**
 * Maps an RPC TxnState.State onto the client-side Transaction.Status.
 *
 * @param state     State to map; must be non-null.
 * @param logString Context string used in error messages; must be non-null/non-empty.
 * @return The corresponding Transaction.Status.
 * @throws RuntimeException      If the state is UNKNOWN (transaction not found).
 * @throws IllegalStateException If the state is unrecognized.
 */
public static final Transaction.Status encode(final TxnState.State state, final String logString) {
    Preconditions.checkNotNull(state, "state");
    Exceptions.checkNotNullOrEmpty(logString, "logString");
    switch (state) {
        case COMMITTED:
            return Transaction.Status.COMMITTED;
        case ABORTED:
            return Transaction.Status.ABORTED;
        case OPEN:
            return Transaction.Status.OPEN;
        case ABORTING:
            return Transaction.Status.ABORTING;
        case COMMITTING:
            return Transaction.Status.COMMITTING;
        case UNKNOWN:
            throw new RuntimeException("Unknown transaction: " + logString);
        case UNRECOGNIZED:
        default:
            throw new IllegalStateException("Unknown status: " + state);
    }
}
Returns the client-side Transaction.Status corresponding to the given transaction state.
23,565
/**
 * Maps an RPC PingTxnStatus.Status onto the client-side Transaction.PingStatus.
 *
 * @param status    Status to map; must be non-null.
 * @param logString Context string used in error messages; must be non-null/non-empty.
 * @return The corresponding Transaction.PingStatus.
 * @throws PingFailedException If the status indicates the ping failed.
 */
public static final Transaction.PingStatus encode(final Controller.PingTxnStatus.Status status, final String logString) throws PingFailedException {
    Preconditions.checkNotNull(status, "status");
    Exceptions.checkNotNullOrEmpty(logString, "logString");
    switch (status) {
        case OK:
            return Transaction.PingStatus.OPEN;
        case COMMITTED:
            return Transaction.PingStatus.COMMITTED;
        case ABORTED:
            return Transaction.PingStatus.ABORTED;
        default:
            throw new PingFailedException("Ping transaction for " + logString + " failed with status " + status);
    }
}
Returns the status of Ping Transaction .
23,566
/**
 * Converts the given SegmentRange message into a SegmentWithRange (segment + [minKey, maxKey]).
 *
 * @param segmentRange SegmentRange to convert.
 * @return The equivalent SegmentWithRange.
 */
public static final SegmentWithRange encode ( final SegmentRange segmentRange ) { return new SegmentWithRange ( encode ( segmentRange . getSegmentId ( ) ) , segmentRange . getMinKey ( ) , segmentRange . getMaxKey ( ) ) ; }
Helper to convert SegmentRange to SegmentWithRange .
23,567
/**
 * Converts the given transaction UUID into its TxnId message representation.
 *
 * @param txnId UUID to convert; must be non-null.
 * @return The equivalent TxnId.
 */
public static final TxnId decode ( final UUID txnId ) { Preconditions . checkNotNull ( txnId , "txnId" ) ; return TxnId . newBuilder ( ) . setHighBits ( txnId . getMostSignificantBits ( ) ) . setLowBits ( txnId . getLeastSignificantBits ( ) ) . build ( ) ; }
Returns the TxnId instance corresponding to the given transaction UUID.
23,568
/**
 * Converts the given client-side Segment into its SegmentId message representation.
 *
 * @param segment Segment to convert; must be non-null.
 * @return The equivalent SegmentId.
 */
public static final SegmentId decode ( final Segment segment ) { Preconditions . checkNotNull ( segment , "segment" ) ; return createSegmentId ( segment . getScope ( ) , segment . getStreamName ( ) , segment . getSegmentId ( ) ) ; }
Decodes segment and returns an instance of SegmentId .
23,569
/**
 * Converts a client-side ScalingPolicy into its RPC message representation.
 *
 * @param policyModel Policy to convert; must be non-null.
 * @return The equivalent Controller.ScalingPolicy.
 */
public static final Controller . ScalingPolicy decode ( final ScalingPolicy policyModel ) { Preconditions . checkNotNull ( policyModel , "policyModel" ) ; return Controller . ScalingPolicy . newBuilder ( ) . setScaleType ( Controller . ScalingPolicy . ScalingPolicyType . valueOf ( policyModel . getScaleType ( ) . name ( ) ) ) . setTargetRate ( policyModel . getTargetRate ( ) ) . setScaleFactor ( policyModel . getScaleFactor ( ) ) . setMinNumSegments ( policyModel . getMinNumSegments ( ) ) . build ( ) ; }
Decodes ScalingPolicy and returns an instance of Scaling Policy impl .
23,570
/**
 * Converts a client-side RetentionPolicy into its RPC message representation.
 *
 * @param policyModel Policy to convert; may be null.
 * @return The equivalent Controller.RetentionPolicy, or null if the input is null.
 */
public static final Controller.RetentionPolicy decode(final RetentionPolicy policyModel) {
    if (policyModel == null) {
        return null;
    }
    return Controller.RetentionPolicy.newBuilder()
            .setRetentionType(Controller.RetentionPolicy.RetentionPolicyType.valueOf(policyModel.getRetentionType().name()))
            .setRetentionParam(policyModel.getRetentionParam())
            .build();
}
Decodes RetentionPolicy and returns an instance of Retention Policy impl .
23,571
/**
 * Converts a client-side StreamConfiguration into its RPC StreamConfig representation.
 * The retention policy is only set when present on the input.
 *
 * @param scope       Scope of the stream.
 * @param streamName  Name of the stream.
 * @param configModel Configuration to convert; must be non-null.
 * @return The equivalent StreamConfig.
 */
public static final StreamConfig decode ( String scope , String streamName , final StreamConfiguration configModel ) { Preconditions . checkNotNull ( configModel , "configModel" ) ; final StreamConfig . Builder builder = StreamConfig . newBuilder ( ) . setStreamInfo ( createStreamInfo ( scope , streamName ) ) . setScalingPolicy ( decode ( configModel . getScalingPolicy ( ) ) ) ; if ( configModel . getRetentionPolicy ( ) != null ) { builder . setRetentionPolicy ( decode ( configModel . getRetentionPolicy ( ) ) ) ; } return builder . build ( ) ; }
Converts StreamConfiguration into StreamConfig .
23,572
/**
 * Converts the given PravegaNodeUri into its NodeUri message representation.
 *
 * @param uri URI to convert; must be non-null.
 * @return The equivalent NodeUri.
 */
public static final NodeUri decode ( final PravegaNodeUri uri ) { Preconditions . checkNotNull ( uri , "uri" ) ; return NodeUri . newBuilder ( ) . setEndpoint ( uri . getEndpoint ( ) ) . setPort ( uri . getPort ( ) ) . build ( ) ; }
Converts PravegaNodeURI into NodeURI .
23,573
/**
 * Builds an RPC StreamCut message for the given stream from a (segment id -> offset) map.
 *
 * @param scope     Scope of the stream.
 * @param stream    Name of the stream.
 * @param streamCut Map of segment id to offset forming the cut.
 * @return The assembled Controller.StreamCut.
 */
public static Controller . StreamCut decode ( final String scope , final String stream , Map < Long , Long > streamCut ) { return Controller . StreamCut . newBuilder ( ) . setStreamInfo ( createStreamInfo ( scope , stream ) ) . putAllCut ( streamCut ) . build ( ) ; }
Creates a stream cut object .
23,574
/**
 * Creates a new BookKeeper Ledger using the configured ensemble/quorum sizes and password.
 *
 * @param bookKeeper BookKeeper client to use.
 * @param config     Configuration supplying ensemble size, write/ack quorum and password.
 * @return A handle to the newly created Ledger.
 * @throws DataLogNotAvailableException If there are not enough Bookies to form an ensemble (retryable).
 * @throws DurableDataLogException      For any other BookKeeper failure.
 */
static LedgerHandle create ( BookKeeper bookKeeper , BookKeeperConfig config ) throws DurableDataLogException { try { return Exceptions . handleInterruptedCall ( ( ) -> bookKeeper . createLedger ( config . getBkEnsembleSize ( ) , config . getBkWriteQuorumSize ( ) , config . getBkAckQuorumSize ( ) , LEDGER_DIGEST_TYPE , config . getBKPassword ( ) ) ) ; } catch ( BKException . BKNotEnoughBookiesException bkEx ) { throw new DataLogNotAvailableException ( "Unable to create new BookKeeper Ledger." , bkEx ) ; } catch ( BKException bkEx ) { throw new DurableDataLogException ( "Unable to create new BookKeeper Ledger." , bkEx ) ; } }
Creates a new Ledger in BookKeeper .
23,575
/**
 * Opens the given Ledger with recovery, which fences out any other writer currently appending to it.
 *
 * @param ledgerId   Id of the Ledger to open.
 * @param bookKeeper BookKeeper client to use.
 * @param config     Configuration supplying the Ledger password.
 * @return A handle to the opened (and fenced) Ledger.
 * @throws DurableDataLogException If the Ledger could not be opened.
 */
static LedgerHandle openFence ( long ledgerId , BookKeeper bookKeeper , BookKeeperConfig config ) throws DurableDataLogException { try { return Exceptions . handleInterruptedCall ( ( ) -> bookKeeper . openLedger ( ledgerId , LEDGER_DIGEST_TYPE , config . getBKPassword ( ) ) ) ; } catch ( BKException bkEx ) { throw new DurableDataLogException ( String . format ( "Unable to open-fence ledger %d." , ledgerId ) , bkEx ) ; } }
Opens a ledger . This operation also fences out the ledger in case anyone else was writing to it .
23,576
/**
 * Opens the given Ledger for reading without recovery, so an active writer is NOT fenced out.
 *
 * @param ledgerId   Id of the Ledger to open.
 * @param bookKeeper BookKeeper client to use.
 * @param config     Configuration supplying the Ledger password.
 * @return A read-only handle to the Ledger.
 * @throws DurableDataLogException If the Ledger could not be opened.
 */
static LedgerHandle openRead ( long ledgerId , BookKeeper bookKeeper , BookKeeperConfig config ) throws DurableDataLogException { try { return Exceptions . handleInterruptedCall ( ( ) -> bookKeeper . openLedgerNoRecovery ( ledgerId , LEDGER_DIGEST_TYPE , config . getBKPassword ( ) ) ) ; } catch ( BKException bkEx ) { throw new DurableDataLogException ( String . format ( "Unable to open-read ledger %d." , ledgerId ) , bkEx ) ; } }
Opens a ledger for reading . This operation does not fence out the ledger .
23,577
/**
 * Closes the given LedgerHandle, translating BookKeeper failures into DurableDataLogException.
 *
 * @param handle Handle to close.
 * @throws DurableDataLogException If the close failed.
 */
static void close ( LedgerHandle handle ) throws DurableDataLogException { try { Exceptions . handleInterrupted ( handle :: close ) ; } catch ( BKException bkEx ) { throw new DurableDataLogException ( String . format ( "Unable to close ledger %d." , handle . getId ( ) ) , bkEx ) ; } }
Closes the given LedgerHandle .
23,578
/**
 * Deletes the Ledger with the given id, translating BookKeeper failures into DurableDataLogException.
 *
 * @param ledgerId   Id of the Ledger to delete.
 * @param bookKeeper BookKeeper client to use.
 * @throws DurableDataLogException If the deletion failed.
 */
static void delete ( long ledgerId , BookKeeper bookKeeper ) throws DurableDataLogException { try { Exceptions . handleInterrupted ( ( ) -> bookKeeper . deleteLedger ( ledgerId ) ) ; } catch ( BKException bkEx ) { throw new DurableDataLogException ( String . format ( "Unable to delete Ledger %d." , ledgerId ) , bkEx ) ; } }
Deletes the Ledger with given LedgerId .
23,579
/**
 * Fences out a Log composed of the given Ledgers. Walks the Ledger list in reverse (newest first),
 * open-fencing each one, until MIN_FENCE_LEDGER_COUNT non-empty Ledgers have been fenced. For
 * Ledgers whose metadata status is Unknown, records the LastAddConfirmed entry id so callers can
 * reconcile their true length.
 *
 * @param ledgers       Ordered list of Ledger metadata making up the Log (oldest first).
 * @param bookKeeper    BookKeeper client to use.
 * @param config        Configuration supplying the Ledger password.
 * @param traceObjectId Id used for log messages.
 * @return A map of LedgerId to LastAddConfirmed for those Ledgers with Unknown status.
 * @throws DurableDataLogException If any Ledger could not be opened or closed.
 */
static Map < Long , Long > fenceOut ( List < LedgerMetadata > ledgers , BookKeeper bookKeeper , BookKeeperConfig config , String traceObjectId ) throws DurableDataLogException { int nonEmptyCount = 0 ; val result = new HashMap < Long , Long > ( ) ; val iterator = ledgers . listIterator ( ledgers . size ( ) ) ; while ( iterator . hasPrevious ( ) && ( nonEmptyCount < MIN_FENCE_LEDGER_COUNT ) ) { LedgerMetadata ledgerMetadata = iterator . previous ( ) ; LedgerHandle handle = openFence ( ledgerMetadata . getLedgerId ( ) , bookKeeper , config ) ; if ( handle . getLastAddConfirmed ( ) != NO_ENTRY_ID ) { nonEmptyCount ++ ; } if ( ledgerMetadata . getStatus ( ) == LedgerMetadata . Status . Unknown ) { result . put ( ledgerMetadata . getLedgerId ( ) , handle . getLastAddConfirmed ( ) ) ; } close ( handle ) ; log . info ( "{}: Fenced out Ledger {}." , traceObjectId , ledgerMetadata ) ; } return result ; }
Fences out a Log made up of the given ledgers .
23,580
/**
 * Starts the REST server on the configured port. When TLS is enabled, a Grizzly HTTPS server is
 * created with the configured keystore (client auth disabled); otherwise a plain HTTP server is used.
 */
protected void startUp ( ) { long traceId = LoggerHelpers . traceEnterWithContext ( log , this . objectId , "startUp" ) ; try { log . info ( "Starting REST server listening on port: {}" , this . restServerConfig . getPort ( ) ) ; if ( restServerConfig . isTlsEnabled ( ) ) { SSLContextConfigurator contextConfigurator = new SSLContextConfigurator ( ) ; contextConfigurator . setKeyStoreFile ( restServerConfig . getKeyFilePath ( ) ) ; contextConfigurator . setKeyStorePass ( JKSHelper . loadPasswordFrom ( restServerConfig . getKeyFilePasswordPath ( ) ) ) ; httpServer = GrizzlyHttpServerFactory . createHttpServer ( baseUri , resourceConfig , true , new SSLEngineConfigurator ( contextConfigurator , false , false , false ) ) ; } else { httpServer = GrizzlyHttpServerFactory . createHttpServer ( baseUri , resourceConfig , true ) ; } } finally { LoggerHelpers . traceLeave ( log , this . objectId , "startUp" , traceId ) ; } }
Start REST service .
23,581
/**
 * Gracefully stops the REST server, allowing up to 30 seconds for in-flight requests to drain,
 * and blocks until shutdown completes.
 *
 * @throws Exception If waiting for the shutdown future fails.
 */
protected void shutDown ( ) throws Exception { long traceId = LoggerHelpers . traceEnterWithContext ( log , this . objectId , "shutDown" ) ; try { log . info ( "Stopping REST server listening on port: {}" , this . restServerConfig . getPort ( ) ) ; final GrizzlyFuture < HttpServer > shutdown = httpServer . shutdown ( 30 , TimeUnit . SECONDS ) ; log . info ( "Awaiting termination of REST server" ) ; shutdown . get ( ) ; log . info ( "REST server terminated" ) ; } finally { LoggerHelpers . traceLeave ( log , this . objectId , "shutDown" , traceId ) ; } }
Gracefully stop REST service .
23,582
/**
 * Returns the SegmentOutputStream that events with the given routing key should be written to,
 * or null if the current segment set has not been loaded yet.
 *
 * @param routingKey Routing key of the event.
 * @return The writer for the selected segment, or null if segments are unknown.
 */
public SegmentOutputStream getSegmentOutputStreamForKey(String routingKey) {
    if (currentSegments == null) {
        return null;
    }
    Segment target = getSegmentForEvent(routingKey);
    return writers.get(target);
}
Selects which segment an event should be written to .
23,583
/**
 * Refreshes the set of segment writers from the Controller's current-segments view for this stream.
 *
 * @param segmentSealedCallBack Callback invoked when a segment is found to be sealed.
 * @return Events that were pending on writers that are no longer current and must be resent.
 */
public List < PendingEvent > refreshSegmentEventWriters ( Consumer < Segment > segmentSealedCallBack ) { log . info ( "Refreshing segments for stream {}" , stream ) ; return updateSegments ( Futures . getAndHandleExceptions ( controller . getCurrentSegments ( stream . getScope ( ) , stream . getStreamName ( ) ) , RuntimeException :: new ) , segmentSealedCallBack ) ; }
Refresh the latest list of segments in the given stream .
23,584
/**
 * Registers the given Future with this token. Already-completed Futures are ignored. If
 * cancellation has already been requested, the Future is cancelled immediately instead of being
 * tracked; otherwise it is tracked and automatically unregistered when it completes.
 *
 * @param future The Future to register; must be non-null.
 * @param <T>    Result type of the Future.
 */
public <T> void register(CompletableFuture<T> future) {
    // BUGFIX: validate before any use. The original called future.isDone() before the null check,
    // making Preconditions.checkNotNull unreachable for a null argument.
    Preconditions.checkNotNull(future, "future");
    if (future.isDone()) {
        return;
    }
    boolean autoCancel = false;
    synchronized (this.futures) {
        if (this.cancellationRequested) {
            // Too late to track: cancel it (outside the lock, below).
            autoCancel = true;
        } else {
            this.futures.add(future);
        }
    }
    if (autoCancel) {
        future.cancel(true);
        return;
    }
    // Unregister on completion so the tracking collection does not grow without bound.
    future.whenComplete((r, ex) -> {
        synchronized (this.futures) {
            this.futures.remove(future);
        }
    });
}
Registers the given Future to the token .
23,585
/**
 * Requests cancellation: marks the token as cancelled (so subsequent register() calls auto-cancel)
 * and cancels every currently registered Future, then clears the tracking collection.
 */
public void requestCancellation() {
    Collection<CompletableFuture<?>> pending;
    synchronized (this.futures) {
        this.cancellationRequested = true;
        pending = new ArrayList<>(this.futures);
    }
    // Cancel on a snapshot, outside the lock, since cancellation triggers completion callbacks.
    for (CompletableFuture<?> f : pending) {
        f.cancel(true);
    }
    synchronized (this.futures) {
        this.futures.clear();
    }
}
Cancels all registered futures .
23,586
/**
 * Builds a request descriptor (the cache key for client request ids) by joining the given fields
 * with the standard inter-field delimiter.
 *
 * @param requestInfo Fields that identify the request.
 * @return The joined descriptor string.
 */
public static String buildRequestDescriptor(String... requestInfo) {
    return String.join(INTER_FIELD_DELIMITER, requestInfo);
}
Creates a request descriptor or key to locate the client request id .
23,587
/**
 * Tracks a (requestDescriptor, requestId) pair in the cache, if tracing is enabled. If the
 * descriptor is already tracked, the id is appended to its existing list (supporting multiple
 * concurrent requests with the same descriptor).
 *
 * @param requestDescriptor Descriptor identifying the request; must be non-null.
 * @param requestId         Id to associate with the descriptor.
 */
public void trackRequest ( String requestDescriptor , long requestId ) { Preconditions . checkNotNull ( requestDescriptor , "Attempting to track a null request descriptor." ) ; if ( ! tracingEnabled ) { return ; } synchronized ( lock ) { List < Long > requestIds = ongoingRequests . getIfPresent ( requestDescriptor ) ; if ( requestIds == null ) { requestIds = Collections . synchronizedList ( new ArrayList < > ( ) ) ; } requestIds . add ( requestId ) ; ongoingRequests . put ( requestDescriptor , requestIds ) ; } log . debug ( "Tracking request {} with id {}." , requestDescriptor , requestId ) ; }
Adds a request descriptor and request id pair to the cache if tracing is enabled. If the descriptor is already being tracked, this method appends the request id to the list associated with that descriptor.
23,588
/**
 * Removes and returns a request id for the given descriptor. Returns RequestTag.NON_EXISTENT_ID if
 * the descriptor is unknown or tracing is disabled. With multiple ids per descriptor, the most
 * recently added one is removed; when only one remains, the whole cache entry is evicted.
 *
 * @param requestDescriptor Descriptor to untrack; must be non-null.
 * @return The removed request id, or RequestTag.NON_EXISTENT_ID if none was tracked.
 */
public long untrackRequest(String requestDescriptor) {
    Preconditions.checkNotNull(requestDescriptor, "Attempting to untrack a null request descriptor.");
    if (!tracingEnabled) {
        return RequestTag.NON_EXISTENT_ID;
    }
    long removedRequestId;
    List<Long> requestIds;
    synchronized (lock) {
        requestIds = ongoingRequests.getIfPresent(requestDescriptor);
        if (requestIds == null || requestIds.size() == 0) {
            log.debug("Attempting to untrack a non-existing key: {}.", requestDescriptor);
            return RequestTag.NON_EXISTENT_ID;
        }
        if (requestIds.size() > 1) {
            // Multiple concurrent requests share this descriptor: untrack the last one added.
            removedRequestId = requestIds.remove(requestIds.size() - 1);
            log.debug("{} concurrent requests with same descriptor: {}. Untracking the last of them {}.", requestIds, requestDescriptor, removedRequestId);
            ongoingRequests.put(requestDescriptor, requestIds);
        } else {
            ongoingRequests.invalidate(requestDescriptor);
            removedRequestId = requestIds.get(0);
        }
    }
    // BUGFIX: log the id that was actually removed; the original logged the remaining id list.
    log.debug("Untracking request {} with id {}.", requestDescriptor, removedRequestId);
    return removedRequestId;
}
Removes and returns a request id from an associated request descriptor . If the request descriptor does not exist or tracing is disabled a default request id is returned . In the case that there is only one request id the whole entry is evicted from cache . If there are multiple request ids for a given descriptor the last request id in the list is deleted from the cache .
23,589
/**
 * Gets the next batch of (AttributeId, Value) pairs from the underlying index iterator, passing
 * the batch through mix() before returning it.
 *
 * @return A CompletableFuture with the next (possibly mixed) batch, per the iterator's contract.
 */
public CompletableFuture < List < Map . Entry < UUID , Long > > > getNext ( ) { return this . indexIterator . getNext ( ) . thenApply ( this :: mix ) ; }
region AttributeIterator Implementation
23,590
/**
 * Creates a scaling policy that keeps the stream at a fixed number of segments (no auto-scaling).
 *
 * @param numSegments Number of segments; must be positive.
 * @return The fixed-segment-count ScalingPolicy.
 */
public static ScalingPolicy fixed ( int numSegments ) { Preconditions . checkArgument ( numSegments > 0 , "Number of segments should be > 0." ) ; return new ScalingPolicy ( ScaleType . FIXED_NUM_SEGMENTS , 0 , 0 , numSegments ) ; }
Create a scaling policy to configure a stream to have a fixed number of segments .
23,591
/**
 * Processes each of the given Operations, collecting the ids of all affected Segments. When not in
 * recovery mode, triggers any blocked future reads for those Segments and invokes the
 * commit-success callback (if set).
 *
 * @param operations Operations to process.
 * @throws DataCorruptionException If any individual Operation fails to process.
 */
void process(Iterator<Operation> operations) throws DataCorruptionException {
    HashSet<Long> affectedSegments = new HashSet<>();
    while (operations.hasNext()) {
        Operation op = operations.next();
        process(op);
        if (op instanceof SegmentOperation) {
            affectedSegments.add(((SegmentOperation) op).getStreamSegmentId());
        }
    }
    if (!this.recoveryMode.get()) {
        // Let the ReadIndex complete any reads that were waiting on these segments.
        this.readIndex.triggerFutureReads(affectedSegments);
        if (this.commitSuccess != null) {
            this.commitSuccess.run();
        }
    }
}
Processes the given operations and applies them to the ReadIndex and InMemory OperationLog .
23,592
/**
 * Processes a single Operation: StorageOperations are first applied to the ReadIndex, and
 * StreamSegmentAppendOperations are converted to their Cached form (so the payload lives only in
 * the ReadIndex cache) before being added to the in-memory Operation Log. Operations must be added
 * in order; an out-of-order add indicates corruption.
 *
 * @param operation Operation to process.
 * @throws DataCorruptionException If the cached-append conversion fails or the Operation is out of order.
 */
void process ( Operation operation ) throws DataCorruptionException { if ( operation instanceof StorageOperation ) { addToReadIndex ( ( StorageOperation ) operation ) ; if ( operation instanceof StreamSegmentAppendOperation ) { try { operation = new CachedStreamSegmentAppendOperation ( ( StreamSegmentAppendOperation ) operation ) ; } catch ( Throwable ex ) { if ( Exceptions . mustRethrow ( ex ) ) { throw ex ; } else { throw new DataCorruptionException ( String . format ( "Unable to create a CachedStreamSegmentAppendOperation from operation '%s'." , operation ) , ex ) ; } } } } boolean added = this . inMemoryOperationLog . add ( operation ) ; if ( ! added ) { throw new DataCorruptionException ( "About to have added a Log Operation to InMemoryOperationLog that was out of order." ) ; } }
Processes the given operation and applies it to the ReadIndex and InMemory OperationLog .
23,593
/**
 * Applies the given StorageOperation to the ReadIndex: appends for StreamSegmentAppendOperations,
 * begin-merge for MergeSegmentOperations. Operations referring to closed/deleted Segments are
 * logged and skipped rather than failing the caller.
 *
 * @param operation Operation to register with the ReadIndex.
 */
private void addToReadIndex ( StorageOperation operation ) { try { if ( operation instanceof StreamSegmentAppendOperation ) { StreamSegmentAppendOperation appendOperation = ( StreamSegmentAppendOperation ) operation ; this . readIndex . append ( appendOperation . getStreamSegmentId ( ) , appendOperation . getStreamSegmentOffset ( ) , appendOperation . getData ( ) ) ; } else if ( operation instanceof MergeSegmentOperation ) { MergeSegmentOperation mergeOperation = ( MergeSegmentOperation ) operation ; this . readIndex . beginMerge ( mergeOperation . getStreamSegmentId ( ) , mergeOperation . getStreamSegmentOffset ( ) , mergeOperation . getSourceSegmentId ( ) ) ; } else { assert ! ( operation instanceof CachedStreamSegmentAppendOperation ) : "attempted to add a CachedStreamSegmentAppendOperation to the ReadIndex" ; } } catch ( ObjectClosedException | StreamSegmentNotExistsException ex ) { log . warn ( "Not adding operation '{}' to ReadIndex because it refers to a deleted StreamSegment." , operation ) ; } }
Registers the given operation in the ReadIndex .
23,594
/**
 * Queues a task to run after all currently queued tasks that share any of the given dependency
 * keys (including filter-based tasks whose predicate matches a key). Under the lock it collects
 * the predecessor futures and registers the new task for each key; execution (immediate if there
 * are no predecessors, otherwise chained after them) happens outside the lock. The task's entries
 * are cleaned up when it completes.
 *
 * @param keys  Dependency keys; must be non-empty.
 * @param toRun Supplier producing the task's CompletableFuture when it is allowed to run.
 * @return A CompletableFuture for the task's result.
 */
public < ReturnType > CompletableFuture < ReturnType > add ( Collection < KeyType > keys , Supplier < CompletableFuture < ? extends ReturnType > > toRun ) { Preconditions . checkArgument ( ! keys . isEmpty ( ) , "keys cannot be empty." ) ; CompletableFuture < ReturnType > result = new CompletableFuture < > ( ) ; ArrayList < CompletableFuture < ? > > existingTasks = new ArrayList < > ( ) ; synchronized ( this . queue ) { Exceptions . checkNotClosed ( this . closed , this ) ; for ( KeyType key : keys ) { CompletableFuture < ? > existingTask = this . queue . get ( key ) ; if ( existingTask != null ) { existingTasks . add ( existingTask ) ; } for ( val e : this . filterQueue . entrySet ( ) ) { if ( e . getKey ( ) . test ( key ) ) { existingTasks . add ( e . getValue ( ) ) ; } } } executeAfterIfNeeded ( existingTasks , toRun , result ) ; keys . forEach ( key -> this . queue . put ( key , result ) ) ; } executeNowIfNeeded ( existingTasks , toRun , result ) ; result . whenComplete ( ( r , ex ) -> cleanup ( keys ) ) ; return result ; }
Queues up a new task to execute subject to the given dependency Keys .
23,595
/**
 * Queues a task to run after all currently queued key-based tasks whose key matches the given
 * filter. The task is registered in the filter queue so that later key-based tasks matching the
 * filter will wait on it. Its entry is cleaned up when it completes.
 *
 * @param keyFilter Predicate selecting the dependency keys this task must wait on.
 * @param toRun     Supplier producing the task's CompletableFuture when it is allowed to run.
 * @return A CompletableFuture for the task's result.
 */
public < ReturnType > CompletableFuture < ReturnType > addWithFilter ( Predicate < KeyType > keyFilter , Supplier < CompletableFuture < ? extends ReturnType > > toRun ) { CompletableFuture < ReturnType > result = new CompletableFuture < > ( ) ; ArrayList < CompletableFuture < ? > > existingTasks = new ArrayList < > ( ) ; synchronized ( this . queue ) { Exceptions . checkNotClosed ( this . closed , this ) ; for ( val e : this . queue . entrySet ( ) ) { if ( keyFilter . test ( e . getKey ( ) ) ) { existingTasks . add ( e . getValue ( ) ) ; } } executeAfterIfNeeded ( existingTasks , toRun , result ) ; this . filterQueue . put ( keyFilter , result ) ; } executeNowIfNeeded ( existingTasks , toRun , result ) ; result . whenComplete ( ( r , ex ) -> cleanupFilter ( keyFilter ) ) ; return result ; }
Queues up a new task to execute subject to the dependency keys that match the given filter .
23,596
/**
 * Determines whether two key ranges overlap. Each range is an entry of (low key, high key);
 * boundaries that merely touch do NOT count as an overlap.
 *
 * @param first  First range (key = low, value = high).
 * @param second Second range (key = low, value = high).
 * @return True if the open intervals intersect.
 */
public static boolean overlaps(final Map.Entry<Double, Double> first, final Map.Entry<Double, Double> second) {
    // The ranges intersect iff each one starts strictly before the other ends.
    boolean secondEndsAfterFirstStarts = second.getValue() > first.getKey();
    boolean secondStartsBeforeFirstEnds = second.getKey() < first.getValue();
    return secondEndsAfterFirstStarts && secondStartsBeforeFirstEnds;
}
Method to check if two segment overlaps .
23,597
/**
 * Gets the next item: fetches the next bucket and converts it via resultConverter; a null bucket
 * signals the end of iteration and yields a completed null result.
 *
 * @return A CompletableFuture with the next converted item, or null when exhausted.
 */
public CompletableFuture<T> getNext() {
    return getNextBucket().thenCompose(b -> {
        if (b == null) {
            // No more buckets: end of the iteration.
            return CompletableFuture.completedFuture(null);
        }
        return this.resultConverter.apply(b);
    });
}
region AsyncIterator Implementation
23,598
/**
 * Wraps the given OutputStream into the appropriate RevisionDataOutputStream implementation:
 * RandomRevisionDataOutput for random-access streams, NonSeekableRevisionDataOutput otherwise.
 *
 * @param outputStream Stream to wrap.
 * @return The wrapping RevisionDataOutputStream.
 * @throws IOException If the wrapper's construction fails.
 */
public static RevisionDataOutputStream wrap(OutputStream outputStream) throws IOException {
    return outputStream instanceof RandomAccessOutputStream
            ? new RandomRevisionDataOutput(outputStream)
            : new NonSeekableRevisionDataOutput(outputStream);
}
Wraps the given OutputStream into a specific implementation of RevisionDataOutputStream .
23,599
/**
 * Computes the number of bytes needed to serialize the given String in (modified) UTF-8 form,
 * including the 2-byte length prefix.
 *
 * @param s String to measure.
 * @return The serialized length, in bytes.
 */
public int getUTFLength(String s) {
    int length = 2; // 2-byte length prefix.
    for (int i = 0; i < s.length(); i++) {
        char c = s.charAt(i);
        if (c >= 0x0001 && c <= 0x007F) {
            length += 1; // Single byte for U+0001..U+007F.
        } else if (c <= 0x07FF) {
            length += 2; // Two bytes, including NUL (modified UTF-8).
        } else {
            length += 3; // Three bytes for everything else.
        }
    }
    return length;
}
region RevisionDataOutput Implementation