idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
23,100 | synchronized List < Write > close ( ) { List < Write > items = new ArrayList < > ( this . writes ) ; this . writes . clear ( ) ; this . totalLength = 0 ; this . closed = true ; return items ; } | Clears the queue of all the items and closes it preventing any new writes from being added . |
23,101 | public static StatsdConfig createStatsDConfig ( MetricsConfig conf ) { log . info ( "Configuring stats with statsD at {}:{}" , conf . getStatsDHost ( ) , conf . getStatsDPort ( ) ) ; return new StatsdConfig ( ) { public Duration step ( ) { return Duration . ofSeconds ( conf . getOutputFrequencySeconds ( ) . getSeconds ( ) ) ; } public String prefix ( ) { return conf . getMetricsPrefix ( ) ; } public String host ( ) { return conf . getStatsDHost ( ) ; } public int port ( ) { return conf . getStatsDPort ( ) ; } public StatsdFlavor flavor ( ) { return StatsdFlavor . TELEGRAF ; } public String get ( String key ) { return null ; } } ; } | Create StatsdConfig for StatsD Register . |
23,102 | public static InfluxConfig createInfluxConfig ( MetricsConfig conf ) { log . info ( "Configuring stats with direct InfluxDB at {}" , conf . getInfluxDBUri ( ) ) ; return new InfluxConfig ( ) { public Duration step ( ) { return Duration . ofSeconds ( conf . getOutputFrequencySeconds ( ) . getSeconds ( ) ) ; } public String prefix ( ) { return conf . getMetricsPrefix ( ) ; } public String uri ( ) { return conf . getInfluxDBUri ( ) ; } public String db ( ) { return conf . getInfluxDBName ( ) ; } public String userName ( ) { return conf . getInfluxDBUserName ( ) ; } public String password ( ) { return conf . getInfluxDBPassword ( ) ; } public String retentionPolicy ( ) { return conf . getInfluxDBRetention ( ) ; } public String get ( String k ) { return null ; } } ; } | Create InfluxConfig for InfluxDB Register . |
23,103 | public static int readAll ( InputStream stream , byte [ ] target , int startOffset , int maxLength ) throws IOException { Preconditions . checkNotNull ( stream , "stream" ) ; Preconditions . checkNotNull ( stream , "target" ) ; Preconditions . checkElementIndex ( startOffset , target . length , "startOffset" ) ; Exceptions . checkArgument ( maxLength >= 0 , "maxLength" , "maxLength must be a non-negative number." ) ; int totalBytesRead = 0 ; while ( totalBytesRead < maxLength ) { int bytesRead = stream . read ( target , startOffset + totalBytesRead , maxLength - totalBytesRead ) ; if ( bytesRead < 0 ) { break ; } totalBytesRead += bytesRead ; } return totalBytesRead ; } | Reads at most maxLength bytes from the given input stream as long as the stream still has data to serve . |
23,104 | public static byte [ ] readAll ( InputStream source , int length ) throws IOException { byte [ ] ret = new byte [ length ] ; int readBytes = readAll ( source , ret , 0 , ret . length ) ; if ( readBytes < ret . length ) { throw new EOFException ( String . format ( "Was only able to read %d bytes, which is less than the requested length of %d." , readBytes , ret . length ) ) ; } return ret ; } | Reads a number of bytes from the given InputStream and returns it as the given byte array . |
23,105 | private CompletableFuture < String > getId ( ) { String id = this . idRef . get ( ) ; if ( ! Strings . isNullOrEmpty ( id ) ) { return CompletableFuture . completedFuture ( id ) ; } else { return Futures . exceptionallyExpecting ( getStreamPosition ( ) . thenApply ( pos -> { String s = pos . toString ( ) ; this . idRef . compareAndSet ( null , s ) ; return s ; } ) , e -> Exceptions . unwrap ( e ) instanceof StoreException . DataNotFoundException , "" ) ; } } | Method to retrieve unique Id for the stream . We use streamPosition as the unique id for the stream . If the id had been previously retrieved then this method simply returns the previous value else it retrieves the stored stream position from zookeeper . The id of a stream is fixed for lifecycle of a stream and only changes when the stream is deleted and recreated . The id is used for caching entities and safeguarding against stream recreation . |
23,106 | public WriterFlushResult withFlushResult ( WriterFlushResult flushResult ) { this . flushedBytes . addAndGet ( flushResult . flushedBytes . get ( ) ) ; this . mergedBytes . addAndGet ( flushResult . mergedBytes . get ( ) ) ; this . flushedAttributes . addAndGet ( flushResult . flushedAttributes . get ( ) ) ; return this ; } | Adds the given WriterFlushResult to this one . |
23,107 | void setWriteLedger ( WriteLedger writeLedger ) { this . writeLedger . set ( writeLedger ) ; this . entryId . set ( Long . MIN_VALUE ) ; } | Sets the WriteLedger to be associated with this write . |
23,108 | Timer complete ( ) { Preconditions . checkState ( this . entryId . get ( ) >= 0 , "entryId not set; cannot complete Write." ) ; this . failureCause . set ( null ) ; this . result . complete ( new LedgerAddress ( this . writeLedger . get ( ) . metadata , this . entryId . get ( ) ) ) ; return endAttempt ( ) ; } | Indicates that this write completed successfully . This will set the final result on the externalCompletion future . |
23,109 | void fail ( Throwable cause , boolean complete ) { if ( cause != null ) { Throwable e = this . failureCause . get ( ) ; if ( e != null && e != cause ) { cause . addSuppressed ( e ) ; } this . failureCause . set ( cause ) ; } endAttempt ( ) ; WriteLedger ledger = this . writeLedger . get ( ) ; if ( ledger != null && ledger . isRolledOver ( ) ) { this . attemptCount . updateAndGet ( v -> Math . max ( 0 , v - 1 ) ) ; } if ( complete ) { this . result . completeExceptionally ( this . failureCause . get ( ) ) ; } } | Indicates that this write failed . |
23,110 | public CompletableFuture < Void > execute ( CommitEvent event ) { String scope = event . getScope ( ) ; String stream = event . getStream ( ) ; OperationContext context = streamMetadataStore . createContext ( scope , stream ) ; log . debug ( "Attempting to commit available transactions on stream {}/{}" , event . getScope ( ) , event . getStream ( ) ) ; CompletableFuture < Void > future = new CompletableFuture < > ( ) ; tryCommitTransactions ( scope , stream , context ) . whenComplete ( ( r , e ) -> { if ( e != null ) { Throwable cause = Exceptions . unwrap ( e ) ; if ( cause instanceof StoreException . OperationNotAllowedException ) { log . debug ( "Cannot commit transaction on stream {}/{}. Postponing" , scope , stream ) ; } else { log . error ( "Exception while attempting to commit transaction on stream {}/{}" , scope , stream , e ) ; } future . completeExceptionally ( cause ) ; } else { if ( r >= 0 ) { log . debug ( "Successfully committed transactions on epoch {} on stream {}/{}" , r , scope , stream ) ; } else { log . debug ( "No transactions found in committing state on stream {}/{}" , r , scope , stream ) ; } if ( processedEvents != null ) { try { processedEvents . offer ( event ) ; } catch ( Exception ex ) { } } future . complete ( null ) ; } } ) ; return future ; } | This method attempts to collect all transactions in the epoch that are marked for commit and decides if they can be committed in active epoch or if it needs to roll the transactions . |
23,111 | private CompletableFuture < Integer > tryCommitTransactions ( final String scope , final String stream , final OperationContext context ) { return streamMetadataStore . getVersionedState ( scope , stream , context , executor ) . thenComposeAsync ( state -> { final AtomicReference < VersionedMetadata < State > > stateRecord = new AtomicReference < > ( state ) ; CompletableFuture < VersionedMetadata < CommittingTransactionsRecord > > commitFuture = streamMetadataStore . startCommitTransactions ( scope , stream , context , executor ) . thenComposeAsync ( versionedMetadata -> { if ( versionedMetadata . getObject ( ) . equals ( CommittingTransactionsRecord . EMPTY ) ) { return CompletableFuture . completedFuture ( versionedMetadata ) ; } else { int txnEpoch = versionedMetadata . getObject ( ) . getEpoch ( ) ; List < UUID > txnList = versionedMetadata . getObject ( ) . getTransactionsToCommit ( ) ; CompletableFuture < Void > future ; if ( state . getObject ( ) . equals ( State . SEALING ) ) { future = CompletableFuture . completedFuture ( null ) ; } else { future = streamMetadataStore . updateVersionedState ( scope , stream , State . COMMITTING_TXN , state , context , executor ) . thenAccept ( stateRecord :: set ) ; } return future . thenCompose ( v -> getEpochRecords ( scope , stream , txnEpoch , context ) . thenCompose ( records -> { EpochRecord txnEpochRecord = records . get ( 0 ) ; EpochRecord activeEpochRecord = records . get ( 1 ) ; if ( activeEpochRecord . getEpoch ( ) == txnEpoch || activeEpochRecord . getReferenceEpoch ( ) == txnEpochRecord . getReferenceEpoch ( ) ) { return commitTransactions ( scope , stream , new ArrayList < > ( activeEpochRecord . getSegmentIds ( ) ) , txnList ) . thenApply ( x -> versionedMetadata ) ; } else { return rollTransactions ( scope , stream , txnEpochRecord , activeEpochRecord , versionedMetadata , context ) ; } } ) ) ; } } , executor ) ; return commitFuture . thenCompose ( versionedMetadata -> streamMetadataStore . 
completeCommitTransactions ( scope , stream , versionedMetadata , context , executor ) . thenCompose ( v -> resetStateConditionally ( scope , stream , stateRecord . get ( ) , context ) ) . thenApply ( v -> versionedMetadata . getObject ( ) . getEpoch ( ) ) ) ; } , executor ) ; } | Try creating txn commit list first . if node already exists and doesn t match the processing in the event throw operation not allowed . This will result in event being posted back in the stream and retried later . Generally if a transaction commit starts it will come to an end . However during failover once we have created the node we are guaranteed that it will be only that transaction that will be getting committed at that time . |
23,112 | private CompletableFuture < Void > copyTxnEpochSegmentsAndCommitTxns ( String scope , String stream , List < UUID > transactionsToCommit , List < Long > segmentIds ) { String delegationToken = streamMetadataTasks . retrieveDelegationToken ( ) ; CompletableFuture < Void > createSegmentsFuture = Futures . allOf ( segmentIds . stream ( ) . map ( segment -> { return streamMetadataTasks . notifyNewSegment ( scope , stream , segment , ScalingPolicy . fixed ( 1 ) , delegationToken ) ; } ) . collect ( Collectors . toList ( ) ) ) ; return createSegmentsFuture . thenCompose ( v -> { log . debug ( "Rolling transaction, successfully created duplicate txn epoch {} for stream {}/{}" , segmentIds , scope , stream ) ; return commitTransactions ( scope , stream , segmentIds , transactionsToCommit ) ; } ) . thenCompose ( v -> streamMetadataTasks . notifySealedSegments ( scope , stream , segmentIds , delegationToken ) ) ; } | This method is called in the rolling transaction flow . This method creates duplicate segments for transaction epoch . It then merges all transactions from the list into those duplicate segments . |
23,113 | private CompletableFuture < Void > commitTransactions ( String scope , String stream , List < Long > segments , List < UUID > transactionsToCommit ) { CompletableFuture < Void > future = CompletableFuture . completedFuture ( null ) ; for ( UUID txnId : transactionsToCommit ) { log . debug ( "Committing transaction {} on stream {}/{}" , txnId , scope , stream ) ; future = future . thenCompose ( v -> streamMetadataTasks . notifyTxnCommit ( scope , stream , segments , txnId ) ) ; } return future ; } | This method loops over each transaction in the list and commits them in order At the end of this method s execution all transactions in the list would have committed into given list of segments . |
23,114 | private CompletableFuture < List < EpochRecord > > getEpochRecords ( String scope , String stream , int epoch , OperationContext context ) { List < CompletableFuture < EpochRecord > > list = new ArrayList < > ( ) ; list . add ( streamMetadataStore . getEpoch ( scope , stream , epoch , context , executor ) ) ; list . add ( streamMetadataStore . getActiveEpoch ( scope , stream , context , true , executor ) ) ; return Futures . allOfWithResults ( list ) ; } | Fetches epoch history records for active epoch and the supplied epoch from the store . |
23,115 | void close ( boolean cleanCache ) { if ( ! this . closed ) { this . closed = true ; this . storageReadManager . close ( ) ; ArrayList < Iterator < FutureReadResultEntry > > futureReads = new ArrayList < > ( ) ; futureReads . add ( this . futureReads . close ( ) . iterator ( ) ) ; synchronized ( this . lock ) { this . pendingMergers . values ( ) . forEach ( pm -> futureReads . add ( pm . seal ( ) . iterator ( ) ) ) ; } cancelFutureReads ( Iterators . concat ( futureReads . iterator ( ) ) ) ; if ( cleanCache ) { this . executor . execute ( ( ) -> { removeAllEntries ( ) ; log . info ( "{}: Closed." , this . traceObjectId ) ; } ) ; } else { log . info ( "{}: Closed (no cache cleanup)." , this . traceObjectId ) ; } } } | Closes the ReadIndex and optionally cleans the cache . |
23,116 | private void removeAllEntries ( ) { Preconditions . checkState ( this . closed , "Cannot call removeAllEntries unless the ReadIndex is closed." ) ; int count ; synchronized ( this . lock ) { this . indexEntries . forEach ( entry -> { if ( entry . isDataEntry ( ) ) { CacheKey key = getCacheKey ( entry ) ; this . cache . remove ( key ) ; } } ) ; count = this . indexEntries . size ( ) ; this . indexEntries . clear ( ) ; } if ( count > 0 ) { log . debug ( "{}: Cleared all cache entries ({})." , this . traceObjectId , count ) ; } } | Removes all entries from the cache and the SortedIndex regardless of their state . |
23,117 | void markMerged ( ) { Exceptions . checkNotClosed ( this . closed , this ) ; Preconditions . checkState ( ! this . merged , "StreamSegmentReadIndex %d is already merged." , this . metadata . getId ( ) ) ; log . debug ( "{}: Merged." , this . traceObjectId ) ; this . merged = true ; } | Marks this Read Index as merged into another one . |
23,118 | void append ( long offset , byte [ ] data ) { Exceptions . checkNotClosed ( this . closed , this ) ; Preconditions . checkState ( ! isMerged ( ) , "StreamSegment has been merged into a different one. Cannot append more ReadIndex entries." ) ; if ( data . length == 0 ) { return ; } long length = this . metadata . getLength ( ) ; long endOffset = offset + data . length ; Exceptions . checkArgument ( endOffset <= length , "offset" , "The given range of bytes (%d-%d) is beyond the StreamSegment Length (%d)." , offset , endOffset , length ) ; this . cache . insert ( new CacheKey ( this . metadata . getId ( ) , offset ) , data ) ; appendEntry ( new CacheIndexEntry ( offset , data . length ) ) ; } | Appends the given range of bytes at the given offset . |
23,119 | void completeMerge ( SegmentMetadata sourceMetadata ) { long traceId = LoggerHelpers . traceEnterWithContext ( log , this . traceObjectId , "completeMerge" , sourceMetadata . getId ( ) ) ; Exceptions . checkNotClosed ( this . closed , this ) ; Exceptions . checkArgument ( sourceMetadata . isDeleted ( ) , "sourceSegmentStreamId" , "Given StreamSegmentReadIndex refers to a StreamSegment that has not been deleted yet." ) ; if ( sourceMetadata . getLength ( ) == 0 ) { return ; } RedirectIndexEntry redirectEntry ; PendingMerge pendingMerge ; synchronized ( this . lock ) { pendingMerge = this . pendingMergers . getOrDefault ( sourceMetadata . getId ( ) , null ) ; Exceptions . checkArgument ( pendingMerge != null , "sourceSegmentStreamId" , "Given StreamSegmentReadIndex's merger with this one has not been initiated using beginMerge. Cannot finalize the merger." ) ; ReadIndexEntry indexEntry = this . indexEntries . get ( pendingMerge . getMergeOffset ( ) ) ; assert indexEntry != null && ! indexEntry . isDataEntry ( ) : String . format ( "pendingMergers points to a ReadIndexEntry that does not exist or is of the wrong type. sourceStreamSegmentId = %d, offset = %d, treeEntry = %s." , sourceMetadata . getId ( ) , pendingMerge . getMergeOffset ( ) , indexEntry ) ; redirectEntry = ( RedirectIndexEntry ) indexEntry ; } StreamSegmentReadIndex sourceIndex = redirectEntry . getRedirectReadIndex ( ) ; List < MergedIndexEntry > sourceEntries = sourceIndex . getAllEntries ( redirectEntry . getStreamSegmentOffset ( ) ) ; synchronized ( this . lock ) { this . indexEntries . remove ( pendingMerge . getMergeOffset ( ) ) ; this . pendingMergers . remove ( sourceMetadata . getId ( ) ) ; sourceEntries . forEach ( this :: addToIndex ) ; } List < FutureReadResultEntry > pendingReads = pendingMerge . seal ( ) ; if ( pendingReads . size ( ) > 0 ) { log . debug ( "{}: triggerFutureReads for Pending Merge (Count = {}, MergeOffset = {}, MergeLength = {})." , this . 
traceObjectId , pendingReads . size ( ) , pendingMerge . getMergeOffset ( ) , sourceIndex . getSegmentLength ( ) ) ; triggerFutureReads ( pendingReads ) ; } LoggerHelpers . traceLeave ( log , this . traceObjectId , "completeMerge" , traceId ) ; } | Executes Step 2 of the 2 - Step Merge Process . The StreamSegments are physically merged in the Storage . The Source StreamSegment does not exist anymore . The ReadIndex entries of the two Streams are actually joined together . |
23,120 | void triggerFutureReads ( ) { Exceptions . checkNotClosed ( this . closed , this ) ; Preconditions . checkState ( ! this . recoveryMode , "StreamSegmentReadIndex is in Recovery Mode." ) ; boolean sealed = this . metadata . isSealed ( ) ; Collection < FutureReadResultEntry > futureReads ; if ( sealed ) { futureReads = this . futureReads . pollAll ( ) ; log . debug ( "{}: triggerFutureReads (Count = {}, Offset = {}, Sealed = True)." , this . traceObjectId , futureReads . size ( ) , this . metadata . getLength ( ) ) ; } else { ReadIndexEntry lastEntry ; synchronized ( this . lock ) { lastEntry = this . indexEntries . getLast ( ) ; } if ( lastEntry == null ) { return ; } futureReads = this . futureReads . poll ( lastEntry . getLastStreamSegmentOffset ( ) ) ; log . debug ( "{}: triggerFutureReads (Count = {}, Offset = {}, Sealed = False)." , this . traceObjectId , futureReads . size ( ) , lastEntry . getLastStreamSegmentOffset ( ) ) ; } triggerFutureReads ( futureReads ) ; } | Triggers all future reads that have a starting offset before the given value . |
23,121 | private void triggerFutureReads ( Collection < FutureReadResultEntry > futureReads ) { for ( FutureReadResultEntry r : futureReads ) { ReadResultEntry entry = getSingleReadResultEntry ( r . getStreamSegmentOffset ( ) , r . getRequestedReadLength ( ) ) ; assert entry != null : "Serving a StorageReadResultEntry with a null result" ; assert ! ( entry instanceof FutureReadResultEntry ) : "Serving a FutureReadResultEntry with another FutureReadResultEntry." ; log . trace ( "{}: triggerFutureReads (Offset = {}, Type = {})." , this . traceObjectId , r . getStreamSegmentOffset ( ) , entry . getType ( ) ) ; if ( entry . getType ( ) == ReadResultEntryType . EndOfStreamSegment ) { r . fail ( new StreamSegmentSealedException ( String . format ( "StreamSegment has been sealed at offset %d. There can be no more reads beyond this offset." , this . metadata . getLength ( ) ) ) ) ; } else { if ( ! entry . getContent ( ) . isDone ( ) ) { entry . requestContent ( this . config . getStorageReadDefaultTimeout ( ) ) ; } CompletableFuture < ReadResultEntryContents > entryContent = entry . getContent ( ) ; entryContent . thenAccept ( r :: complete ) ; Futures . exceptionListener ( entryContent , r :: fail ) ; } } } | Triggers all the Future Reads in the given collection . |
23,122 | ReadResult read ( long startOffset , int maxLength , Duration timeout ) { Exceptions . checkNotClosed ( this . closed , this ) ; Preconditions . checkState ( ! this . recoveryMode , "StreamSegmentReadIndex is in Recovery Mode." ) ; Exceptions . checkArgument ( startOffset >= 0 , "startOffset" , "startOffset must be a non-negative number." ) ; Exceptions . checkArgument ( maxLength >= 0 , "maxLength" , "maxLength must be a non-negative number." ) ; Exceptions . checkArgument ( checkReadAvailability ( startOffset , true ) != ReadAvailability . BeyondLastOffset , "startOffset" , "StreamSegment is sealed and startOffset is beyond the last offset of the StreamSegment." ) ; log . debug ( "{}: Read (Offset = {}, MaxLength = {})." , this . traceObjectId , startOffset , maxLength ) ; return new StreamSegmentReadResult ( startOffset , maxLength , this :: getMultiReadResultEntry , this . traceObjectId ) ; } | Reads a range of bytes from the StreamSegment . |
23,123 | private ReadAvailability checkReadAvailability ( long offset , boolean lastOffsetInclusive ) { if ( offset < this . metadata . getStartOffset ( ) ) { return ReadAvailability . BeforeStartOffset ; } else if ( this . metadata . isSealed ( ) ) { return offset < ( this . metadata . getLength ( ) + ( lastOffsetInclusive ? 1 : 0 ) ) ? ReadAvailability . Available : ReadAvailability . BeyondLastOffset ; } return ReadAvailability . Available ; } | Determines the availability of reading at a particular offset given the state of a segment . |
23,124 | private CompletableReadResultEntry getSingleReadResultEntry ( long resultStartOffset , int maxLength ) { Exceptions . checkNotClosed ( this . closed , this ) ; if ( maxLength < 0 ) { return null ; } CompletableReadResultEntry result = null ; ReadAvailability ra = checkReadAvailability ( resultStartOffset , false ) ; if ( ra == ReadAvailability . BeyondLastOffset ) { result = new EndOfStreamSegmentReadResultEntry ( resultStartOffset , maxLength ) ; } else if ( ra == ReadAvailability . BeforeStartOffset ) { result = new TruncatedReadResultEntry ( resultStartOffset , maxLength , this . metadata . getStartOffset ( ) ) ; } else { synchronized ( this . lock ) { ReadIndexEntry indexEntry = this . indexEntries . getFloor ( resultStartOffset ) ; if ( indexEntry == null ) { result = createDataNotAvailableRead ( resultStartOffset , maxLength ) ; } else { if ( resultStartOffset > indexEntry . getLastStreamSegmentOffset ( ) ) { result = createDataNotAvailableRead ( resultStartOffset , maxLength ) ; } else if ( indexEntry . isDataEntry ( ) ) { result = createMemoryRead ( indexEntry , resultStartOffset , maxLength , true ) ; } else if ( indexEntry instanceof RedirectIndexEntry ) { result = createRedirectedRead ( resultStartOffset , maxLength , ( RedirectIndexEntry ) indexEntry ) ; } } } } assert result != null : String . format ( "Reached the end of getSingleReadResultEntry(id=%d, offset=%d, length=%d) " + "with no plausible result in sight. This means we missed a case." , this . metadata . getId ( ) , resultStartOffset , maxLength ) ; return result ; } | Returns the first ReadResultEntry that matches the specified search parameters . |
23,125 | private ReadResultEntryBase createDataNotAvailableRead ( long streamSegmentOffset , int maxLength ) { maxLength = getLengthUntilNextEntry ( streamSegmentOffset , maxLength ) ; long storageLength = this . metadata . getStorageLength ( ) ; if ( streamSegmentOffset < storageLength ) { long actualReadLength = storageLength - streamSegmentOffset ; if ( actualReadLength > maxLength ) { actualReadLength = maxLength ; } return createStorageRead ( streamSegmentOffset , ( int ) actualReadLength ) ; } else { return createFutureRead ( streamSegmentOffset , maxLength ) ; } } | Creates a ReadResultEntry that is a placeholder for data that is not currently available in memory . |
23,126 | @ GuardedBy ( "lock" ) private CacheReadResultEntry createMemoryRead ( ReadIndexEntry entry , long streamSegmentOffset , int maxLength , boolean updateStats ) { assert streamSegmentOffset >= entry . getStreamSegmentOffset ( ) : String . format ( "streamSegmentOffset{%d} < entry.getStreamSegmentOffset{%d}" , streamSegmentOffset , entry . getStreamSegmentOffset ( ) ) ; int entryOffset = ( int ) ( streamSegmentOffset - entry . getStreamSegmentOffset ( ) ) ; int length = ( int ) Math . min ( maxLength , entry . getLength ( ) - entryOffset ) ; assert length > 0 : String . format ( "length{%d} <= 0. streamSegmentOffset = %d, maxLength = %d, entry.offset = %d, entry.length = %d" , length , streamSegmentOffset , maxLength , entry . getStreamSegmentOffset ( ) , entry . getLength ( ) ) ; byte [ ] data = this . cache . get ( getCacheKey ( entry ) ) ; assert data != null : String . format ( "No Cache Entry could be retrieved for entry %s" , entry ) ; if ( updateStats ) { int generation = this . summary . touchOne ( entry . getGeneration ( ) ) ; entry . setGeneration ( generation ) ; } return new CacheReadResultEntry ( entry . getStreamSegmentOffset ( ) , data , entryOffset , length ) ; } | Creates a ReadResultEntry for data that is readily available in memory . |
23,127 | @ GuardedBy ( "lock" ) private int getLengthUntilNextEntry ( long startOffset , int maxLength ) { ReadIndexEntry ceilingEntry = this . indexEntries . getCeiling ( startOffset ) ; if ( ceilingEntry != null ) { maxLength = ( int ) Math . min ( maxLength , ceilingEntry . getStreamSegmentOffset ( ) - startOffset ) ; } return maxLength ; } | Returns the length from the given offset until the beginning of the next index entry . If no such entry exists or if the length is greater than maxLength then maxLength is returned . |
23,128 | private int getReadAlignedLength ( long offset , int readLength ) { int lengthSinceLastMultiple = ( int ) ( offset % this . config . getStorageReadAlignment ( ) ) ; return Math . min ( readLength , this . config . getStorageReadAlignment ( ) - lengthSinceLastMultiple ) ; } | Returns an adjusted read length based on the given input making sure the end of the Read Request is aligned with a multiple of STORAGE_READ_MAX_LEN . |
23,129 | private ReadResultEntryBase createFutureRead ( long streamSegmentOffset , int maxLength ) { FutureReadResultEntry entry = new FutureReadResultEntry ( streamSegmentOffset , maxLength ) ; this . futureReads . add ( entry ) ; return entry ; } | Creates a ReadResultEntry that is a placeholder for data that is not in memory or in storage which has a starting offset beyond the length of the StreamSegment . |
23,130 | private List < MergedIndexEntry > getAllEntries ( long offsetAdjustment ) { Exceptions . checkArgument ( offsetAdjustment >= 0 , "offsetAdjustment" , "offsetAdjustment must be a non-negative number." ) ; synchronized ( this . lock ) { List < MergedIndexEntry > result = new ArrayList < > ( this . indexEntries . size ( ) ) ; this . indexEntries . forEach ( entry -> { if ( entry . isDataEntry ( ) ) { result . add ( new MergedIndexEntry ( entry . getStreamSegmentOffset ( ) + offsetAdjustment , this . metadata . getId ( ) , ( CacheIndexEntry ) entry ) ) ; } } ) ; return result ; } } | Gets a copy of all the ReadIndexEntries in this Index that are not RedirectReadIndices . All returned entries have their offsets adjusted by the given amount . |
23,131 | private CompletableFuture < Void > waitForCheckpointComplete ( String checkpointName , ScheduledExecutorService backgroundExecutor ) { AtomicBoolean checkpointPending = new AtomicBoolean ( true ) ; return Futures . loop ( checkpointPending :: get , ( ) -> { return Futures . delayedTask ( ( ) -> { synchronizer . fetchUpdates ( ) ; checkpointPending . set ( ! synchronizer . getState ( ) . isCheckpointComplete ( checkpointName ) ) ; if ( checkpointPending . get ( ) ) { log . debug ( "Waiting on checkpoint: {} currentState is: {}" , checkpointName , synchronizer . getState ( ) ) ; } return null ; } , Duration . ofMillis ( 500 ) , backgroundExecutor ) ; } , backgroundExecutor ) ; } | Periodically check the state synchronizer if the given Checkpoint is complete . |
23,132 | public void resetReadersToCheckpoint ( Checkpoint checkpoint ) { synchronizer . updateState ( ( state , updates ) -> { ReaderGroupConfig config = state . getConfig ( ) ; Map < Segment , Long > positions = new HashMap < > ( ) ; for ( StreamCut cut : checkpoint . asImpl ( ) . getPositions ( ) . values ( ) ) { positions . putAll ( cut . asImpl ( ) . getPositions ( ) ) ; } updates . add ( new ReaderGroupStateInit ( config , positions , getEndSegmentsForStreams ( config ) ) ) ; } ) ; } | Used to reset a reset a reader group to a checkpoint . This should be removed in time . |
23,133 | CompletableFuture < Void > deletePath ( final String path , final boolean deleteEmptyContainer ) { final CompletableFuture < Void > result = new CompletableFuture < > ( ) ; final CompletableFuture < Void > deleteNode = new CompletableFuture < > ( ) ; try { client . delete ( ) . inBackground ( callback ( event -> deleteNode . complete ( null ) , e -> { if ( e instanceof StoreException . DataNotFoundException ) { deleteNode . complete ( null ) ; } else { deleteNode . completeExceptionally ( e ) ; } } , path ) , executor ) . forPath ( path ) ; } catch ( Exception e ) { deleteNode . completeExceptionally ( StoreException . create ( StoreException . Type . UNKNOWN , e , path ) ) ; } deleteNode . whenComplete ( ( res , ex ) -> { if ( ex != null ) { result . completeExceptionally ( ex ) ; } else if ( deleteEmptyContainer ) { final String container = ZKPaths . getPathAndNode ( path ) . getPath ( ) ; try { client . delete ( ) . inBackground ( callback ( event -> result . complete ( null ) , e -> { if ( e instanceof StoreException . DataNotFoundException ) { result . complete ( null ) ; } else if ( e instanceof StoreException . DataNotEmptyException ) { result . complete ( null ) ; } else { result . completeExceptionally ( e ) ; } } , path ) , executor ) . forPath ( container ) ; } catch ( Exception e ) { result . completeExceptionally ( StoreException . create ( StoreException . Type . UNKNOWN , e , path ) ) ; } } else { result . complete ( null ) ; } } ) ; return result ; } | region curator client store access |
23,134 | public < T > CompletableFuture < VersionedMetadata < T > > getData ( final String path , Function < byte [ ] , T > fromBytes ) { final CompletableFuture < VersionedMetadata < T > > result = new CompletableFuture < > ( ) ; try { client . getData ( ) . inBackground ( callback ( event -> { try { T deserialized = fromBytes . apply ( event . getData ( ) ) ; result . complete ( new VersionedMetadata < > ( deserialized , new Version . IntVersion ( event . getStat ( ) . getVersion ( ) ) ) ) ; } catch ( Exception e ) { log . error ( "Exception thrown while deserializing the data" , e ) ; result . completeExceptionally ( e ) ; } } , result :: completeExceptionally , path ) , executor ) . forPath ( path ) ; } catch ( Exception e ) { result . completeExceptionally ( StoreException . create ( StoreException . Type . UNKNOWN , e , path ) ) ; } return result ; } | Method to retrieve an entity from zookeeper and then deserialize it using the supplied fromBytes function . |
23,135 | @ SneakyThrows ( IOException . class ) public static String compressToBase64 ( final String string ) { Preconditions . checkNotNull ( string , "string" ) ; final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream ( ) ; final OutputStream base64OutputStream = Base64 . getEncoder ( ) . wrap ( byteArrayOutputStream ) ; final GZIPOutputStream gzipOutputStream = new GZIPOutputStream ( base64OutputStream ) ; gzipOutputStream . write ( string . getBytes ( UTF_8 ) ) ; gzipOutputStream . close ( ) ; return byteArrayOutputStream . toString ( UTF_8 . name ( ) ) ; } | Convert the given string to its compressed base64 representation . |
23,136 | @ SneakyThrows ( IOException . class ) public static String decompressFromBase64 ( final String base64CompressedString ) { Exceptions . checkNotNullOrEmpty ( base64CompressedString , "base64CompressedString" ) ; try { final ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream ( base64CompressedString . getBytes ( UTF_8 ) ) ; final InputStream base64InputStream = Base64 . getDecoder ( ) . wrap ( byteArrayInputStream ) ; final GZIPInputStream gzipInputStream = new GZIPInputStream ( base64InputStream ) ; return IOUtils . toString ( gzipInputStream , UTF_8 ) ; } catch ( ZipException | EOFException e ) { throw new IllegalArgumentException ( "Invalid base64 input." , e ) ; } } | Get the original string from its compressed base64 representation . |
23,137 | public static StreamTruncationRecord complete ( StreamTruncationRecord toComplete ) { Preconditions . checkState ( toComplete . updating ) ; ImmutableSet . Builder < Long > builder = ImmutableSet . builder ( ) ; builder . addAll ( toComplete . deletedSegments ) ; builder . addAll ( toComplete . toDelete ) ; return StreamTruncationRecord . builder ( ) . updating ( false ) . span ( toComplete . span ) . streamCut ( toComplete . streamCut ) . deletedSegments ( builder . build ( ) ) . toDelete ( ImmutableSet . of ( ) ) . sizeTill ( toComplete . sizeTill ) . build ( ) ; } | Method to complete a given ongoing truncation record by setting updating flag to false and merging toDelete in deletedSegments . |
23,138 | void recordIterationStarted ( AbstractTimer timer ) { this . iterationId . incrementAndGet ( ) ; this . currentIterationStartTime . set ( timer . getElapsed ( ) ) ; this . lastIterationError . set ( false ) ; } | Records the fact that an iteration started . |
23,139 | void setLastTruncatedSequenceNumber ( long value ) { Preconditions . checkArgument ( value >= this . lastTruncatedSequenceNumber . get ( ) , "New LastTruncatedSequenceNumber cannot be smaller than the previous one." ) ; this . lastTruncatedSequenceNumber . set ( value ) ; } | Sets the Sequence Number of the last Truncated Operation . |
23,140 | void setLastReadSequenceNumber ( long value ) { Preconditions . checkArgument ( value >= this . lastReadSequenceNumber . get ( ) , "New LastReadSequenceNumber cannot be smaller than the previous one." ) ; this . lastReadSequenceNumber . set ( value ) ; } | Sets the Sequence Number of the last read Operation . |
23,141 | public ProcessStarter sysProp ( String name , Object value ) { this . systemProps . put ( name , value . toString ( ) ) ; return this ; } | Includes the given System Property as part of the start . |
23,142 | public ProcessStarter env ( String name , Object value ) { this . builder . environment ( ) . put ( name , value . toString ( ) ) ; return this ; } | Includes the given Environment Variable as part of the start . |
23,143 | public Process start ( ) throws IOException { ArrayList < String > cmd = new ArrayList < > ( ) ; cmd . add ( System . getProperty ( "java.home" ) + File . separator + "bin" + File . separator + "java" ) ; cmd . add ( "-cp" ) ; cmd . add ( System . getProperty ( "java.class.path" ) ) ; for ( val e : this . systemProps . entrySet ( ) ) { cmd . add ( String . format ( "-D%s=%s" , e . getKey ( ) , e . getValue ( ) ) ) ; } cmd . add ( this . target . getCanonicalName ( ) ) ; if ( this . args != null ) { for ( Object arg : this . args ) { cmd . add ( arg . toString ( ) ) ; } } this . builder . command ( cmd ) ; return this . builder . start ( ) ; } | Executes the Class using the accumulated options . |
23,144 | public CompletableFuture < CreateScopeStatus > createScope ( final String scope ) { Exceptions . checkNotNullOrEmpty ( scope , "scope" ) ; try { NameUtils . validateScopeName ( scope ) ; } catch ( IllegalArgumentException | NullPointerException e ) { log . warn ( "Create scope failed due to invalid scope name {}" , scope ) ; return CompletableFuture . completedFuture ( CreateScopeStatus . newBuilder ( ) . setStatus ( CreateScopeStatus . Status . INVALID_SCOPE_NAME ) . build ( ) ) ; } return streamStore . createScope ( scope ) ; } | Controller Service API to create scope . |
23,145 | public CompletableFuture < DeleteScopeStatus > deleteScope ( final String scope ) { Exceptions . checkNotNullOrEmpty ( scope , "scope" ) ; return streamStore . deleteScope ( scope ) ; } | Controller Service API to delete scope . |
23,146 | public CompletableFuture < String > getScope ( final String scopeName ) { Preconditions . checkNotNull ( scopeName ) ; return streamStore . getScopeConfiguration ( scopeName ) ; } | Retrieve a scope . |
23,147 | private void reportCreateStreamMetrics ( String scope , String streamName , int initialSegments , CreateStreamStatus . Status status , Duration latency ) { if ( status . equals ( CreateStreamStatus . Status . SUCCESS ) ) { streamMetrics . createStream ( scope , streamName , initialSegments , latency ) ; } else if ( status . equals ( CreateStreamStatus . Status . FAILURE ) ) { streamMetrics . createStreamFailed ( scope , streamName ) ; } } | Metrics reporting region |
23,148 | private CompletableFuture < Void > compactIfNeeded ( DirectSegmentAccess segment , long highestCopiedOffset , TimeoutTimer timer ) { SegmentProperties info = segment . getInfo ( ) ; CompletableFuture < Void > result ; if ( this . compactor . isCompactionRequired ( info ) ) { result = this . compactor . compact ( segment , timer ) ; } else { log . debug ( "{}: No compaction required at this time." , this . traceObjectId ) ; result = CompletableFuture . completedFuture ( null ) ; } return result . thenComposeAsync ( v -> { long truncateOffset = this . compactor . calculateTruncationOffset ( segment . getInfo ( ) , highestCopiedOffset ) ; if ( truncateOffset > 0 ) { log . debug ( "{}: Truncating segment at offset {}." , this . traceObjectId , truncateOffset ) ; return segment . truncate ( truncateOffset , timer . getRemaining ( ) ) ; } else { log . debug ( "{}: No segment truncation possible now." , this . traceObjectId ) ; return CompletableFuture . completedFuture ( null ) ; } } , this . executor ) . exceptionally ( ex -> { log . error ( "{}: Compaction failed." , this . traceObjectId , ex ) ; return null ; } ) ; } | Performs a Table Segment Compaction if needed . |
23,149 | private CompletableFuture < TableWriterFlushResult > flushOnce ( DirectSegmentAccess segment , TimeoutTimer timer ) { KeyUpdateCollection keyUpdates = readKeysFromSegment ( segment , this . aggregator . getFirstOffset ( ) , this . aggregator . getLastOffset ( ) , timer ) ; log . debug ( "{}: Flush.ReadFromSegment {} UpdateKeys(s)." , this . traceObjectId , keyUpdates . getUpdates ( ) . size ( ) ) ; return this . indexWriter . groupByBucket ( segment , keyUpdates . getUpdates ( ) , timer ) . thenComposeAsync ( builders -> fetchExistingKeys ( builders , segment , timer ) . thenComposeAsync ( v -> { val bucketUpdates = builders . stream ( ) . map ( BucketUpdate . Builder :: build ) . collect ( Collectors . toList ( ) ) ; logBucketUpdates ( bucketUpdates ) ; return this . indexWriter . updateBuckets ( segment , bucketUpdates , this . aggregator . getLastIndexedOffset ( ) , keyUpdates . getLastIndexedOffset ( ) , keyUpdates . getTotalUpdateCount ( ) , timer . getRemaining ( ) ) ; } , this . executor ) , this . executor ) . thenApply ( ignored -> new TableWriterFlushResult ( keyUpdates . getLastIndexedOffset ( ) , keyUpdates . getHighestCopiedOffset ( ) ) ) ; } | Performs a single flush attempt . |
23,150 | @ SneakyThrows ( IOException . class ) private KeyUpdateCollection readKeysFromSegment ( DirectSegmentAccess segment , long firstOffset , long lastOffset , TimeoutTimer timer ) { KeyUpdateCollection keyUpdates = new KeyUpdateCollection ( ) ; try ( InputStream input = readFromInMemorySegment ( segment , firstOffset , lastOffset , timer ) ) { long segmentOffset = firstOffset ; while ( segmentOffset < lastOffset ) { segmentOffset += indexSingleKey ( input , segmentOffset , keyUpdates ) ; } } return keyUpdates ; } | Reads all the Keys from the given Segment between the given offsets and indexes them by key . |
23,151 | private int indexSingleKey ( InputStream input , long entryOffset , KeyUpdateCollection keyUpdateCollection ) throws IOException { val e = AsyncTableEntryReader . readEntryComponents ( input , entryOffset , this . connector . getSerializer ( ) ) ; HashedArray key = new HashedArray ( e . getKey ( ) ) ; val update = new BucketUpdate . KeyUpdate ( key , entryOffset , e . getVersion ( ) , e . getHeader ( ) . isDeletion ( ) ) ; keyUpdateCollection . add ( update , e . getHeader ( ) . getTotalLength ( ) , e . getHeader ( ) . getEntryVersion ( ) ) ; return e . getHeader ( ) . getTotalLength ( ) ; } | Indexes a single Key for a Table Entry that begins with the first byte of the given InputStream . |
23,152 | private InputStream readFromInMemorySegment ( DirectSegmentAccess segment , long startOffset , long endOffset , TimeoutTimer timer ) { long readOffset = startOffset ; long remainingLength = endOffset - startOffset ; ArrayList < InputStream > inputs = new ArrayList < > ( ) ; while ( remainingLength > 0 ) { int readLength = ( int ) Math . min ( remainingLength , Integer . MAX_VALUE ) ; try ( ReadResult readResult = segment . read ( readOffset , readLength , timer . getRemaining ( ) ) ) { inputs . addAll ( readResult . readRemaining ( readLength , timer . getRemaining ( ) ) ) ; assert readResult . getConsumedLength ( ) == readLength : "Expecting a full read (from memory)." ; remainingLength -= readResult . getConsumedLength ( ) ; readOffset += readResult . getConsumedLength ( ) ; } } return new SequenceInputStream ( Iterators . asEnumeration ( inputs . iterator ( ) ) ) ; } | Reads from the Segment between the given offsets . This method assumes all the data is readily available in the cache otherwise it will block synchronously for Storage retrieval . |
23,153 | int getOutstandingCheckpoints ( ) { return ( int ) checkpoints . stream ( ) . filter ( checkpoint -> ! ( isCheckpointSilent ( checkpoint ) || isCheckpointComplete ( checkpoint ) ) ) . count ( ) ; } | Get the number of outstanding Checkpoints . It should not take silent Checkpoints into account . |
23,154 | private < T extends Metric , V extends MetricProxy < T > > V getOrSet ( ConcurrentHashMap < String , V > cache , String name , Function < String , T > createMetric , ProxyCreator < T , V > createProxy , String ... tags ) { MetricsNames . MetricKey keys = metricKey ( name , tags ) ; T newMetric = createMetric . apply ( keys . getRegistryKey ( ) ) ; V newProxy = createProxy . apply ( newMetric , keys . getCacheKey ( ) , cache :: remove ) ; V existingProxy = cache . putIfAbsent ( newProxy . getProxyName ( ) , newProxy ) ; if ( existingProxy != null ) { newProxy . close ( ) ; newMetric . close ( ) ; return existingProxy ; } else { return newProxy ; } } | Atomically gets an existing MetricProxy from the given cache or creates a new one and adds it . |
23,155 | public void stopBookie ( int bookieIndex ) { Preconditions . checkState ( this . servers . size ( ) > 0 , "No Bookies initialized. Call startAll()." ) ; Preconditions . checkState ( this . servers . get ( 0 ) != null , "Bookie already stopped." ) ; val bk = this . servers . get ( bookieIndex ) ; bk . shutdown ( ) ; this . servers . set ( bookieIndex , null ) ; log . info ( "Bookie {} stopped." , bookieIndex ) ; } | Stops the BookieService with the given index . |
23,156 | public void startBookie ( int bookieIndex ) throws Exception { Preconditions . checkState ( this . servers . size ( ) > 0 , "No Bookies initialized. Call startAll()." ) ; Preconditions . checkState ( this . servers . get ( 0 ) == null , "Bookie already running." ) ; this . servers . set ( bookieIndex , runBookie ( this . bookiePorts . get ( bookieIndex ) ) ) ; log . info ( "Bookie {} stopped." , bookieIndex ) ; } | Restarts the BookieService with the given index . |
23,157 | public void startAll ( ) throws Exception { this . cleanup . set ( new Thread ( this :: cleanupOnShutdown ) ) ; Runtime . getRuntime ( ) . addShutdownHook ( this . cleanup . get ( ) ) ; if ( this . startZk ) { resumeZooKeeper ( ) ; } initializeZookeeper ( ) ; runBookies ( ) ; } | Starts the BookKeeper cluster in - process . |
23,158 | public static void main ( String [ ] args ) throws Exception { val b = BookKeeperServiceRunner . builder ( ) ; b . startZk ( false ) ; try { int bkBasePort = Integer . parseInt ( System . getProperty ( PROPERTY_BASE_PORT ) ) ; int bkCount = Integer . parseInt ( System . getProperty ( PROPERTY_BOOKIE_COUNT ) ) ; val bkPorts = new ArrayList < Integer > ( ) ; for ( int i = 0 ; i < bkCount ; i ++ ) { bkPorts . add ( bkBasePort + i ) ; } b . bookiePorts ( bkPorts ) ; b . zkPort ( Integer . parseInt ( System . getProperty ( PROPERTY_ZK_PORT ) ) ) ; b . ledgersPath ( System . getProperty ( PROPERTY_LEDGERS_PATH ) ) ; b . startZk ( Boolean . parseBoolean ( System . getProperty ( PROPERTY_START_ZK , "false" ) ) ) ; b . tLSKeyStore ( System . getProperty ( TLS_KEY_STORE , "../../../config/bookie.keystore.jks" ) ) ; b . tLSKeyStorePasswordPath ( System . getProperty ( TLS_KEY_STORE_PASSWD , "../../../config/bookie.keystore.jks.passwd" ) ) ; b . secureBK ( Boolean . parseBoolean ( System . getProperty ( PROPERTY_SECURE_BK , "false" ) ) ) ; } catch ( Exception ex ) { System . out . println ( String . format ( "Invalid or missing arguments (via system properties). Expected: %s(int), " + "%s(int), %s(int), %s(String). (%s)." , PROPERTY_BASE_PORT , PROPERTY_BOOKIE_COUNT , PROPERTY_ZK_PORT , PROPERTY_LEDGERS_PATH , ex . getMessage ( ) ) ) ; System . exit ( - 1 ) ; return ; } BookKeeperServiceRunner runner = b . build ( ) ; runner . startAll ( ) ; Thread . sleep ( Long . MAX_VALUE ) ; } | Main method that can be used to start BookKeeper out - of - process using BookKeeperServiceRunner . This is used when invoking this class via ProcessStarter . |
23,159 | public int read ( ByteBuffer toFill ) { int originalPos = toFill . position ( ) ; while ( dataAvailable ( ) > 0 && toFill . hasRemaining ( ) ) { readHelper ( toFill ) ; } return toFill . position ( ) - originalPos ; } | Copies into the given ByteBuffer . |
23,160 | public int fill ( ByteBuffer fillFrom ) { int origionalPos = fillFrom . position ( ) ; while ( capacityAvailable ( ) > 0 && fillFrom . hasRemaining ( ) ) { fillHelper ( fillFrom ) ; } return fillFrom . position ( ) - origionalPos ; } | Copies from the given ByteBuffer . |
23,161 | public int dataAvailable ( ) { if ( readBuffer . position ( ) < fillBuffer . position ( ) ) { return readBuffer . remaining ( ) ; } else if ( readBuffer . position ( ) > fillBuffer . position ( ) ) { return capacity - fillBuffer . remaining ( ) ; } else { if ( readBuffer . hasRemaining ( ) ) { return readBuffer . remaining ( ) + fillBuffer . position ( ) ; } else { return 0 ; } } } | Gets the number of bytes that can be read . |
23,162 | @ SuppressWarnings ( "unchecked" ) private CompletableFuture < Object > execute ( final String oldHostId , final TaskData taskData , final TaggedResource taggedResource ) { log . debug ( "Host={} attempting to execute task {}-{} for child <{}, {}> of {}" , this . hostId , taskData . getMethodName ( ) , taskData . getMethodVersion ( ) , taggedResource . getResource ( ) , taggedResource . getTag ( ) , oldHostId ) ; try { String key = getKey ( taskData . getMethodName ( ) , taskData . getMethodVersion ( ) ) ; if ( methodMap . containsKey ( key ) ) { Method method = methodMap . get ( key ) ; if ( objectMap . get ( key ) . isReady ( ) ) { TaskBase o = objectMap . get ( key ) . copyWithContext ( new TaskBase . Context ( hostId , oldHostId , taggedResource . getTag ( ) , taggedResource . getResource ( ) ) ) ; return ( CompletableFuture < Object > ) method . invoke ( o , ( Object [ ] ) taskData . getParameters ( ) ) ; } else { log . info ( "Task module for method {} not yet ready, delaying processing it" , method . getName ( ) ) ; return Futures . delayedFuture ( Duration . ofMillis ( 100 ) , executor ) . thenApplyAsync ( ignore -> null , executor ) ; } } else { CompletableFuture < Object > error = new CompletableFuture < > ( ) ; log . warn ( "Task {} not found" , taskData . getMethodName ( ) ) ; error . completeExceptionally ( new RuntimeException ( String . format ( "Task %s not found" , taskData . getMethodName ( ) ) ) ) ; return error ; } } catch ( Exception e ) { CompletableFuture < Object > result = new CompletableFuture < > ( ) ; result . completeExceptionally ( e ) ; return result ; } } | This method identifies correct method to execute form among the task classes and executes it . |
23,163 | private void initializeMappingTable ( ) { for ( TaskBase taskClassObject : taskClassObjects ) { Class < ? extends TaskBase > claz = taskClassObject . getClass ( ) ; for ( Method method : claz . getDeclaredMethods ( ) ) { for ( Annotation annotation : method . getAnnotations ( ) ) { if ( annotation instanceof Task ) { String methodName = ( ( Task ) annotation ) . name ( ) ; String methodVersion = ( ( Task ) annotation ) . version ( ) ; String key = getKey ( methodName , methodVersion ) ; if ( ! methodMap . containsKey ( key ) ) { methodMap . put ( key , method ) ; objectMap . put ( key , taskClassObject ) ; } else { throw new DuplicateTaskAnnotationException ( methodName , methodVersion ) ; } } } } } } | Creates the table mapping method names and versions to Method objects and corresponding TaskBase objects |
23,164 | public static void createPathIfNotExists ( final CuratorFramework client , final String basePath , final byte [ ] initData ) { Preconditions . checkNotNull ( client , "client" ) ; Preconditions . checkNotNull ( basePath , "basePath" ) ; Preconditions . checkNotNull ( initData , "initData" ) ; try { if ( client . checkExists ( ) . forPath ( basePath ) == null ) { client . create ( ) . creatingParentsIfNeeded ( ) . forPath ( basePath , initData ) ; } } catch ( KeeperException . NodeExistsException e ) { log . debug ( "Path exists {}, ignoring exception" , basePath ) ; } catch ( Exception e ) { throw new RuntimeException ( "Exception while creating znode: " + basePath , e ) ; } } | Creates the znode if it doesn't already exist in ZooKeeper .
23,165 | DelayResult getThrottlingDelay ( ) { int maxDelay = 0 ; boolean maximum = false ; for ( Throttler t : this . throttlers ) { int delay = t . getDelayMillis ( ) ; if ( delay >= MAX_DELAY_MILLIS ) { maxDelay = MAX_DELAY_MILLIS ; maximum = true ; break ; } maxDelay = Math . max ( maxDelay , delay ) ; } return new DelayResult ( maxDelay , maximum ) ; } | Calculates the amount of time needed to delay based on the configured throttlers . The computed result is not additive as there is no benefit to adding delays from various Throttle Calculators together . For example a cache throttling delay will have increased batching as a side effect so there s no need to include the batching one as well . |
23,166 | void initialize ( ) { Preconditions . checkState ( this . database . get ( ) == null , "%s has already been initialized." , this . logId ) ; try { clear ( true ) ; this . database . set ( openDatabase ( ) ) ; } catch ( Exception ex ) { try { close ( ) ; } catch ( Exception closeEx ) { ex . addSuppressed ( closeEx ) ; } throw ex ; } log . info ( "{}: Initialized." , this . logId ) ; } | Initializes this instance of the RocksDB cache . This method must be invoked before the cache can be used . |
23,167 | public void insert ( Key key , byte [ ] data ) { ensureInitializedAndNotClosed ( ) ; Timer timer = new Timer ( ) ; byte [ ] serializedKey = key . serialize ( ) ; try { this . database . get ( ) . put ( this . writeOptions , serializedKey , data ) ; } catch ( RocksDBException ex ) { throw convert ( ex , "insert key '%s'" , key ) ; } RocksDBMetrics . insert ( timer . getElapsedMillis ( ) , serializedKey . length + ( ( data != null ) ? data . length : 0 ) ) ; } | region Cache Implementation |
23,168 | private void handleLogSealed ( Segment segment ) { sealedSegmentQueue . add ( segment ) ; retransmitPool . execute ( ( ) -> { Retry . indefinitelyWithExpBackoff ( config . getInitalBackoffMillis ( ) , config . getBackoffMultiple ( ) , config . getMaxBackoffMillis ( ) , t -> log . error ( "Encountered excemption when handeling a sealed segment: " , t ) ) . run ( ( ) -> { synchronized ( writeSealLock ) { Segment toSeal = sealedSegmentQueue . poll ( ) ; log . info ( "Sealing segment {} " , toSeal ) ; while ( toSeal != null ) { resend ( selector . refreshSegmentEventWritersUponSealed ( toSeal , segmentSealedCallBack ) ) ; for ( SegmentOutputStream writer : selector . getWriters ( ) ) { try { writer . write ( PendingEvent . withoutHeader ( null , ByteBufferUtils . EMPTY , null ) ) ; writer . flush ( ) ; } catch ( SegmentSealedException e ) { log . info ( "Flush on segment {} failed due to {}, it will be retried." , writer . getSegmentName ( ) , e . getMessage ( ) ) ; } } toSeal = sealedSegmentQueue . poll ( ) ; log . info ( "Sealing another segment {} " , toSeal ) ; } } return null ; } ) ; } ) ; } | If a log sealed is encountered we need to 1 . Find the new segments to write to . 2 . For each outstanding message find which new segment it should go to and send it there . |
23,169 | static RevisionDataInputStream wrap ( InputStream inputStream ) throws IOException { int bound = BitConverter . readInt ( inputStream ) ; return new RevisionDataInputStream ( new BoundedInputStream ( inputStream , bound ) ) ; } | Creates a new instance of the RevisionDataInputStream class . Upon a successful call to this method 4 bytes will have been read from the InputStream representing the expected length of the serialization . |
23,170 | public void createStream ( String scope , String streamName , int minNumSegments , Duration latency ) { DYNAMIC_LOGGER . incCounterValue ( CREATE_STREAM , 1 ) ; DYNAMIC_LOGGER . reportGaugeValue ( OPEN_TRANSACTIONS , 0 , streamTags ( scope , streamName ) ) ; DYNAMIC_LOGGER . reportGaugeValue ( SEGMENTS_COUNT , minNumSegments , streamTags ( scope , streamName ) ) ; DYNAMIC_LOGGER . incCounterValue ( SEGMENTS_SPLITS , 0 , streamTags ( scope , streamName ) ) ; DYNAMIC_LOGGER . incCounterValue ( SEGMENTS_MERGES , 0 , streamTags ( scope , streamName ) ) ; createStreamLatency . reportSuccessValue ( latency . toMillis ( ) ) ; } | This method increments the global and Stream - specific counters of Stream creations initializes other stream - specific metrics and reports the latency of the operation . |
23,171 | public void createStreamFailed ( String scope , String streamName ) { DYNAMIC_LOGGER . incCounterValue ( globalMetricName ( CREATE_STREAM_FAILED ) , 1 ) ; DYNAMIC_LOGGER . incCounterValue ( CREATE_STREAM_FAILED , 1 , streamTags ( scope , streamName ) ) ; } | This method increments the global counter of failed Stream creations in the system as well as the failed creation attempts for this specific Stream . |
23,172 | public void deleteStream ( String scope , String streamName , Duration latency ) { DYNAMIC_LOGGER . incCounterValue ( DELETE_STREAM , 1 ) ; deleteStreamLatency . reportSuccessValue ( latency . toMillis ( ) ) ; } | This method increments the global and Stream - specific counters of Stream deletions and reports the latency of the operation . |
23,173 | public void deleteStreamFailed ( String scope , String streamName ) { DYNAMIC_LOGGER . incCounterValue ( globalMetricName ( DELETE_STREAM_FAILED ) , 1 ) ; DYNAMIC_LOGGER . incCounterValue ( DELETE_STREAM_FAILED , 1 , streamTags ( scope , streamName ) ) ; } | This method increments the counter of failed Stream deletions in the system as well as the failed deletion attempts for this specific Stream . |
23,174 | public void sealStream ( String scope , String streamName , Duration latency ) { DYNAMIC_LOGGER . incCounterValue ( SEAL_STREAM , 1 ) ; DYNAMIC_LOGGER . reportGaugeValue ( OPEN_TRANSACTIONS , 0 , streamTags ( scope , streamName ) ) ; sealStreamLatency . reportSuccessValue ( latency . toMillis ( ) ) ; } | This method increments the global and Stream - specific counters of seal Stream operations set the number of open Transactions to 0 and reports the latency of the operation . |
23,175 | public void sealStreamFailed ( String scope , String streamName ) { DYNAMIC_LOGGER . incCounterValue ( globalMetricName ( SEAL_STREAM_FAILED ) , 1 ) ; DYNAMIC_LOGGER . incCounterValue ( SEAL_STREAM_FAILED , 1 , streamTags ( scope , streamName ) ) ; } | This method increments the counter of failed Stream seal operations in the system as well as the failed seal attempts for this specific Stream . |
23,176 | public void updateStream ( String scope , String streamName , Duration latency ) { DYNAMIC_LOGGER . incCounterValue ( globalMetricName ( UPDATE_STREAM ) , 1 ) ; DYNAMIC_LOGGER . incCounterValue ( UPDATE_STREAM , 1 , streamTags ( scope , streamName ) ) ; updateStreamLatency . reportSuccessValue ( latency . toMillis ( ) ) ; } | This method increments the global and Stream - specific counters of Stream updates and reports the latency of the operation . |
23,177 | public void updateStreamFailed ( String scope , String streamName ) { DYNAMIC_LOGGER . incCounterValue ( globalMetricName ( UPDATE_STREAM_FAILED ) , 1 ) ; DYNAMIC_LOGGER . incCounterValue ( UPDATE_STREAM_FAILED , 1 , streamTags ( scope , streamName ) ) ; } | This method increments the counter of failed Stream update operations in the system as well as the failed update attempts for this specific Stream . |
23,178 | public void truncateStream ( String scope , String streamName , Duration latency ) { DYNAMIC_LOGGER . incCounterValue ( globalMetricName ( TRUNCATE_STREAM ) , 1 ) ; DYNAMIC_LOGGER . incCounterValue ( TRUNCATE_STREAM , 1 , streamTags ( scope , streamName ) ) ; truncateStreamLatency . reportSuccessValue ( latency . toMillis ( ) ) ; } | This method increments the global and Stream - specific counters of Stream truncations and reports the latency of the operation . |
23,179 | public void truncateStreamFailed ( String scope , String streamName ) { DYNAMIC_LOGGER . incCounterValue ( globalMetricName ( TRUNCATE_STREAM_FAILED ) , 1 ) ; DYNAMIC_LOGGER . incCounterValue ( TRUNCATE_STREAM_FAILED , 1 , streamTags ( scope , streamName ) ) ; } | This method increments the counter of failed Stream truncate operations in the system as well as the failed truncate attempts for this specific Stream . |
23,180 | public static void reportRetentionEvent ( String scope , String streamName ) { DYNAMIC_LOGGER . recordMeterEvents ( RETENTION_FREQUENCY , 1 , streamTags ( scope , streamName ) ) ; } | This method increments the Stream - specific counter of retention operations . |
23,181 | public static void reportActiveSegments ( String scope , String streamName , int numSegments ) { DYNAMIC_LOGGER . reportGaugeValue ( SEGMENTS_COUNT , numSegments , streamTags ( scope , streamName ) ) ; } | Reports the number of active segments for a Stream . |
23,182 | public static void reportSegmentSplitsAndMerges ( String scope , String streamName , long splits , long merges ) { DYNAMIC_LOGGER . updateCounterValue ( globalMetricName ( SEGMENTS_SPLITS ) , splits ) ; DYNAMIC_LOGGER . updateCounterValue ( SEGMENTS_SPLITS , splits , streamTags ( scope , streamName ) ) ; DYNAMIC_LOGGER . updateCounterValue ( globalMetricName ( SEGMENTS_MERGES ) , merges ) ; DYNAMIC_LOGGER . updateCounterValue ( SEGMENTS_MERGES , merges , streamTags ( scope , streamName ) ) ; } | Reports the number of segment splits and merges related to a particular scale operation on a Stream . Both global and Stream - specific counters are updated . |
23,183 | private HDFSSegmentHandle asWritableHandle ( SegmentHandle handle ) { Preconditions . checkArgument ( ! handle . isReadOnly ( ) , "handle must not be read-only." ) ; return asReadableHandle ( handle ) ; } | Casts the given handle as a HDFSSegmentHandle that has isReadOnly == false . |
23,184 | private HDFSSegmentHandle asReadableHandle ( SegmentHandle handle ) { Preconditions . checkArgument ( handle instanceof HDFSSegmentHandle , "handle must be of type HDFSSegmentHandle." ) ; return ( HDFSSegmentHandle ) handle ; } | Casts the given handle as a HDFSSegmentHandle irrespective of its isReadOnly value . |
23,185 | private Path getFilePath ( String segmentName , long epoch ) { Preconditions . checkState ( segmentName != null && segmentName . length ( ) > 0 , "segmentName must be non-null and non-empty" ) ; Preconditions . checkState ( epoch >= 0 , "epoch must be non-negative " + epoch ) ; return new Path ( String . format ( NAME_FORMAT , getPathPrefix ( segmentName ) , epoch ) ) ; } | Gets the full HDFS Path to a file for the given Segment and epoch .
23,186 | private Path getSealedFilePath ( String segmentName ) { Preconditions . checkState ( segmentName != null && segmentName . length ( ) > 0 , "segmentName must be non-null and non-empty" ) ; return new Path ( String . format ( NAME_FORMAT , getPathPrefix ( segmentName ) , SEALED ) ) ; } | Gets the full HDFS path when sealed . |
23,187 | private FileStatus findStatusForSegment ( String segmentName , boolean enforceExistence ) throws IOException { FileStatus [ ] rawFiles = findAllRaw ( segmentName ) ; if ( rawFiles == null || rawFiles . length == 0 ) { if ( enforceExistence ) { throw HDFSExceptionHelpers . segmentNotExistsException ( segmentName ) ; } return null ; } val result = Arrays . stream ( rawFiles ) . sorted ( this :: compareFileStatus ) . collect ( Collectors . toList ( ) ) ; return result . get ( result . size ( ) - 1 ) ; } | Gets the filestatus representing the segment . |
23,188 | private boolean makeReadOnly ( FileStatus file ) throws IOException { if ( isReadOnly ( file ) ) { return false ; } this . fileSystem . setPermission ( file . getPath ( ) , READONLY_PERMISSION ) ; log . debug ( "MakeReadOnly '{}'." , file . getPath ( ) ) ; return true ; } | Makes the file represented by the given FileStatus read - only . |
23,189 | public static boolean deleteFileOrDirectory ( File file ) { if ( file . exists ( ) && file . isDirectory ( ) ) { File [ ] files = file . listFiles ( ) ; if ( files != null ) { for ( File f : files ) { deleteFileOrDirectory ( f ) ; } } } return file . delete ( ) ; } | Deletes the given file or directory . If a directory recursively deletes all sub - directories and files . |
23,190 | public void initialize ( ) { Exceptions . checkNotClosed ( this . closed . get ( ) , this ) ; long traceId = LoggerHelpers . traceEnter ( log , "initialize" ) ; long containerCount = this . segmentToContainerMapper . getTotalContainerCount ( ) ; ArrayList < CompletableFuture < Void > > futures = new ArrayList < > ( ) ; for ( int containerId = 0 ; containerId < containerCount ; containerId ++ ) { futures . add ( this . registry . startContainer ( containerId , INIT_TIMEOUT_PER_CONTAINER ) . thenAccept ( this :: registerHandle ) ) ; } Futures . allOf ( futures ) . join ( ) ; LoggerHelpers . traceLeave ( log , "initialize" , traceId ) ; } | region SegmentContainerManager Implementation |
23,191 | public static String failMetricName ( String metricName ) { if ( Strings . isNullOrEmpty ( metricName ) ) { return metricName ; } String [ ] tags = metricName . split ( "\\." ) ; if ( tags . length >= 2 && Ints . tryParse ( tags [ tags . length - 1 ] ) != null ) { tags [ tags . length - 2 ] += "_fail" ; return String . join ( "." , tags ) ; } else { return metricName + "_fail" ; } } | For some metrics such as OpStats Pravega generates corresponding fail metrics automatically this method is called to create the name of fail metric for a given metric . |
23,192 | public static MetricKey metricKey ( String metric , String ... tags ) { if ( tags == null || tags . length == 0 ) { return new MetricKey ( metric , metric ) ; } else { StringBuilder sb = new StringBuilder ( metric ) ; Preconditions . checkArgument ( ( tags . length % 2 ) == 0 , "Tags is a set of key/value pair so the size must be even: %s" , tags . length ) ; for ( int i = 0 ; i < tags . length ; i += 2 ) { Preconditions . checkArgument ( ! Strings . isNullOrEmpty ( tags [ i ] ) || ! Strings . isNullOrEmpty ( tags [ i + 1 ] ) , "Tag name or value cannot be empty or null" ) ; sb . append ( '.' ) . append ( tags [ i + 1 ] ) ; } return new MetricKey ( sb . toString ( ) , metric ) ; } } | Create an MetricKey object based on metric name metric type and tags associated . The MetricKey object contains cache key for cache lookup and registry key for registry lookup . |
23,193 | public static String getTransactionNameFromId ( String parentStreamSegmentName , UUID transactionId ) { StringBuilder result = new StringBuilder ( ) ; result . append ( parentStreamSegmentName ) ; result . append ( TRANSACTION_DELIMITER ) ; result . append ( String . format ( FULL_HEX_FORMAT , transactionId . getMostSignificantBits ( ) ) ) ; result . append ( String . format ( FULL_HEX_FORMAT , transactionId . getLeastSignificantBits ( ) ) ) ; return result . toString ( ) ; } | Returns the transaction name for a TransactionStreamSegment based on the name of the current Parent StreamSegment and the transactionId . |
23,194 | public static String getParentStreamSegmentName ( String transactionName ) { int endOfStreamNamePos = transactionName . lastIndexOf ( TRANSACTION_DELIMITER ) ; if ( endOfStreamNamePos < 0 || endOfStreamNamePos + TRANSACTION_DELIMITER . length ( ) + TRANSACTION_ID_LENGTH > transactionName . length ( ) ) { return null ; } return transactionName . substring ( 0 , endOfStreamNamePos ) ; } | Attempts to extract the name of the Parent StreamSegment for the given Transaction StreamSegment . This method returns a valid value only if the Transaction StreamSegmentName was generated using the generateTransactionStreamSegmentName method . |
23,195 | public static boolean isTransactionSegment ( String streamSegmentName ) { int endOfStreamNamePos = streamSegmentName . lastIndexOf ( TRANSACTION_DELIMITER ) ; if ( endOfStreamNamePos < 0 || endOfStreamNamePos + TRANSACTION_DELIMITER . length ( ) + TRANSACTION_ID_LENGTH > streamSegmentName . length ( ) ) { return false ; } return true ; } | Checks if the given stream segment name is formatted for a Transaction Segment or regular segment . |
23,196 | public static String extractPrimaryStreamSegmentName ( String streamSegmentName ) { if ( isTransactionSegment ( streamSegmentName ) ) { return extractPrimaryStreamSegmentName ( getParentStreamSegmentName ( streamSegmentName ) ) ; } int endOfStreamNamePos = streamSegmentName . lastIndexOf ( EPOCH_DELIMITER ) ; if ( endOfStreamNamePos < 0 ) { return streamSegmentName ; } return streamSegmentName . substring ( 0 , endOfStreamNamePos ) ; } | Attempts to extract the primary part of stream segment name before the epoch delimiter . This method returns a valid value only if the StreamSegmentName was generated using the getQualifiedStreamSegmentName method . |
23,197 | public static String getAttributeSegmentName ( String segmentName ) { Preconditions . checkArgument ( ! segmentName . endsWith ( ATTRIBUTE_SUFFIX ) , "segmentName is already an attribute segment name" ) ; return segmentName + ATTRIBUTE_SUFFIX ; } | Gets the name of the meta - Segment mapped to the given Segment Name that is responsible with storing extended attributes . |
23,198 | public static String getHeaderSegmentName ( String segmentName ) { Preconditions . checkArgument ( ! segmentName . endsWith ( HEADER_SUFFIX ) , "segmentName is already a segment header name" ) ; return segmentName + HEADER_SUFFIX ; } | Gets the name of the meta - Segment mapped to the given Segment Name that is responsible with storing its Rollover information . Existence of this file should also indicate that a Segment with this file has a rollover policy in place . |
23,199 | public static String getSegmentNameFromHeader ( String headerSegmentName ) { Preconditions . checkArgument ( headerSegmentName . endsWith ( HEADER_SUFFIX ) ) ; return headerSegmentName . substring ( 0 , headerSegmentName . length ( ) - HEADER_SUFFIX . length ( ) ) ; } | Gets the name of the Segment name from its Header Segment Name . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.