idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
23,300
/**
 * Create stream.
 *
 * @param scope           scope name.
 * @param stream          stream name.
 * @param config          stream configuration.
 * @param createTimestamp creation timestamp for the stream.
 * @return future with the status of the create-stream operation.
 */
@Task(name = "createStream", version = "1.0", resource = "{scope}/{stream}")
public CompletableFuture<CreateStreamStatus.Status> createStream(String scope, String stream, StreamConfiguration config, long createTimestamp) {
    final Resource resource = new Resource(scope, stream);
    final Serializable[] parameters = new Serializable[]{scope, stream, config, createTimestamp};
    return execute(resource, parameters, () -> createStreamBody(scope, stream, config, createTimestamp));
}
Create stream .
23,301
/**
 * Update a stream's configuration. Fails with FAILURE status if another
 * configuration update is already in progress.
 *
 * @param scope      scope name.
 * @param stream     stream name.
 * @param newConfig  the configuration to apply.
 * @param contextOpt optional operation context; a fresh one is created when null.
 * @return future with the status of the update operation.
 */
public CompletableFuture<UpdateStreamStatus.Status> updateStream(String scope, String stream, StreamConfiguration newConfig, OperationContext contextOpt) {
    final OperationContext context = contextOpt == null ? streamMetadataStore.createContext(scope, stream) : contextOpt;
    final long requestId = requestTracker.getRequestIdFor("updateStream", scope, stream);
    return streamMetadataStore.getConfigurationRecord(scope, stream, context, executor)
            .thenCompose(configRecord -> {
                if (configRecord.getObject().isUpdating()) {
                    // Only one configuration update may be active at a time.
                    log.warn(requestId, "Another update in progress for {}/{}", scope, stream);
                    return CompletableFuture.completedFuture(UpdateStreamStatus.Status.FAILURE);
                }
                // Post the update event, record the update as started, then wait until it completes.
                return addIndexAndSubmitTask(new UpdateStreamEvent(scope, stream, requestId),
                        () -> streamMetadataStore.startUpdateConfiguration(scope, stream, newConfig, context, executor))
                        .thenCompose(submitted -> checkDone(() -> isUpdated(scope, stream, newConfig, context))
                                .thenApply(done -> UpdateStreamStatus.Status.SUCCESS));
            })
            .exceptionally(ex -> {
                log.warn(requestId, "Exception thrown in trying to update stream configuration {}", ex.getMessage());
                return handleUpdateStreamError(ex, requestId);
            });
}
Update a stream's configuration.
23,302
/**
 * Generate a new stream cut from the current offsets of all active segments.
 *
 * @param scope           scope name.
 * @param stream          stream name.
 * @param previous        the previous stream-cut record, may be null.
 * @param contextOpt      optional operation context; a fresh one is created when null.
 * @param delegationToken token used when querying segment offsets.
 * @return future with the newly generated StreamCutRecord.
 */
public CompletableFuture<StreamCutRecord> generateStreamCut(final String scope, final String stream, final StreamCutRecord previous, final OperationContext contextOpt, String delegationToken) {
    final OperationContext context = contextOpt == null ? streamMetadataStore.createContext(scope, stream) : contextOpt;
    return streamMetadataStore.getActiveSegments(scope, stream, context, executor)
            .thenCompose(activeSegments -> Futures.allOfWithResults(
                    activeSegments.stream().parallel()
                            .collect(Collectors.toMap(segment -> segment,
                                    segment -> getSegmentOffset(scope, stream, segment.segmentId(), delegationToken)))))
            .thenCompose(offsetsBySegment -> {
                final long generationTime = System.currentTimeMillis();
                ImmutableMap.Builder<Long, Long> cutBuilder = ImmutableMap.builder();
                offsetsBySegment.forEach((segment, offset) -> cutBuilder.put(segment.segmentId(), offset));
                ImmutableMap<Long, Long> streamCutMap = cutBuilder.build();
                // Size-till-cut is computed incrementally from the previous record when available.
                return streamMetadataStore.getSizeTillStreamCut(scope, stream, streamCutMap, Optional.ofNullable(previous), context, executor)
                        .thenApply(sizeTill -> new StreamCutRecord(generationTime, sizeTill, streamCutMap));
            });
}
Generate a new stream cut .
23,303
/**
 * Truncate a stream at the given stream cut.
 *
 * @param scope      scope name.
 * @param stream     stream name.
 * @param streamCut  map of segment id to truncation offset.
 * @param contextOpt optional operation context; a fresh one is created when null.
 * @return future with the status of the truncation operation.
 */
public CompletableFuture<UpdateStreamStatus.Status> truncateStream(final String scope, final String stream, final Map<Long, Long> streamCut, final OperationContext contextOpt) {
    final OperationContext context = contextOpt == null ? streamMetadataStore.createContext(scope, stream) : contextOpt;
    final long requestId = requestTracker.getRequestIdFor("truncateStream", scope, stream);
    return startTruncation(scope, stream, streamCut, context, requestId)
            .thenCompose(truncationStarted -> {
                if (truncationStarted) {
                    // Poll (with 1s delay) until the truncation is reflected in the metadata.
                    return checkDone(() -> isTruncated(scope, stream, streamCut, context), 1000L)
                            .thenApply(done -> UpdateStreamStatus.Status.SUCCESS);
                } else {
                    log.warn(requestId, "Unable to start truncation for {}/{}", scope, stream);
                    return CompletableFuture.completedFuture(UpdateStreamStatus.Status.FAILURE);
                }
            })
            .exceptionally(ex -> {
                // Bug fix: message previously said "update stream configuration" — copy-paste
                // from updateStream; this path handles truncation failures.
                log.warn(requestId, "Exception thrown in trying to truncate stream {}", ex);
                return handleUpdateStreamError(ex, requestId);
            });
}
Truncate a stream .
23,304
/**
 * Seal a stream: post a seal event, transition the stream to SEALING (unless it is
 * already SEALED), then wait until sealing completes.
 *
 * @param scope      scope name.
 * @param stream     stream name.
 * @param contextOpt optional operation context; a fresh one is created when null.
 * @return future with the status of the seal operation.
 */
public CompletableFuture<UpdateStreamStatus.Status> sealStream(String scope, String stream, OperationContext contextOpt) {
    final OperationContext context = contextOpt == null ? streamMetadataStore.createContext(scope, stream) : contextOpt;
    final long requestId = requestTracker.getRequestIdFor("sealStream", scope, stream);
    SealStreamEvent event = new SealStreamEvent(scope, stream, requestId);
    return addIndexAndSubmitTask(event,
            () -> streamMetadataStore.getVersionedState(scope, stream, context, executor)
                    .thenCompose(state -> state.getObject().equals(State.SEALED)
                            // Already sealed: nothing to change.
                            ? CompletableFuture.completedFuture(state)
                            : streamMetadataStore.updateVersionedState(scope, stream, State.SEALING, state, context, executor)))
            .thenCompose(result -> {
                State current = result.getObject();
                if (current.equals(State.SEALED) || current.equals(State.SEALING)) {
                    return checkDone(() -> isSealed(scope, stream, context))
                            .thenApply(done -> UpdateStreamStatus.Status.SUCCESS);
                }
                return CompletableFuture.completedFuture(UpdateStreamStatus.Status.FAILURE);
            })
            .exceptionally(ex -> {
                log.warn(requestId, "Exception thrown in trying to notify sealed segments {}", ex.getMessage());
                return handleUpdateStreamError(ex, requestId);
            });
}
Seal a stream .
23,305
/**
 * Delete a stream. The stream must already be SEALED, otherwise the call returns
 * STREAM_NOT_SEALED.
 *
 * @param scope      scope name.
 * @param stream     stream name.
 * @param contextOpt optional operation context; a fresh one is created when null.
 * @return future with the status of the delete operation.
 */
public CompletableFuture<DeleteStreamStatus.Status> deleteStream(final String scope, final String stream, final OperationContext contextOpt) {
    final OperationContext context = contextOpt == null ? streamMetadataStore.createContext(scope, stream) : contextOpt;
    final long requestId = requestTracker.getRequestIdFor("deleteStream", scope, stream);
    return streamMetadataStore.getState(scope, stream, false, context, executor)
            .thenCompose(state -> {
                if (!state.equals(State.SEALED)) {
                    // Precondition not met; no event is posted.
                    return CompletableFuture.completedFuture(false);
                }
                return streamMetadataStore.getCreationTime(scope, stream, context, executor)
                        .thenApply(creationTime -> new DeleteStreamEvent(scope, stream, requestId, creationTime))
                        .thenCompose(this::writeEvent)
                        .thenApply(posted -> true);
            })
            .thenCompose(posted -> posted
                    ? checkDone(() -> isDeleted(scope, stream)).thenApply(done -> DeleteStreamStatus.Status.SUCCESS)
                    : CompletableFuture.completedFuture(DeleteStreamStatus.Status.STREAM_NOT_SEALED))
            .exceptionally(ex -> {
                log.warn(requestId, "Exception thrown while deleting stream {}", ex.getMessage());
                return handleDeleteStreamError(ex, requestId);
            });
}
Delete a stream. The precondition for deleting a stream is that the stream should be sealed.
23,306
/**
 * Starts a manual scale operation: posts a scale request event, submits the scale
 * to the metadata store, and reports whether the operation started.
 *
 * @param scope          scope name.
 * @param stream         stream name.
 * @param segmentsToSeal segments to be sealed by this scale.
 * @param newRanges      key ranges for the replacement segments.
 * @param scaleTimestamp timestamp for the scale request.
 * @param context        operation context.
 * @return future with the ScaleResponse describing the outcome.
 */
public CompletableFuture<ScaleResponse> manualScale(String scope, String stream, List<Long> segmentsToSeal, List<Map.Entry<Double, Double>> newRanges, long scaleTimestamp, OperationContext context) {
    final long requestId = requestTracker.getRequestIdFor("scaleStream", scope, stream, String.valueOf(scaleTimestamp));
    ScaleOpEvent event = new ScaleOpEvent(scope, stream, segmentsToSeal, newRanges, true, scaleTimestamp, requestId);
    return addIndexAndSubmitTask(event,
            () -> streamMetadataStore.submitScale(scope, stream, segmentsToSeal, new ArrayList<>(newRanges), scaleTimestamp, null, context, executor))
            .handle((startScaleResponse, e) -> {
                ScaleResponse.Builder response = ScaleResponse.newBuilder();
                if (e != null) {
                    Throwable cause = Exceptions.unwrap(e);
                    if (cause instanceof EpochTransitionOperationExceptions.PreConditionFailureException) {
                        response.setStatus(ScaleResponse.ScaleStreamStatus.PRECONDITION_FAILED);
                    } else {
                        log.warn(requestId, "Scale for stream {}/{} failed with exception {}", scope, stream, cause);
                        response.setStatus(ScaleResponse.ScaleStreamStatus.FAILURE);
                    }
                } else {
                    log.info(requestId, "scale for stream {}/{} started successfully", scope, stream);
                    response.setStatus(ScaleResponse.ScaleStreamStatus.STARTED);
                    response.addAllSegments(startScaleResponse.getObject().getNewSegmentsWithRange().entrySet().stream()
                            .map(segment -> convert(scope, stream, segment))
                            .collect(Collectors.toList()));
                    response.setEpoch(startScaleResponse.getObject().getActiveEpoch());
                }
                return response.build();
            });
}
Helper method to perform a scale operation against a scale request. This method posts a request in the request stream and then starts the scale operation while tracking its progress. Eventually, after scale completion, it sends a response to the caller.
23,307
/**
 * Checks whether the scale operation that produced the given epoch has completed.
 *
 * @param scope   scope name.
 * @param stream  stream name.
 * @param epoch   the epoch whose scale status is being queried.
 * @param context operation context.
 * @return future with a ScaleStatusResponse (SUCCESS, IN_PROGRESS, INVALID_INPUT or INTERNAL_ERROR).
 */
public CompletableFuture<ScaleStatusResponse> checkScale(String scope, String stream, int epoch, OperationContext context) {
    CompletableFuture<EpochRecord> activeEpochFuture = streamMetadataStore.getActiveEpoch(scope, stream, context, true, executor);
    CompletableFuture<State> stateFuture = streamMetadataStore.getState(scope, stream, true, context, executor);
    CompletableFuture<EpochTransitionRecord> etrFuture = streamMetadataStore.getEpochTransition(scope, stream, context, executor)
            .thenApply(VersionedMetadata::getObject);
    // NOTE(review): allOf waits only on state and active-epoch; etrFuture.join() below may
    // block inside the handler if the transition lookup is still pending — confirm intent.
    return CompletableFuture.allOf(stateFuture, activeEpochFuture).handle((r, ex) -> {
        ScaleStatusResponse.Builder response = ScaleStatusResponse.newBuilder();
        if (ex != null) {
            Throwable e = Exceptions.unwrap(ex);
            response.setStatus(e instanceof StoreException.DataNotFoundException
                    ? ScaleStatusResponse.ScaleStatus.INVALID_INPUT
                    : ScaleStatusResponse.ScaleStatus.INTERNAL_ERROR);
        } else {
            EpochRecord activeEpoch = activeEpochFuture.join();
            State state = stateFuture.join();
            EpochTransitionRecord etr = etrFuture.join();
            if (epoch > activeEpoch.getEpoch()) {
                // A future epoch cannot have been scaled yet.
                response.setStatus(ScaleStatusResponse.ScaleStatus.INVALID_INPUT);
            } else if (activeEpoch.getEpoch() == epoch || activeEpoch.getReferenceEpoch() == epoch) {
                response.setStatus(ScaleStatusResponse.ScaleStatus.IN_PROGRESS);
            } else if (epoch + 1 == activeEpoch.getReferenceEpoch() && state.equals(State.SCALING)
                    && (etr.equals(EpochTransitionRecord.EMPTY) || etr.getNewEpoch() == activeEpoch.getEpoch())) {
                // The immediately-following scale is still underway.
                response.setStatus(ScaleStatusResponse.ScaleStatus.IN_PROGRESS);
            } else {
                response.setStatus(ScaleStatusResponse.ScaleStatus.SUCCESS);
            }
        }
        return response.build();
    });
}
Helper method to check whether a scale operation against an epoch has completed or not.
23,308
/**
 * Finds the retention record recorded on or before the given time.
 *
 * @param time the upper-bound recording time.
 * @return the matching record, or null when no record is on or before {@code time}.
 */
public StreamCutReferenceRecord findStreamCutReferenceForTime(long time) {
    int floorIndex = getGreatestLowerBound(this, time, StreamCutReferenceRecord::getRecordingTime);
    return floorIndex < 0 ? null : retentionRecords.get(floorIndex);
}
Find retention record on or before the given time .
23,309
/**
 * Finds the retention record recorded at or below the given size.
 *
 * @param size the upper-bound recording size.
 * @return the matching record, or null when no record is at or below {@code size}.
 */
public StreamCutReferenceRecord findStreamCutReferenceForSize(long size) {
    int floorIndex = getGreatestLowerBound(this, size, StreamCutReferenceRecord::getRecordingSize);
    return floorIndex < 0 ? null : retentionRecords.get(floorIndex);
}
Find retention record on or before the given size .
23,310
/**
 * Creates a new RetentionSet with every record on or before the given record removed.
 *
 * @param set    the source retention set (not modified).
 * @param record the cut-off record; removal is by recording time.
 * @return a new RetentionSet, or {@code set} itself when nothing precedes the record.
 */
public static RetentionSet removeStreamCutBefore(RetentionSet set, StreamCutReferenceRecord record) {
    Preconditions.checkNotNull(record);
    int floorIndex = getGreatestLowerBound(set, record.getRecordingTime(), StreamCutReferenceRecord::getRecordingTime);
    if (floorIndex < 0) {
        // No record is on or before the cut-off: nothing to remove.
        return set;
    }
    if (floorIndex + 1 == set.retentionRecords.size()) {
        // Every record is removed.
        return new RetentionSet(ImmutableList.of());
    }
    return new RetentionSet(set.retentionRecords.subList(floorIndex + 1, set.retentionRecords.size()));
}
Creates a new retention set object by removing all records on or before given record .
23,311
/**
 * Initializes the metadata store by creating/pinning the metadata segment with the
 * default table attributes. May only be called once.
 *
 * @param timeout timeout for the operation.
 * @return a future that completes when the metadata segment has been pinned.
 * @throws IllegalStateException if the store is already initialized.
 */
public CompletableFuture<Void> initialize(Duration timeout) {
    Preconditions.checkState(!this.initialized.get(), "TableMetadataStore is already initialized.");
    val attributes = TableAttributes.DEFAULT_VALUES.entrySet().stream()
            .map(e -> new AttributeUpdate(e.getKey(), AttributeUpdateType.None, e.getValue()))
            .collect(Collectors.toList());
    return submitAssignment(SegmentInfo.newSegment(this.metadataSegmentName, attributes), true, timeout)
            .thenAccept(segmentId -> {
                this.initialized.set(true);
                // Bug fix: the original format string had three '{}' placeholders but only
                // two arguments were supplied, leaving the last placeholder unfilled.
                log.info("Metadata Segment pinned. Name = '{}', Id = '{}'", this.metadataSegmentName, segmentId);
            });
}
region MetadataStore Implementation
23,312
/**
 * Determines which Segment Attribute Updates are needed to apply the given bucket
 * updates and executes them on the Segment.
 *
 * @param segment            segment to update.
 * @param bucketUpdates      the bucket updates to translate into attribute updates.
 * @param firstIndexedOffset first indexed offset prior to this update.
 * @param lastIndexedOffset  last indexed offset after this update.
 * @param processedCount     number of entries processed.
 * @param timeout            timeout for the attribute update.
 * @return future with the number of attribute updates applied (0 if none were needed).
 */
CompletableFuture<Integer> updateBuckets(DirectSegmentAccess segment, Collection<BucketUpdate> bucketUpdates, long firstIndexedOffset, long lastIndexedOffset, int processedCount, Duration timeout) {
    UpdateInstructions update = new UpdateInstructions();
    for (BucketUpdate bucketUpdate : bucketUpdates) {
        generateAttributeUpdates(bucketUpdate, update);
    }
    if (lastIndexedOffset > firstIndexedOffset) {
        // The index advanced: record the new table-level attribute values too.
        generateTableAttributeUpdates(firstIndexedOffset, lastIndexedOffset, processedCount, update);
    }
    if (update.getAttributes().isEmpty()) {
        log.debug("IndexWriter[{}]: FirstIdxOffset={}, LastIdxOffset={}, No Changes.", segment.getSegmentId(), firstIndexedOffset, lastIndexedOffset);
        return CompletableFuture.completedFuture(0);
    }
    log.debug("IndexWriter[{}]: FirstIdxOffset={}, LastIdxOffset={}, AttrUpdates={}, Processed={}, Entries+={}, Buckets+={}.",
            segment.getSegmentId(), firstIndexedOffset, lastIndexedOffset, update.getAttributes().size(),
            processedCount, update.getEntryCountDelta(), update.getBucketCountDelta());
    return segment.updateAttributes(update.getAttributes(), timeout)
            .thenApply(v -> update.getAttributes().size());
}
Determines what Segment Attribute Updates are necessary to apply the given bucket updates and executes them onto the given Segment .
23,313
/**
 * Generates an AttributeUpdate that creates or replaces the Backpointer from
 * {@code fromOffset} to {@code toOffset}.
 */
private AttributeUpdate generateBackpointerUpdate(long fromOffset, long toOffset) {
    return new AttributeUpdate(getBackpointerAttributeKey(fromOffset), AttributeUpdateType.Replace, toOffset);
}
Generates an AttributeUpdate that creates a new or updates an existing Backpointer .
23,314
/**
 * Generates an AttributeUpdate that removes the Backpointer at {@code fromOffset},
 * whether or not it currently exists (removal is expressed as a Replace with the
 * null attribute value).
 */
private AttributeUpdate generateBackpointerRemoval(long fromOffset) {
    return new AttributeUpdate(getBackpointerAttributeKey(fromOffset), AttributeUpdateType.Replace, Attributes.NULL_ATTRIBUTE_VALUE);
}
Generates an AttributeUpdate that removes a Backpointer whether it exists or not .
23,315
/**
 * Ensures the required zookeeper node exists and the host-container map listener is
 * running. Idempotent: a no-op once initialization has succeeded.
 */
@SneakyThrows(Exception.class)
private void tryInit() {
    if (zkInit) {
        return;
    }
    ZKUtils.createPathIfNotExists(zkClient, zkPath, HostContainerMap.EMPTY.toBytes());
    hostContainerMapNode.getListenable().addListener(this::updateMap);
    hostContainerMapNode.start(true);
    updateMap();
    zkInit = true;
}
Ensure required zk node is present in zookeeper .
23,316
/**
 * Completes this ReadResultEntry's future with the given contents, first notifying
 * the registered completion callback (if any) of the number of bytes read.
 *
 * @throws IllegalStateException if the result has already been set.
 */
protected void complete(ReadResultEntryContents readResultEntryContents) {
    Preconditions.checkState(!this.contents.isDone(), "ReadResultEntry has already had its result set.");
    CompletionConsumer consumer = this.completionCallback;
    if (consumer != null) {
        consumer.accept(readResultEntryContents.getLength());
    }
    this.contents.complete(readResultEntryContents);
}
Completes the Future of this ReadResultEntry by setting the given content .
23,317
/**
 * Fails this ReadResultEntry's future with the given exception.
 *
 * @throws IllegalStateException if the result has already been set.
 */
protected void fail(Throwable exception) {
    Preconditions.checkState(!this.contents.isDone(), "ReadResultEntry has already had its result set.");
    this.contents.completeExceptionally(exception);
}
Fails the Future of this ReadResultEntry with the given exception .
23,318
/**
 * Gets the next ReadItem from the log, skipping over non-readable ledgers.
 * Returns null when there is nothing further to read.
 *
 * @throws DurableDataLogException if a ledger could not be opened or read.
 */
public DurableDataLog.ReadItem getNext() throws DurableDataLogException {
    Exceptions.checkNotClosed(this.closed.get(), this);
    if (this.currentLedger == null) {
        // First call: position at the first address after the truncation point.
        openNextLedger(this.metadata.getNextAddress(this.metadata.getTruncationAddress(), Long.MAX_VALUE));
    }
    while (this.currentLedger != null && (!this.currentLedger.canRead())) {
        // NOTE(review): getLastAddConfirmed() is called again after Ledgers.close();
        // this assumes the handle retains the value after close — confirm against the
        // BookKeeper client's semantics.
        val lastAddress = new LedgerAddress(this.currentLedger.metadata, this.currentLedger.handle.getLastAddConfirmed());
        Ledgers.close(this.currentLedger.handle);
        openNextLedger(this.metadata.getNextAddress(lastAddress, this.currentLedger.handle.getLastAddConfirmed()));
    }
    if (this.currentLedger == null || this.currentLedger.reader == null) {
        return null;
    }
    return new LogReader.ReadItem(this.currentLedger.reader.nextElement(), this.currentLedger.metadata);
}
region CloseableIterator Implementation
23,319
/**
 * Writes the given 16-bit Short to the given ArrayView at the given offset.
 *
 * @return the number of bytes written (Short.BYTES).
 */
public static int writeShort(ArrayView target, int offset, short value) {
    return writeShort(target.array(), target.arrayOffset() + offset, value);
}
Writes the given 16 - bit Short to the given ArrayView at the given offset .
23,320
/**
 * Writes the given 16-bit Short to the given byte array at the given offset,
 * in big-endian order.
 *
 * @return the number of bytes written (Short.BYTES).
 */
public static int writeShort(byte[] target, int offset, short value) {
    target[offset] = (byte) ((value >>> 8) & 0xFF);
    target[offset + 1] = (byte) (value & 0xFF);
    return Short.BYTES;
}
Writes the given 16 - bit Short to the given byte array at the given offset .
23,321
/**
 * Writes the given 32-bit Integer to the given ArrayView at the given offset.
 *
 * @return the number of bytes written (Integer.BYTES).
 */
public static int writeInt(ArrayView target, int offset, int value) {
    return writeInt(target.array(), target.arrayOffset() + offset, value);
}
Writes the given 32 - bit Integer to the given ArrayView at the given offset .
23,322
/**
 * Writes the given 32-bit Integer to the given byte array at the given offset,
 * in big-endian order.
 *
 * @return the number of bytes written (Integer.BYTES).
 */
public static int writeInt(byte[] target, int offset, int value) {
    target[offset] = (byte) (value >>> 24);
    target[offset + 1] = (byte) (value >>> 16);
    target[offset + 2] = (byte) (value >>> 8);
    target[offset + 3] = (byte) value;
    return Integer.BYTES;
}
Writes the given 32 - bit Integer to the given byte array at the given offset .
23,323
/**
 * Writes the given 32-bit Integer to the given OutputStream, most significant
 * byte first.
 *
 * @return the number of bytes written (Integer.BYTES).
 * @throws IOException if the stream write fails.
 */
public static int writeInt(OutputStream target, int value) throws IOException {
    target.write(value >>> 24);
    target.write(value >>> 16);
    target.write(value >>> 8);
    target.write(value);
    return Integer.BYTES;
}
Writes the given 32 - bit Integer to the given OutputStream .
23,324
/**
 * Reads a big-endian 16-bit Short from the given ArrayView starting at the
 * given position.
 */
public static short readShort(ArrayView source, int position) {
    int high = source.get(position) & 0xFF;
    int low = source.get(position + 1) & 0xFF;
    return (short) ((high << 8) | low);
}
Reads a 16 - bit Short from the given ArrayView starting at the given position .
23,325
/**
 * Reads a big-endian 32-bit integer from the given byte array starting at the
 * given position.
 */
public static int readInt(byte[] source, int position) {
    return ((source[position] & 0xFF) << 24)
            | ((source[position + 1] & 0xFF) << 16)
            | ((source[position + 2] & 0xFF) << 8)
            | (source[position + 3] & 0xFF);
}
Reads a 32 - bit integer from the given byte array starting at the given position .
23,326
/**
 * Reads a big-endian 32-bit integer from the given ArrayView starting at the
 * given position.
 */
public static int readInt(ArrayView source, int position) {
    return ((source.get(position) & 0xFF) << 24)
            | ((source.get(position + 1) & 0xFF) << 16)
            | ((source.get(position + 2) & 0xFF) << 8)
            | (source.get(position + 3) & 0xFF);
}
Reads a 32 - bit integer from the given ArrayView starting at the given position .
23,327
/**
 * Reads a 32-bit integer (as written by BitConverter.writeInt) from the given
 * InputStream.
 *
 * @throws EOFException if the stream ends before four bytes could be read.
 * @throws IOException  if the stream read fails.
 */
public static int readInt(InputStream source) throws IOException {
    int b1 = source.read();
    int b2 = source.read();
    int b3 = source.read();
    int b4 = source.read();
    if ((b1 | b2 | b3 | b4) < 0) {
        // At least one read() returned -1 (end of stream).
        throw new EOFException();
    }
    return (b1 << 24) + (b2 << 16) + (b3 << 8) + b4;
}
Reads a 32 - bit integer from the given InputStream that was encoded using BitConverter . writeInt .
23,328
/**
 * Writes the given 64-bit Long to the given ArrayView at the given offset.
 *
 * @return the number of bytes written (Long.BYTES).
 */
public static int writeLong(ArrayView target, int offset, long value) {
    return writeLong(target.array(), target.arrayOffset() + offset, value);
}
Writes the given 64 - bit Long to the given ArrayView at the given offset .
23,329
/**
 * Writes the given 64-bit Long to the given byte array at the given offset,
 * in big-endian order.
 *
 * @return the number of bytes written (Long.BYTES).
 */
public static int writeLong(byte[] target, int offset, long value) {
    // Emit most-significant byte first.
    for (int i = 0; i < Long.BYTES; i++) {
        target[offset + i] = (byte) (value >>> (56 - 8 * i));
    }
    return Long.BYTES;
}
Writes the given 64 - bit Long to the given byte array at the given offset .
23,330
/**
 * Writes the given 128-bit UUID to the given byte array at the given offset:
 * most-significant 64 bits first, then the least-significant 64 bits.
 *
 * @return the number of bytes written (2 * Long.BYTES).
 */
public static int writeUUID(byte[] target, int offset, UUID value) {
    writeLong(target, offset, value.getMostSignificantBits());
    writeLong(target, offset + Long.BYTES, value.getLeastSignificantBits());
    return 2 * Long.BYTES;
}
Writes the given 128 - bit UUID to the given byte array at the given offset .
23,331
/**
 * Reads a 128-bit UUID from the given ArrayView starting at the given position:
 * most-significant 64 bits first, then the least-significant 64 bits.
 */
public static UUID readUUID(ArrayView source, int position) {
    long mostSignificant = readLong(source, position);
    long leastSignificant = readLong(source, position + Long.BYTES);
    return new UUID(mostSignificant, leastSignificant);
}
Reads a 128 - bit UUID from the given ArrayView starting at the given position .
23,332
/**
 * Reads a big-endian 64-bit long from the given ArrayView starting at the given
 * position.
 */
public static long readLong(ArrayView source, int position) {
    return ((long) (source.get(position) & 0xFF) << 56)
            | ((long) (source.get(position + 1) & 0xFF) << 48)
            | ((long) (source.get(position + 2) & 0xFF) << 40)
            | ((long) (source.get(position + 3) & 0xFF) << 32)
            | ((long) (source.get(position + 4) & 0xFF) << 24)
            | ((source.get(position + 5) & 0xFF) << 16)
            | ((source.get(position + 6) & 0xFF) << 8)
            | (source.get(position + 7) & 0xFF);
}
Reads a 64 - bit long from the given ArrayView starting at the given position .
23,333
/**
 * Reads a big-endian 64-bit long from the given byte array starting at the given
 * position.
 */
public static long readLong(byte[] source, int position) {
    long result = 0;
    // Fold eight bytes in, most-significant first.
    for (int i = 0; i < Long.BYTES; i++) {
        result = (result << 8) | (source[position + i] & 0xFF);
    }
    return result;
}
Reads a 64 - bit long from the given byte array starting at the given position .
23,334
/**
 * Reads a 64-bit long (as written by BitConverter.writeLong) from the given
 * InputStream.
 *
 * @throws EOFException if the stream ends before eight bytes could be read.
 * @throws IOException  if the stream read fails.
 */
public static long readLong(InputStream source) throws IOException {
    int b1 = source.read();
    int b2 = source.read();
    int b3 = source.read();
    int b4 = source.read();
    int b5 = source.read();
    int b6 = source.read();
    int b7 = source.read();
    int b8 = source.read();
    if ((b1 | b2 | b3 | b4 | b5 | b6 | b7 | b8) < 0) {
        // At least one read() returned -1 (end of stream).
        throw new EOFException();
    }
    return ((long) b1 << 56)
            + ((long) (b2 & 255) << 48)
            + ((long) (b3 & 255) << 40)
            + ((long) (b4 & 255) << 32)
            + ((long) (b5 & 255) << 24)
            + ((long) (b6 & 255) << 16)
            + ((long) (b7 & 255) << 8)
            + (long) (b8 & 255);
}
Reads a 64 - bit long from the given InputStream that was encoded using BitConverter . writeLong .
23,335
/**
 * Creates a new LogMetadata instance containing an additional ledger and an
 * incremented epoch. This instance is not modified.
 *
 * @param ledgerId id of the ledger to append.
 * @return the new LogMetadata.
 * @throws IllegalStateException if the log is not enabled.
 */
LogMetadata addLedger(long ledgerId) {
    Preconditions.checkState(this.enabled, "Log is not enabled. Cannot perform any modifications on it.");
    List<LedgerMetadata> newLedgers = new ArrayList<>(this.ledgers.size() + 1);
    newLedgers.addAll(this.ledgers);
    // Sequence numbers are contiguous: next is last + 1, or the initial value for an empty log.
    int nextSequence = this.ledgers.isEmpty() ? INITIAL_LEDGER_SEQUENCE : this.ledgers.get(this.ledgers.size() - 1).getSequence() + 1;
    newLedgers.add(new LedgerMetadata(ledgerId, nextSequence));
    return new LogMetadata(this.epoch + 1, this.enabled, Collections.unmodifiableList(newLedgers), this.truncationAddress, this.updateVersion.get());
}
Creates a new instance of the LogMetadata class which contains an additional ledger .
23,336
/**
 * Creates a new LogMetadata with known-Empty ledgers removed, except for the last
 * {@code skipCountFromEnd} ledgers, which are always kept. This instance is not
 * modified.
 */
LogMetadata removeEmptyLedgers(int skipCountFromEnd) {
    List<LedgerMetadata> newLedgers = new ArrayList<>();
    int cutoffIndex = this.ledgers.size() - skipCountFromEnd;
    for (int i = 0; i < cutoffIndex; i++) {
        LedgerMetadata candidate = this.ledgers.get(i);
        if (candidate.getStatus() != LedgerMetadata.Status.Empty) {
            newLedgers.add(candidate);
        }
    }
    // Keep the trailing skipCountFromEnd ledgers regardless of their status.
    for (int i = cutoffIndex; i < this.ledgers.size(); i++) {
        newLedgers.add(this.ledgers.get(i));
    }
    return new LogMetadata(this.epoch, this.enabled, Collections.unmodifiableList(newLedgers), this.truncationAddress, this.updateVersion.get());
}
Removes LedgerMetadata instances for those Ledgers that are known to be empty .
23,337
/**
 * Creates a new LogMetadata in which each ledger with Unknown status is resolved
 * to Empty or NotEmpty using the supplied LastAddConfirmed values. This instance
 * is not modified.
 *
 * @param lastAddConfirmed map of ledger id to its LastAddConfirmed entry id.
 * @return a new LogMetadata, or this instance when the map is empty.
 */
LogMetadata updateLedgerStatus(Map<Long, Long> lastAddConfirmed) {
    if (lastAddConfirmed.isEmpty()) {
        return this;
    }
    List<LedgerMetadata> newLedgers = this.ledgers.stream()
            .map(lm -> {
                long lac = lastAddConfirmed.getOrDefault(lm.getLedgerId(), Long.MIN_VALUE);
                if (lm.getStatus() == LedgerMetadata.Status.Unknown && lac != Long.MIN_VALUE) {
                    // NO_ENTRY_ID means nothing was ever confirmed, i.e. the ledger is empty.
                    LedgerMetadata.Status resolved = lac == Ledgers.NO_ENTRY_ID ? LedgerMetadata.Status.Empty : LedgerMetadata.Status.NotEmpty;
                    lm = new LedgerMetadata(lm.getLedgerId(), lm.getSequence(), resolved);
                }
                return lm;
            })
            .collect(Collectors.toList());
    return new LogMetadata(this.epoch, this.enabled, Collections.unmodifiableList(newLedgers), this.truncationAddress, this.updateVersion.get());
}
Updates the LastAddConfirmed on individual LedgerMetadata instances based on the provided argument .
23,338
/**
 * Updates the current metadata version in place (versions may only increase) and
 * returns this instance for chaining.
 *
 * @throws IllegalArgumentException if {@code value} is smaller than the current version.
 */
LogMetadata withUpdateVersion(int value) {
    Preconditions.checkArgument(value >= this.updateVersion.get(), "versions must increase");
    this.updateVersion.set(value);
    return this;
}
Updates the current version of the metadata .
23,339
/**
 * Returns a LogMetadata identical to this one but with the enabled flag set to
 * true. Returns this instance unchanged when already enabled.
 */
LogMetadata asEnabled() {
    if (this.enabled) {
        return this;
    }
    return new LogMetadata(this.epoch, true, this.ledgers, this.truncationAddress, this.updateVersion.get());
}
Returns a LogMetadata class with the exact contents of this instance but the enabled flag set to true . No changes are performed on this instance .
23,340
/**
 * Returns a LogMetadata identical to this one but with the enabled flag set to
 * false. Returns this instance unchanged when already disabled.
 */
LogMetadata asDisabled() {
    if (!this.enabled) {
        return this;
    }
    return new LogMetadata(this.epoch, false, this.ledgers, this.truncationAddress, this.updateVersion.get());
}
Returns a LogMetadata class with the exact contents of this instance but the enabled flag set to false . No changes are performed on this instance .
23,341
/**
 * Gets the LedgerMetadata for the ledger with the given id, or null if no such
 * ledger is known.
 */
LedgerMetadata getLedger(long ledgerId) {
    int index = getLedgerMetadataIndex(ledgerId);
    return index >= 0 ? this.ledgers.get(index) : null;
}
Gets the LedgerMetadata for the ledger with given ledger Id .
23,342
/**
 * Gets the LedgerAddress immediately following the given address, clamped so that
 * it is never before the truncation address.
 *
 * @param address     the current address.
 * @param lastEntryId the last entry id of the ledger that {@code address} refers to.
 * @return the next address, or null when there is no such address.
 */
LedgerAddress getNextAddress(LedgerAddress address, long lastEntryId) {
    if (this.ledgers.size() == 0) {
        return null;
    }
    LedgerAddress result = null;
    LedgerMetadata firstLedger = this.ledgers.get(0);
    if (address.getLedgerSequence() < firstLedger.getSequence()) {
        // The address precedes every known ledger: start from the first ledger.
        result = new LedgerAddress(firstLedger, 0);
    } else if (address.getEntryId() < lastEntryId) {
        // More entries remain in the same ledger.
        result = new LedgerAddress(address.getLedgerSequence(), address.getLedgerId(), address.getEntryId() + 1);
    } else {
        // Move on to the next ledger.
        LedgerMetadata nextLedger = null;
        int index = getLedgerMetadataIndex(address.getLedgerId()) + 1;
        if (index > 0) {
            if (index < this.ledgers.size()) {
                nextLedger = this.ledgers.get(index);
            }
        } else {
            // The current ledger is not in the list: scan for the first ledger with a larger id.
            for (LedgerMetadata lm : this.ledgers) {
                if (lm.getLedgerId() > address.getLedgerId()) {
                    nextLedger = lm;
                    break;
                }
            }
        }
        if (nextLedger != null) {
            result = new LedgerAddress(nextLedger, 0);
        }
    }
    if (result != null && result.compareTo(this.truncationAddress) < 0) {
        // Never hand out an address before the truncation point.
        result = this.truncationAddress;
    }
    return result;
}
Gets the Ledger Address immediately following the given address .
23,343
/**
 * Removes the given PageWrapper from this PageCollection, records its offset as
 * deleted, and clears the page's offset.
 */
synchronized void remove(PageWrapper page) {
    super.remove(page);
    if (this.incompleteNewPageOffset == page.getOffset()) {
        // The removed page was the pending incomplete page; clear that marker.
        this.incompleteNewPageOffset = PagePointer.NO_OFFSET;
    }
    this.deletedPageOffsets.add(page.getOffset());
    page.setOffset(PagePointer.NO_OFFSET);
}
Removes the given PageWrapper from this PageCollection .
23,344
/**
 * Marks modifications to the given PageWrapper as complete: assigns the page its
 * final offset at the current end of the index and re-keys it in the offset map.
 *
 * @throws IllegalArgumentException if the page is not registered or another page
 *                                  is pending completion.
 */
synchronized void complete(PageWrapper page) {
    Preconditions.checkArgument(this.pageByOffset.containsKey(page.getOffset()), "Given page is not registered.");
    Preconditions.checkArgument(this.incompleteNewPageOffset == PagePointer.NO_OFFSET || this.incompleteNewPageOffset == page.getOffset(), "Not expecting this page to be completed.");
    this.incompleteNewPageOffset = PagePointer.NO_OFFSET;
    long finalOffset = this.indexLength;
    this.indexLength += page.getPage().getLength();
    this.pageByOffset.remove(page.getOffset());
    page.setOffset(finalOffset);
    this.pageByOffset.put(page.getOffset(), page);
}
Indicates that any modifications to the given PageWrapper have completed .
23,345
/**
 * Collects the PageWrappers registered at the given offsets into the target
 * collection; offsets with no registered page are skipped.
 */
synchronized void collectPages(Collection<Long> offsets, Collection<PageWrapper> target) {
    for (Long offset : offsets) {
        PageWrapper page = this.pageByOffset.getOrDefault(offset, null);
        if (page != null) {
            target.add(page);
        }
    }
}
Collects the PageWrappers with given offsets into the given Collection .
23,346
/**
 * Gets a new List of all PageWrappers in this PageCollection, ordered by offset.
 */
synchronized List<PageWrapper> getPagesSortedByOffset() {
    return this.pageByOffset.values().stream()
            .sorted(Comparator.comparingLong(PageWrapper::getOffset))
            .collect(Collectors.toList());
}
Gets a new List containing all the PageWrappers in this PageCollection ordered by their offset .
23,347
public < ReturnType > CompletableFuture < ReturnType > add ( Supplier < CompletableFuture < ? extends ReturnType > > toRun ) { CompletableFuture < ReturnType > result = new CompletableFuture < > ( ) ; CompletableFuture < ? > existingTask ; synchronized ( this . lock ) { Exceptions . checkNotClosed ( this . closed , this ) ; existingTask = this . lastTask ; if ( existingTask != null ) { existingTask . whenCompleteAsync ( ( r , ex ) -> Futures . completeAfter ( toRun , result ) , this . executor ) ; } this . lastTask = result ; } if ( existingTask == null ) { Futures . completeAfter ( toRun , result ) ; } result . whenComplete ( ( r , ex ) -> { synchronized ( this . lock ) { if ( this . lastTask != null && this . lastTask . isDone ( ) ) { this . lastTask = null ; } } } ) ; return result ; }
Queues up a new task to execute .
23,348
/**
 * Creates the writer processors for the given segment: a single
 * WriterTableProcessor for table segments (identified by the INDEX_OFFSET
 * attribute), or an empty list for all other segments.
 */
public Collection<WriterSegmentProcessor> createWriterSegmentProcessors(UpdateableSegmentMetadata metadata) {
    Exceptions.checkNotClosed(this.closed.get(), this);
    if (!metadata.getAttributes().containsKey(TableAttributes.INDEX_OFFSET)) {
        // Not a table segment: nothing to process.
        return Collections.emptyList();
    }
    return Collections.singletonList(new WriterTableProcessor(new TableWriterConnectorImpl(metadata), this.executor));
}
region ContainerTableExtension Implementation
23,349
/**
 * If the latch is released, completes the provided future without invoking the provided runnable.
 * If the latch is not released, adds the provided future to the list to be notified and runs the
 * provided runnable if no other thread is already running it.
 *
 * @param willCallRelease Runnable expected to (eventually) cause the latch to be released.
 * @param toNotify        Future to complete when the latch releases.
 */
// First, under the lock, decide whether to complete immediately (already released) or to register
// the future and possibly claim the runner role. The runnable itself executes OUTSIDE the lock;
// on failure the runner role is relinquished (only if this thread still holds it) so another
// caller can retry. The immediate-completion path also happens outside the lock.
public void registerAndRunReleaser ( Runnable willCallRelease , CompletableFuture < T > toNotify ) { boolean run = false ; boolean complete = false ; T result = null ; Throwable e = null ; synchronized ( lock ) { if ( released ) { complete = true ; result = this . result ; e = this . e ; } else { waitingFutures . add ( toNotify ) ; if ( runningThreadId == null ) { run = true ; runningThreadId = Thread . currentThread ( ) . getId ( ) ; } } } if ( run ) { log . debug ( "Running releaser now, runningThread:{}" , Thread . currentThread ( ) . getName ( ) ) ; boolean success = false ; try { willCallRelease . run ( ) ; success = true ; } finally { if ( ! success ) { synchronized ( lock ) { if ( runningThreadId != null && runningThreadId == Thread . currentThread ( ) . getId ( ) ) { runningThreadId = null ; } } } } } if ( complete ) { if ( e == null ) { toNotify . complete ( result ) ; } else { toNotify . completeExceptionally ( e ) ; } } }
If the latch is released completes the provided future without invoking the provided runnable . If the latch is not released it will add the provided future to the list to be notified and runs the provided runnable if there is not already one running .
23,350
/**
 * Attaches the given {@link DurableDataLogFactory} creator to this ServiceBuilder. The given
 * Function will not be invoked right away; it will be called when needed.
 *
 * @param dataLogFactoryCreator Creator function; must not be null.
 * @return This ServiceBuilder, for chaining.
 */
public ServiceBuilder withDataLogFactory(Function<ComponentSetup, DurableDataLogFactory> dataLogFactoryCreator) {
    this.dataLogFactoryCreator = Preconditions.checkNotNull(dataLogFactoryCreator, "dataLogFactoryCreator");
    return this;
}
Attaches the given DurableDataLogFactory creator to this ServiceBuilder . The given Function will not be invoked right away ; it will be called when needed .
23,351
/**
 * Attaches the given {@link StorageFactory} creator to this ServiceBuilder. The given Function
 * will not be invoked right away; it will be called when needed.
 *
 * @param storageFactoryCreator Creator function; must not be null.
 * @return This ServiceBuilder, for chaining.
 */
public ServiceBuilder withStorageFactory(Function<ComponentSetup, StorageFactory> storageFactoryCreator) {
    this.storageFactoryCreator = Preconditions.checkNotNull(storageFactoryCreator, "storageFactoryCreator");
    return this;
}
Attaches the given StorageFactory creator to this ServiceBuilder . The given Function will not be invoked right away ; it will be called when needed .
23,352
/**
 * Attaches the given {@link SegmentContainerManager} creator to this ServiceBuilder. The given
 * Function will not be invoked right away; it will be called when needed.
 *
 * @param segmentContainerManagerCreator Creator function; must not be null.
 * @return This ServiceBuilder, for chaining.
 */
public ServiceBuilder withContainerManager(Function<ComponentSetup, SegmentContainerManager> segmentContainerManagerCreator) {
    this.segmentContainerManagerCreator = Preconditions.checkNotNull(segmentContainerManagerCreator, "segmentContainerManagerCreator");
    return this;
}
Attaches the given SegmentContainerManager creator to this ServiceBuilder . The given Function will not be invoked right away ; it will be called when needed .
23,353
/**
 * Attaches the given {@link CacheFactory} creator to this ServiceBuilder. The given Function
 * will not be invoked right away; it will be called when needed.
 *
 * @param cacheFactoryCreator Creator function; must not be null.
 * @return This ServiceBuilder, for chaining.
 */
public ServiceBuilder withCacheFactory(Function<ComponentSetup, CacheFactory> cacheFactoryCreator) {
    this.cacheFactoryCreator = Preconditions.checkNotNull(cacheFactoryCreator, "cacheFactoryCreator");
    return this;
}
Attaches the given CacheFactory creator to this ServiceBuilder . The given Function will not be invoked right away ; it will be called when needed .
23,354
/**
 * Attaches the given {@link StreamSegmentStore} creator to this ServiceBuilder. The given Function
 * will not be invoked right away; it will be called when needed.
 *
 * @param streamSegmentStoreCreator Creator function; must not be null.
 * @return This ServiceBuilder, for chaining.
 */
public ServiceBuilder withStreamSegmentStore(Function<ComponentSetup, StreamSegmentStore> streamSegmentStoreCreator) {
    this.streamSegmentStoreCreator = Preconditions.checkNotNull(streamSegmentStoreCreator, "streamSegmentStoreCreator");
    return this;
}
Attaches the given StreamSegmentStore creator to this ServiceBuilder . The given Function will not be invoked right away ; it will be called when needed .
23,355
/**
 * Creates (or returns the cached singleton of) the {@link TableStore} service, backed by this
 * builder's container registry and segment-to-container mapper.
 *
 * @return The TableStore singleton.
 */
public TableStore createTableStoreService ( ) { return getSingleton ( this . tableStoreService , setup -> new TableService ( setup . getContainerRegistry ( ) , setup . getSegmentToContainerMapper ( ) ) ) ; }
Creates a new instance of TableStore using the components generated by this class .
23,356
/**
 * Initializes the ServiceBuilder: starts the cache manager, then initializes the (singleton)
 * DurableDataLogFactory, then the (singleton) SegmentContainerManager — in that order.
 *
 * @throws DurableDataLogException If the data log could not be initialized.
 */
public void initialize ( ) throws DurableDataLogException { this . cacheManager . startAsync ( ) . awaitRunning ( ) ; getSingleton ( this . dataLogFactory , this . dataLogFactoryCreator ) . initialize ( ) ; getSingleton ( this . containerManager , this . segmentContainerManagerCreator ) . initialize ( ) ; }
Initializes the ServiceBuilder .
23,357
/**
 * Creates a new in-memory ServiceBuilder. Any data added to the resulting service is lost when
 * the object is garbage collected or the process terminates.
 *
 * @param builderConfig   The builder configuration to use.
 * @param executorBuilder Supplies the executors for the service components.
 * @return A ServiceBuilder wired with in-memory data log, storage, local container manager and
 *         (for non-read-only stores) an in-memory cache factory.
 */
// Read-only stores get a ReadOnlyServiceBuilder (no cache factory attached here); regular stores
// get the standard builder plus an InMemoryCacheFactory.
public static ServiceBuilder newInMemoryBuilder ( ServiceBuilderConfig builderConfig , ExecutorBuilder executorBuilder ) { ServiceConfig serviceConfig = builderConfig . getConfig ( ServiceConfig :: builder ) ; ServiceBuilder builder ; if ( serviceConfig . isReadOnlySegmentStore ( ) ) { builder = new ReadOnlyServiceBuilder ( builderConfig , serviceConfig , executorBuilder ) ; } else { builder = new ServiceBuilder ( builderConfig , serviceConfig , executorBuilder ) . withCacheFactory ( setup -> new InMemoryCacheFactory ( ) ) ; } return builder . withDataLogFactory ( setup -> new InMemoryDurableDataLogFactory ( setup . getCoreExecutor ( ) ) ) . withContainerManager ( setup -> new LocalSegmentContainerManager ( setup . getContainerRegistry ( ) , setup . getSegmentToContainerMapper ( ) ) ) . withStorageFactory ( setup -> new InMemoryStorageFactory ( setup . getStorageExecutor ( ) ) ) . withStreamSegmentStore ( setup -> new StreamSegmentService ( setup . getContainerRegistry ( ) , setup . getSegmentToContainerMapper ( ) ) ) ; }
Creates a new instance of the ServiceBuilder class whose components are all contained in memory . Any data added to this service will be lost when the object is garbage collected or the process terminates .
23,358
/**
 * Removes the given Listener for the given notification type. Entries whose registered listener
 * equals the supplied one are removed.
 *
 * @param type     The notification type to remove the listener from.
 * @param listener The listener to remove.
 * @param <T>      Type of Notification.
 */
public <T extends Notification> void removeListener(final String type, final Listener<T> listener) {
    map.get(type).removeIf(registered -> registered.getListener().equals(listener));
}
Remove Listener of a given notification type .
23,359
/**
 * Executes the given Callable asynchronously, keyed on the given segment names, and returns a
 * CompletableFuture that completes with its result.
 *
 * @param operation    The operation to run.
 * @param segmentNames The segment names the operation is keyed on.
 * @param <R>          Result type.
 * @return A CompletableFuture for the operation's result.
 */
private <R> CompletableFuture<R> supplyAsync(Callable<R> operation, String... segmentNames) {
    Exceptions.checkNotClosed(this.closed.get(), this);
    List<String> keys = Arrays.asList(segmentNames);
    return this.taskProcessor.add(keys, () -> execute(operation));
}
Executes the given Callable asynchronously and returns a CompletableFuture that will be completed with the result .
23,360
/**
 * Executes the given RunnableWithException asynchronously and returns a CompletableFuture that
 * completes (with null) when the operation finishes.
 *
 * @param operation    The operation to run.
 * @param segmentNames The segment names the operation is keyed on.
 * @return A CompletableFuture that completes when the operation does.
 */
private CompletableFuture<Void> runAsync(RunnableWithException operation, String... segmentNames) {
    Callable<Void> asCallable = () -> {
        operation.run();
        return null;
    };
    return supplyAsync(asCallable, segmentNames);
}
Executes the given RunnableWithException asynchronously and returns a CompletableFuture that will be completed when the Runnable completes .
23,361
/**
 * Executes the given Callable asynchronously on this instance's executor, wrapping any thrown
 * exception in a {@link CompletionException}.
 *
 * @param operation The operation to run.
 * @param <R>       Result type.
 * @return A CompletableFuture for the operation's result.
 */
private <R> CompletableFuture<R> execute(Callable<R> operation) {
    return CompletableFuture.supplyAsync(() -> {
        try {
            return operation.call();
        } catch (Exception e) {
            // CompletableFuture requires unchecked exceptions; preserve the original as the cause.
            throw new CompletionException(e);
        }
    }, this.executor);
}
Executes the given Callable asynchronously on this instance s executor wrapping any thrown exception in a CompletionException .
23,362
/**
 * Creates a resource representation for use in authorization of actions pertaining to the
 * specified stream within the specified scope.
 *
 * @param scopeName  Name of the scope.
 * @param streamName Name of the stream; must be non-null and non-empty.
 * @return The resource string ("streamsInScope/streamName").
 */
public static String ofStreamInScope(String scopeName, String streamName) {
    Exceptions.checkNotNullOrEmpty(streamName, "streamName");
    return ofStreamsInScope(scopeName) + "/" + streamName;
}
Creates a resource representation for use in authorization of actions pertaining to the specified stream within the specified scope .
23,363
/**
 * Creates a resource representation for use in authorization of actions pertaining to the
 * specified reader group within the specified scope.
 *
 * @param scopeName       Name of the scope.
 * @param readerGroupName Name of the reader group; must be non-null and non-empty.
 * @return The resource string ("readerGroupsInScope/readerGroupName").
 */
public static String ofReaderGroupInScope(String scopeName, String readerGroupName) {
    Exceptions.checkNotNullOrEmpty(readerGroupName, "readerGroupName");
    return ofReaderGroupsInScope(scopeName) + "/" + readerGroupName;
}
Creates a resource representation for use in authorization of actions pertaining to the specified reader group within the specified scope .
23,364
/**
 * Unwraps the given exception: while it is of a wrapper type whose cause may be inspected
 * (per {@code canInspectCause}) and has a non-null cause, descends to the cause.
 *
 * @param ex The exception to unwrap.
 * @return The innermost non-wrapper (or cause-less) Throwable.
 */
public static Throwable unwrap(Throwable ex) {
    Throwable current = ex;
    while (canInspectCause(current)) {
        Throwable cause = current.getCause();
        if (cause == null) {
            break;
        }
        current = cause;
    }
    return current;
}
Unwraps the provided exception if it is a CompletionException or ExecutionException returning the innermost cause that does not need unwrapping .
23,365
/**
 * Returns true if the provided class is {@link CompletionException} or {@link ExecutionException},
 * which need to be unwrapped.
 *
 * @param c The exception class to check (must not be null).
 * @return True if the class is one of the two wrapper types.
 */
public static boolean shouldUnwrap(Class<? extends Exception> c) {
    boolean isCompletion = c.equals(CompletionException.class);
    boolean isExecution = c.equals(ExecutionException.class);
    return isCompletion || isExecution;
}
Returns true if the provided class is CompletionException or ExecutionException which need to be unwrapped .
23,366
/**
 * Throws a NullPointerException if {@code arg} is null; throws an IllegalArgumentException if it
 * is an empty String.
 *
 * @param arg     The String to check.
 * @param argName The argument name, used in exception messages.
 * @return The validated argument, for chaining.
 * @throws NullPointerException     If arg is null.
 * @throws IllegalArgumentException If arg is empty.
 */
public static String checkNotNullOrEmpty(String arg, String argName) throws NullPointerException, IllegalArgumentException {
    Preconditions.checkNotNull(arg, argName);
    checkArgument(!arg.isEmpty(), argName, "Cannot be an empty string.");
    return arg;
}
Throws a NullPointerException if the arg argument is null . Throws an IllegalArgumentException if the String arg argument has a length of zero .
23,367
/**
 * Throws a NullPointerException if {@code arg} is null; throws an IllegalArgumentException if it
 * is an empty Collection.
 *
 * @param arg     The Collection to check.
 * @param argName The argument name, used in exception messages.
 * @param <T>     Element type.
 * @param <V>     Collection type.
 * @return The validated argument, for chaining.
 * @throws NullPointerException     If arg is null.
 * @throws IllegalArgumentException If arg is empty.
 */
public static <T, V extends Collection<T>> V checkNotNullOrEmpty(V arg, String argName) throws NullPointerException, IllegalArgumentException {
    Preconditions.checkNotNull(arg, argName);
    checkArgument(arg.size() > 0, argName, "Cannot be an empty collection.");
    return arg;
}
Throws a NullPointerException if the arg argument is null . Throws an IllegalArgumentException if the Collections arg argument has a size of zero .
23,368
/**
 * Throws a NullPointerException if {@code arg} is null; throws an IllegalArgumentException if it
 * is an empty Map.
 *
 * @param arg     The Map to check.
 * @param argName The argument name, used in exception messages.
 * @param <K>     Key type.
 * @param <V>     Value type.
 * @return The validated argument, for chaining.
 * @throws NullPointerException     If arg is null.
 * @throws IllegalArgumentException If arg is empty.
 */
public static <K, V> Map<K, V> checkNotNullOrEmpty(Map<K, V> arg, String argName) throws NullPointerException, IllegalArgumentException {
    Preconditions.checkNotNull(arg, argName);
    checkArgument(arg.size() > 0, argName, "Cannot be an empty map.");
    return arg;
}
Throws a NullPointerException if the arg argument is null . Throws an IllegalArgumentException if the Map arg argument has a size of zero .
23,369
/**
 * Throws an IllegalArgumentException (with a formatted message) if the given condition is false.
 *
 * @param validCondition The condition that must hold.
 * @param argName        The argument name, used in the exception message.
 * @param message        Message template for the failure.
 * @param args           Arguments for the message template.
 * @throws IllegalArgumentException If validCondition is false.
 */
public static void checkArgument(boolean validCondition, String argName, String message, Object... args) throws IllegalArgumentException {
    if (validCondition) {
        return;
    }
    throw new IllegalArgumentException(badArgumentMessage(argName, message, args));
}
Throws an IllegalArgumentException if the validCondition argument is false .
23,370
/**
 * Throws an appropriate exception if the given [startIndex, startIndex + length) range is not
 * contained within an array of the given length.
 *
 * @param startIndex        Start of the range.
 * @param length            Length of the range (must be non-negative).
 * @param arrayLength       Length of the array.
 * @param startIndexArgName Name of the startIndex argument (for messages).
 * @param lengthArgName     Name of the length argument (for messages).
 * @throws IllegalArgumentException       If length is negative.
 * @throws ArrayIndexOutOfBoundsException If the range falls outside the array.
 */
// NOTE(review): (0, 0) on an empty array is explicitly permitted by the inner special case, but
// (arrayLength, 0) on a NON-empty array throws — confirm that asymmetry is intended.
public static void checkArrayRange ( long startIndex , int length , long arrayLength , String startIndexArgName , String lengthArgName ) throws ArrayIndexOutOfBoundsException , IllegalArgumentException { if ( length < 0 ) { throw new IllegalArgumentException ( badArgumentMessage ( lengthArgName , "length must be a non-negative integer." ) ) ; } if ( startIndex < 0 || startIndex >= arrayLength ) { if ( ! ( startIndex == 0 && length == 0 && arrayLength == 0 ) ) { throw new ArrayIndexOutOfBoundsException ( badStartOffsetMessage ( startIndex , arrayLength , startIndexArgName ) ) ; } } if ( startIndex + length > arrayLength ) { throw new ArrayIndexOutOfBoundsException ( badLengthMessage ( startIndex , length , arrayLength , startIndexArgName , lengthArgName ) ) ; } }
Throws an appropriate exception if the given range is not included in the given array interval .
23,371
/**
 * Removes entities (referred to by their position pointers) from the ordered set. After deleting
 * each entity node, attempts to delete any affected collection that is sealed.
 *
 * @param scope    Scope name.
 * @param stream   Stream name.
 * @param entities Position pointers of the entities to remove.
 * @return A future that completes when all deletions have been attempted.
 */
// The affected collection numbers are derived from the position pointers; a sealed collection is
// only garbage-collected via tryDeleteSealedCollection. Outcomes are logged in whenComplete.
CompletableFuture < Void > removeEntities ( String scope , String stream , Collection < Long > entities ) { Set < Integer > collections = entities . stream ( ) . collect ( Collectors . groupingBy ( x -> new Position ( x ) . collectionNumber ) ) . keySet ( ) ; return Futures . allOf ( entities . stream ( ) . map ( entity -> storeHelper . deletePath ( getEntityPath ( scope , stream , entity ) , false ) ) . collect ( Collectors . toList ( ) ) ) . thenCompose ( v -> Futures . allOf ( collections . stream ( ) . map ( collectionNum -> isSealed ( scope , stream , collectionNum ) . thenCompose ( sealed -> { if ( sealed ) { return tryDeleteSealedCollection ( scope , stream , collectionNum ) ; } else { return CompletableFuture . completedFuture ( null ) ; } } ) ) . collect ( Collectors . toList ( ) ) ) ) . whenComplete ( ( r , e ) -> { if ( e != null ) { log . error ( "error encountered while trying to remove entity positions {} for stream {}/{}" , entities , scope , stream , e ) ; } else { log . debug ( "entities at positions {} removed for stream {}/{}" , entities , scope , stream ) ; } } ) ; }
Method to remove entities from the ordered set . Entities are referred to by their position pointer .
23,372
/**
 * Attempts to delete a collection's nodes. The collection must already be sealed when this method
 * is called.
 *
 * @param scope         Scope name.
 * @param stream        Stream name.
 * @param collectionNum The collection to delete.
 * @return A future that completes when the deletion attempt finishes.
 */
// First deletes entity nodes whose position exceeds rollOverAfter, then tries to delete the
// entities node and the collection subtree; a DataNotEmptyException is deliberately swallowed
// (another writer may have added entries concurrently), leaving the collection in place.
private CompletableFuture < Void > tryDeleteSealedCollection ( String scope , String stream , Integer collectionNum ) { return getLatestCollection ( scope , stream ) . thenCompose ( latestcollectionNum -> storeHelper . getChildren ( getEntitiesPath ( scope , stream , collectionNum ) ) . thenCompose ( entitiesPos -> { String entitiesPath = getEntitiesPath ( scope , stream , collectionNum ) ; return Futures . allOf ( entitiesPos . stream ( ) . filter ( pos -> getPositionFromPath ( pos ) > rollOverAfter ) . map ( pos -> storeHelper . deletePath ( ZKPaths . makePath ( entitiesPath , pos ) , false ) ) . collect ( Collectors . toList ( ) ) ) ; } ) ) . thenCompose ( x -> { return Futures . exceptionallyExpecting ( storeHelper . deletePath ( getEntitiesPath ( scope , stream , collectionNum ) , false ) . thenCompose ( v -> storeHelper . deleteTree ( getCollectionPath ( scope , stream , collectionNum ) ) ) , e -> Exceptions . unwrap ( e ) instanceof StoreException . DataNotEmptyException , null ) ; } ) ; }
The collection must already be sealed when this method is called .
23,373
/**
 * Pre-processes a StreamSegmentAppendOperation. After this method returns, the operation will have
 * its SegmentOffset set to the current Segment length (if it did not already carry a conditional
 * offset) and its AttributeUpdates pre-processed against current values.
 *
 * @param operation The operation to pre-process.
 * @throws StreamSegmentSealedException   If the Segment is sealed.
 * @throws StreamSegmentMergedException   If the Segment is merged.
 * @throws BadOffsetException             If a conditional offset does not match the Segment length.
 * @throws BadAttributeUpdateException    If an attribute update fails validation.
 */
// A non-negative offset on the operation is a conditional append and must equal the current
// length exactly; a negative offset means "append at tail" and is filled in here. Offset and
// attribute validation are skipped in recovery mode (operations were already validated).
void preProcessOperation ( StreamSegmentAppendOperation operation ) throws StreamSegmentSealedException , StreamSegmentMergedException , BadOffsetException , BadAttributeUpdateException { ensureSegmentId ( operation ) ; if ( this . merged ) { throw new StreamSegmentMergedException ( this . name ) ; } if ( this . sealed ) { throw new StreamSegmentSealedException ( this . name ) ; } if ( ! this . recoveryMode ) { long operationOffset = operation . getStreamSegmentOffset ( ) ; if ( operationOffset >= 0 ) { if ( operationOffset != this . length ) { throw new BadOffsetException ( this . name , this . length , operationOffset ) ; } } else { operation . setStreamSegmentOffset ( this . length ) ; } preProcessAttributes ( operation . getAttributeUpdates ( ) ) ; } }
Pre - processes a StreamSegmentAppendOperation . After this method returns the given operation will have its SegmentOffset property set to the current SegmentLength and all AttributeUpdates will be set to the current values .
23,374
/**
 * Pre-processes an UpdateAttributesOperation. After this method returns, the operation will have
 * its AttributeUpdates pre-processed against the current attribute values.
 *
 * @param operation The operation to pre-process.
 * @throws StreamSegmentSealedException If the Segment is sealed.
 * @throws StreamSegmentMergedException If the Segment is merged.
 * @throws BadAttributeUpdateException  If an attribute update fails validation.
 */
void preProcessOperation(UpdateAttributesOperation operation) throws StreamSegmentSealedException, StreamSegmentMergedException, BadAttributeUpdateException {
    ensureSegmentId(operation);
    if (this.merged) {
        throw new StreamSegmentMergedException(this.name);
    }
    if (this.sealed) {
        throw new StreamSegmentSealedException(this.name);
    }
    if (this.recoveryMode) {
        // Already validated when originally processed; nothing to re-check during recovery.
        return;
    }
    preProcessAttributes(operation.getAttributeUpdates());
}
Pre - processes a UpdateAttributesOperation . After this method returns the given operation will have its AttributeUpdates set to the current values of those attributes .
23,375
/**
 * Pre-processes a StreamSegmentSealOperation. After this method returns, the operation will have
 * its SegmentLength property set to the current length of the Segment.
 *
 * @param operation The operation to pre-process.
 * @throws StreamSegmentSealedException If the Segment is already sealed.
 * @throws StreamSegmentMergedException If the Segment is merged.
 */
void preProcessOperation(StreamSegmentSealOperation operation) throws StreamSegmentSealedException, StreamSegmentMergedException {
    ensureSegmentId(operation);
    if (this.merged) {
        throw new StreamSegmentMergedException(this.name);
    }
    if (this.sealed) {
        throw new StreamSegmentSealedException(this.name);
    }
    if (this.recoveryMode) {
        // During recovery the operation already carries its offset.
        return;
    }
    operation.setStreamSegmentOffset(this.length);
}
Pre - processes a StreamSegmentSealOperation . After this method returns the operation will have its SegmentLength property set to the current length of the Segment .
23,376
/**
 * Pre-processes a StreamSegmentTruncateOperation: validates that the truncation offset lies within
 * [startOffset, length].
 *
 * @param operation The operation to pre-process.
 * @throws BadOffsetException If the truncation offset is below the current start offset or beyond
 *                            the current length.
 */
void preProcessOperation(StreamSegmentTruncateOperation operation) throws BadOffsetException {
    ensureSegmentId(operation);
    long offset = operation.getStreamSegmentOffset();
    boolean withinBounds = offset >= this.startOffset && offset <= this.length;
    if (!withinBounds) {
        String msg = String.format("Truncation Offset must be at least %d and at most %d, given %d.", this.startOffset, this.length, offset);
        throw new BadOffsetException(this.name, this.startOffset, offset, msg);
    }
}
Pre - processes a StreamSegmentTruncateOperation .
23,377
/**
 * Pre-processes the given MergeSegmentOperation as a Source Segment. After this method returns,
 * the operation will have its Length set to the current Segment length (outside recovery mode).
 *
 * @param operation The merge operation to pre-process.
 * @throws StreamSegmentMergedException    If the Segment is already merged.
 * @throws StreamSegmentNotSealedException If the Segment is not sealed (a merge source must be).
 * @throws StreamSegmentTruncatedException If the Segment is truncated (cannot be merged).
 */
void preProcessAsSourceSegment ( MergeSegmentOperation operation ) throws StreamSegmentNotSealedException , StreamSegmentMergedException , StreamSegmentTruncatedException { Exceptions . checkArgument ( this . id == operation . getSourceSegmentId ( ) , "operation" , "Invalid Operation Source Segment Id." ) ; if ( this . merged ) { throw new StreamSegmentMergedException ( this . name ) ; } if ( ! this . sealed ) { throw new StreamSegmentNotSealedException ( this . name ) ; } if ( this . startOffset > 0 ) { throw new StreamSegmentTruncatedException ( this . name , "Segment cannot be merged because it is truncated." , null ) ; } if ( ! this . recoveryMode ) { operation . setLength ( this . length ) ; } }
Pre - processes the given operation as a Source Segment .
23,378
/**
 * Accepts a StreamSegmentAppendOperation into the metadata: advances the Segment length by the
 * append size and applies its attribute updates.
 *
 * @param operation The operation to accept (must have been pre-processed).
 * @throws MetadataUpdateException If the operation's offset does not match the current length.
 */
void acceptOperation(StreamSegmentAppendOperation operation) throws MetadataUpdateException {
    ensureSegmentId(operation);
    long expectedOffset = this.length;
    if (operation.getStreamSegmentOffset() != expectedOffset) {
        String msg = String.format("SegmentAppendOperation offset mismatch. Expected %d, actual %d.", expectedOffset, operation.getStreamSegmentOffset());
        throw new MetadataUpdateException(this.containerId, msg);
    }
    this.length = expectedOffset + operation.getData().length;
    acceptAttributes(operation.getAttributeUpdates());
    this.isChanged = true;
}
Accepts a StreamSegmentAppendOperation in the metadata .
23,379
/**
 * Accepts an UpdateAttributesOperation into the metadata, applying its attribute updates and
 * marking the metadata as changed.
 *
 * @param operation The operation to accept.
 */
void acceptOperation ( UpdateAttributesOperation operation ) { ensureSegmentId ( operation ) ; acceptAttributes ( operation . getAttributeUpdates ( ) ) ; this . isChanged = true ; }
Accepts an UpdateAttributesOperation in the metadata .
23,380
/**
 * Accepts a StreamSegmentSealOperation into the metadata, marking the Segment as sealed.
 *
 * @param operation The operation to accept.
 * @throws MetadataUpdateException If the operation has a negative offset, i.e., it was not
 *                                 pre-processed first.
 */
void acceptOperation ( StreamSegmentSealOperation operation ) throws MetadataUpdateException { ensureSegmentId ( operation ) ; if ( operation . getStreamSegmentOffset ( ) < 0 ) { throw new MetadataUpdateException ( containerId , "StreamSegmentSealOperation cannot be accepted if it hasn't been pre-processed: " + operation ) ; } this . sealed = true ; this . isChanged = true ; }
Accepts a StreamSegmentSealOperation in the metadata .
23,381
/**
 * Accepts a StreamSegmentTruncateOperation into the metadata, setting the Segment's start offset
 * to the operation's offset.
 *
 * @param operation The operation to accept.
 */
void acceptOperation ( StreamSegmentTruncateOperation operation ) { ensureSegmentId ( operation ) ; this . startOffset = operation . getStreamSegmentOffset ( ) ; this . isChanged = true ; }
Accepts a StreamSegmentTruncateOperation in the metadata .
23,382
/**
 * Accepts the given MergeSegmentOperation as a Source Segment, marking this Segment as sealed
 * and merged.
 *
 * @param operation The merge operation to accept; its source segment id must match this Segment.
 */
void acceptAsSourceSegment ( MergeSegmentOperation operation ) { Exceptions . checkArgument ( this . id == operation . getSourceSegmentId ( ) , "operation" , "Invalid Operation Source Segment Id." ) ; this . sealed = true ; this . merged = true ; this . isChanged = true ; }
Accepts the given operation as a Source Segment .
23,383
/**
 * Accepts a collection of AttributeUpdates into the metadata by recording each update's value
 * under its attribute id. A null collection is a no-op.
 *
 * @param attributeUpdates The updates to accept, or null.
 */
private void acceptAttributes(Collection<AttributeUpdate> attributeUpdates) {
    if (attributeUpdates == null) {
        return;
    }
    attributeUpdates.forEach(update -> this.attributeUpdates.put(update.getAttributeId(), update.getValue()));
}
Accepts a collection of AttributeUpdates in the metadata .
23,384
/**
 * Updates the transaction with the given state of the Segment in Storage and marks the metadata
 * as changed.
 *
 * @param storageLength  The Segment's length in Storage.
 * @param storageSealed  Whether the Segment is sealed in Storage.
 * @param deleted        Whether the Segment is deleted.
 * @param storageDeleted Whether the Segment is deleted in Storage.
 */
void updateStorageState ( long storageLength , boolean storageSealed , boolean deleted , boolean storageDeleted ) { this . storageLength = storageLength ; this . sealedInStorage = storageSealed ; this . deleted = deleted ; this . deletedInStorage = storageDeleted ; this . isChanged = true ; }
Updates the transaction with the given state of the segment in storage .
23,385
/**
 * Applies all outstanding changes in this transaction to the base SegmentMetadata object. No-op
 * if nothing changed. The target must have the same id and name as this transaction.
 *
 * @param target The metadata to apply changes to.
 */
// storageLength is only propagated when non-negative (a negative value means it was never set).
// The nested flags (sealedInStorage, deletedInStorage) are only applied together with their
// parent flags (sealed, deleted) — the marker order matters to the target's invariants.
void apply ( UpdateableSegmentMetadata target ) { if ( ! this . isChanged ) { return ; } Preconditions . checkArgument ( target . getId ( ) == this . id , "Target Segment Id mismatch. Expected %s, given %s." , this . id , target . getId ( ) ) ; Preconditions . checkArgument ( target . getName ( ) . equals ( this . name ) , "Target Segment Name mismatch. Expected %s, given %s." , name , target . getName ( ) ) ; target . setLastUsed ( this . lastUsed ) ; target . updateAttributes ( this . attributeUpdates ) ; target . setLength ( this . length ) ; target . setStartOffset ( this . startOffset ) ; if ( this . storageLength >= 0 ) { target . setStorageLength ( this . storageLength ) ; } if ( this . sealed ) { target . markSealed ( ) ; if ( this . sealedInStorage ) { target . markSealedInStorage ( ) ; } } if ( this . merged ) { target . markMerged ( ) ; } if ( this . deleted ) { target . markDeleted ( ) ; if ( this . deletedInStorage ) { target . markDeletedInStorage ( ) ; } } if ( this . pinned ) { target . markPinned ( ) ; } }
Applies all the outstanding changes to the base SegmentMetadata object .
23,386
/**
 * Creates a scope with the given name.
 *
 * @param scopeName Name of the scope to create.
 * @return A future with SUCCESS, SCOPE_EXISTS (if it already exists) or FAILURE.
 */
public CompletableFuture<CreateScopeStatus> createScope(final String scopeName) {
    return getScope(scopeName).createScope().handle((result, e) -> {
        if (e == null) {
            return CreateScopeStatus.newBuilder().setStatus(CreateScopeStatus.Status.SUCCESS).build();
        }
        // Fix: unwrap CompletionException/ExecutionException chains (consistent with deleteScope);
        // the raw throwable from handle() may wrap DataExistsException more than one level deep,
        // which the previous single getCause() check missed, misreporting SCOPE_EXISTS as FAILURE.
        Throwable ex = Exceptions.unwrap(e);
        if (ex instanceof StoreException.DataExistsException || ex.getCause() instanceof StoreException.DataExistsException) {
            return CreateScopeStatus.newBuilder().setStatus(CreateScopeStatus.Status.SCOPE_EXISTS).build();
        } else {
            log.debug("Create scope failed due to ", ex);
            return CreateScopeStatus.newBuilder().setStatus(CreateScopeStatus.Status.FAILURE).build();
        }
    });
}
Create a scope with given name .
23,387
/**
 * Deletes the scope with the given name.
 *
 * @param scopeName Name of the scope to delete.
 * @return A future with SUCCESS, SCOPE_NOT_FOUND, SCOPE_NOT_EMPTY or FAILURE.
 */
public CompletableFuture<DeleteScopeStatus> deleteScope(final String scopeName) {
    return getScope(scopeName).deleteScope().handle((result, e) -> {
        Throwable ex = Exceptions.unwrap(e);
        if (ex == null) {
            return DeleteScopeStatus.newBuilder().setStatus(DeleteScopeStatus.Status.SUCCESS).build();
        }
        if (ex instanceof StoreException.DataNotFoundException) {
            return DeleteScopeStatus.newBuilder().setStatus(DeleteScopeStatus.Status.SCOPE_NOT_FOUND).build();
        } else if (ex instanceof StoreException.DataNotEmptyException) {
            return DeleteScopeStatus.newBuilder().setStatus(DeleteScopeStatus.Status.SCOPE_NOT_EMPTY).build();
        } else {
            // Fix: pass the throwable as the trailing argument with no '{}' placeholder so SLF4J
            // logs the full stack trace; the previous "{}" consumed the throwable as a formatting
            // argument, logging only its toString() (consistent with createScope's logging).
            log.debug("DeleteScope failed due to ", ex);
            return DeleteScopeStatus.newBuilder().setStatus(DeleteScopeStatus.Status.FAILURE).build();
        }
    });
}
Delete a scope with given name .
23,388
/**
 * Sends a segment-sealed request for the specified segment.
 *
 * @param scope           Scope name.
 * @param stream          Stream name.
 * @param segmentId       Id of the segment to seal.
 * @param delegationToken Authorization token to pass along.
 * @param clientRequestId Client-supplied request id, or NON_EXISTENT_ID to generate one.
 * @return A future completing with the seal outcome.
 */
public CompletableFuture<Boolean> sealSegment(final String scope, final String stream, final long segmentId, String delegationToken, final long clientRequestId) {
    final String qualifiedName = getQualifiedStreamSegmentName(scope, stream, segmentId);
    final Controller.NodeUri uri = getSegmentUri(scope, stream, segmentId);
    final long requestId;
    if (clientRequestId == RequestTag.NON_EXISTENT_ID) {
        requestId = idGenerator.get();
    } else {
        requestId = clientRequestId;
    }
    return sealSegment(qualifiedName, uri, delegationToken, requestId);
}
This method sends segment sealed message for the specified segment .
23,389
/**
 * Sends a WireCommand to create a table segment.
 *
 * @param tableName       Name of the table segment to create.
 * @param delegationToken Authorization token to pass along.
 * @param clientRequestId Client-supplied request id, or NON_EXISTENT_ID to generate one.
 * @return A future completing with true when the segment was created (or already existed),
 *         or failing with a WireCommandFailedException on connection/host/auth failures.
 */
// The anonymous reply processor maps each store reply onto the result future; note that
// segmentAlreadyExists is treated as success (idempotent create).
public CompletableFuture < Boolean > createTableSegment ( final String tableName , String delegationToken , final long clientRequestId ) { final CompletableFuture < Boolean > result = new CompletableFuture < > ( ) ; final Controller . NodeUri uri = getTableUri ( tableName ) ; final WireCommandType type = WireCommandType . CREATE_TABLE_SEGMENT ; final long requestId = ( clientRequestId == RequestTag . NON_EXISTENT_ID ) ? idGenerator . get ( ) : clientRequestId ; final FailingReplyProcessor replyProcessor = new FailingReplyProcessor ( ) { public void connectionDropped ( ) { log . warn ( requestId , "CreateTableSegment {} Connection dropped" , tableName ) ; result . completeExceptionally ( new WireCommandFailedException ( type , WireCommandFailedException . Reason . ConnectionDropped ) ) ; } public void wrongHost ( WireCommands . WrongHost wrongHost ) { log . warn ( requestId , "CreateTableSegment {} wrong host" , tableName ) ; result . completeExceptionally ( new WireCommandFailedException ( type , WireCommandFailedException . Reason . UnknownHost ) ) ; } public void segmentAlreadyExists ( WireCommands . SegmentAlreadyExists segmentAlreadyExists ) { log . info ( requestId , "CreateTableSegment {} segmentAlreadyExists" , tableName ) ; result . complete ( true ) ; } public void segmentCreated ( WireCommands . SegmentCreated segmentCreated ) { log . info ( requestId , "CreateTableSegment {} SegmentCreated" , tableName ) ; result . complete ( true ) ; } public void processingFailure ( Exception error ) { log . error ( requestId , "CreateTableSegment {} threw exception" , tableName , error ) ; handleError ( error , result , type ) ; } public void authTokenCheckFailed ( WireCommands . AuthTokenCheckFailed authTokenCheckFailed ) { result . completeExceptionally ( new WireCommandFailedException ( new AuthenticationException ( authTokenCheckFailed . toString ( ) ) , type , WireCommandFailedException . Reason . AuthFailed ) ) ; } } ; WireCommands . 
CreateTableSegment request = new WireCommands . CreateTableSegment ( requestId , tableName , delegationToken ) ; sendRequestAsync ( request , replyProcessor , result , ModelHelper . encode ( uri ) ) ; return result ; }
This method sends a WireCommand to create a table segment .
23,390
/**
 * Sends a WireCommand to delete a table segment.
 *
 * @param tableName       Name of the table segment to delete.
 * @param mustBeEmpty     If true, the deletion fails with TableSegmentNotEmpty when the segment
 *                        still contains entries.
 * @param delegationToken Authorization token to pass along.
 * @param clientRequestId Client-supplied request id, or NON_EXISTENT_ID to generate one.
 * @return A future completing with true when the segment was deleted (or did not exist),
 *         or failing with a WireCommandFailedException otherwise.
 */
// noSuchSegment is treated as success (idempotent delete); tableSegmentNotEmpty fails the future.
public CompletableFuture < Boolean > deleteTableSegment ( final String tableName , final boolean mustBeEmpty , String delegationToken , final long clientRequestId ) { final CompletableFuture < Boolean > result = new CompletableFuture < > ( ) ; final Controller . NodeUri uri = getTableUri ( tableName ) ; final WireCommandType type = WireCommandType . DELETE_TABLE_SEGMENT ; final long requestId = ( clientRequestId == RequestTag . NON_EXISTENT_ID ) ? idGenerator . get ( ) : clientRequestId ; final FailingReplyProcessor replyProcessor = new FailingReplyProcessor ( ) { public void connectionDropped ( ) { log . warn ( requestId , "deleteTableSegment {} Connection dropped." , tableName ) ; result . completeExceptionally ( new WireCommandFailedException ( type , WireCommandFailedException . Reason . ConnectionDropped ) ) ; } public void wrongHost ( WireCommands . WrongHost wrongHost ) { log . warn ( requestId , "deleteTableSegment {} wrong host." , tableName ) ; result . completeExceptionally ( new WireCommandFailedException ( type , WireCommandFailedException . Reason . UnknownHost ) ) ; } public void noSuchSegment ( WireCommands . NoSuchSegment noSuchSegment ) { log . info ( requestId , "deleteTableSegment {} NoSuchSegment." , tableName ) ; result . complete ( true ) ; } public void segmentDeleted ( WireCommands . SegmentDeleted segmentDeleted ) { log . info ( requestId , "deleteTableSegment {} SegmentDeleted." , tableName ) ; result . complete ( true ) ; } public void tableSegmentNotEmpty ( WireCommands . TableSegmentNotEmpty tableSegmentNotEmpty ) { log . warn ( requestId , "deleteTableSegment {} TableSegmentNotEmpty." , tableName ) ; result . completeExceptionally ( new WireCommandFailedException ( type , WireCommandFailedException . Reason . TableSegmentNotEmpty ) ) ; } public void processingFailure ( Exception error ) { log . error ( requestId , "deleteTableSegment {} failed." 
, tableName , error ) ; handleError ( error , result , type ) ; } public void authTokenCheckFailed ( WireCommands . AuthTokenCheckFailed authTokenCheckFailed ) { result . completeExceptionally ( new WireCommandFailedException ( new AuthenticationException ( authTokenCheckFailed . toString ( ) ) , type , WireCommandFailedException . Reason . AuthFailed ) ) ; } } ; WireCommands . DeleteTableSegment request = new WireCommands . DeleteTableSegment ( requestId , tableName , mustBeEmpty , delegationToken ) ; sendRequestAsync ( request , replyProcessor , result , ModelHelper . encode ( uri ) ) ; return result ; }
This method sends a WireCommand to delete a table segment .
23,391
/**
 * Sends a WireCommand to iterate over table keys.
 *
 * @param tableName         Name of the table segment to read keys from.
 * @param suggestedKeyCount Suggested maximum number of keys to return.
 * @param state             Continuation state from a previous call, or null to start from the
 *                          beginning (treated as IteratorState.EMPTY).
 * @param delegationToken   Authorization token to pass along.
 * @param clientRequestId   Client-supplied request id, or NON_EXISTENT_ID to generate one.
 * @return A future with the next batch of keys plus the continuation state, or failing with a
 *         WireCommandFailedException (including SegmentDoesNotExist for a missing segment).
 */
public CompletableFuture < TableSegment . IteratorItem < TableKey < byte [ ] > > > readTableKeys ( final String tableName , final int suggestedKeyCount , final IteratorState state , final String delegationToken , final long clientRequestId ) { final Controller . NodeUri uri = getTableUri ( tableName ) ; final WireCommandType type = WireCommandType . READ_TABLE_KEYS ; final long requestId = ( clientRequestId == RequestTag . NON_EXISTENT_ID ) ? idGenerator . get ( ) : clientRequestId ; final IteratorState token = ( state == null ) ? IteratorState . EMPTY : state ; final CompletableFuture < TableSegment . IteratorItem < TableKey < byte [ ] > > > result = new CompletableFuture < > ( ) ; final FailingReplyProcessor replyProcessor = new FailingReplyProcessor ( ) { public void connectionDropped ( ) { log . warn ( requestId , "readTableKeys {} Connection dropped" , tableName ) ; result . completeExceptionally ( new WireCommandFailedException ( type , WireCommandFailedException . Reason . ConnectionDropped ) ) ; } public void wrongHost ( WireCommands . WrongHost wrongHost ) { log . warn ( requestId , "readTableKeys {} wrong host" , tableName ) ; result . completeExceptionally ( new WireCommandFailedException ( type , WireCommandFailedException . Reason . UnknownHost ) ) ; } public void noSuchSegment ( WireCommands . NoSuchSegment noSuchSegment ) { log . warn ( requestId , "readTableKeys {} NoSuchSegment" , tableName ) ; result . completeExceptionally ( new WireCommandFailedException ( type , WireCommandFailedException . Reason . SegmentDoesNotExist ) ) ; } public void tableKeysRead ( WireCommands . TableKeysRead tableKeysRead ) { log . info ( requestId , "readTableKeys {} successful." , tableName ) ; final IteratorState state = IteratorState . fromBytes ( tableKeysRead . getContinuationToken ( ) ) ; final List < TableKey < byte [ ] > > keys = tableKeysRead . getKeys ( ) . stream ( ) . map ( k -> new TableKeyImpl < > ( getArray ( k . getData ( ) ) , new KeyVersionImpl ( k . 
getKeyVersion ( ) ) ) ) . collect ( Collectors . toList ( ) ) ; result . complete ( new TableSegment . IteratorItem < > ( state , keys ) ) ; } public void processingFailure ( Exception error ) { log . error ( requestId , "readTableKeys {} failed" , tableName , error ) ; handleError ( error , result , type ) ; } public void authTokenCheckFailed ( WireCommands . AuthTokenCheckFailed authTokenCheckFailed ) { result . completeExceptionally ( new WireCommandFailedException ( new AuthenticationException ( authTokenCheckFailed . toString ( ) ) , type , WireCommandFailedException . Reason . AuthFailed ) ) ; } } ; WireCommands . ReadTableKeys cmd = new WireCommands . ReadTableKeys ( requestId , tableName , delegationToken , suggestedKeyCount , token . toBytes ( ) ) ; sendRequestAsync ( cmd , replyProcessor , result , ModelHelper . encode ( uri ) ) ; return result ; }
The method sends a WireCommand to iterate over table keys .
23,392
/**
 * Takes a connection from the pool, associates the replyProcessor with it and sends the request
 * over it. The resultFuture is completed exceptionally here only when obtaining the connection or
 * sending fails; otherwise its completion (by the reply processor) triggers returning the
 * connection to the pool via requestCompleteCallback.
 *
 * @param request        The WireCommand to send.
 * @param replyProcessor Processor for replies on this connection.
 * @param resultFuture   Future completed when the response is processed.
 * @param uri            The node to connect to.
 */
private void sendRequestAsync ( final WireCommand request , final ReplyProcessor replyProcessor , final CompletableFuture < ? > resultFuture , final PravegaNodeUri uri ) { try { CompletableFuture < ConnectionWrapper > connectionFuture = connectionManager . getConnection ( uri , replyProcessor ) ; connectionFuture . whenComplete ( ( connection , e ) -> connectionCompleteCallback ( request , resultFuture , connection , e ) ) ; resultFuture . whenComplete ( ( result , e ) -> requestCompleteCallback ( connectionFuture , e ) ) ; } catch ( Exception e ) { resultFuture . completeExceptionally ( e ) ; } }
This method takes a new connection from the pool and associates replyProcessor with that connection and sends the supplied request over that connection . It takes a resultFuture that is completed when the response from the store is processed successfully . If there is a failure in establishing connection or sending the request over the wire the resultFuture is completed exceptionally by this method . Otherwise it simply registers a callback on result future s completion to return the connection back to the pool .
23,393
/**
 * Invoked when the connection-pool future completes. On success, sends the request over the
 * obtained connection; on failure, fails the resultFuture with a WireCommandFailedException
 * whose cause is a ConnectionFailedException.
 *
 * @param request      The WireCommand that was to be sent.
 * @param resultFuture The future to fail on connection failure.
 * @param connection   The obtained connection, or null on failure.
 * @param e            The connection failure, or null on success.
 */
private void connectionCompleteCallback(WireCommand request, CompletableFuture<?> resultFuture, ConnectionWrapper connection, Throwable e) {
    boolean failed = connection == null || e != null;
    if (!failed) {
        connection.sendAsync(request, resultFuture);
        return;
    }
    ConnectionFailedException cause;
    if (e != null) {
        cause = new ConnectionFailedException(e);
    } else {
        cause = new ConnectionFailedException();
    }
    resultFuture.completeExceptionally(new WireCommandFailedException(cause, request.getType(), WireCommandFailedException.Reason.ConnectionFailed));
}
Connection completion callback method . This is invoked when the future returned by the connection pool completes . If it succeeded we will have a connection object over which to send the request . If it failed the resultFuture is failed with ConnectionFailedException .
23,394
/**
 * Invoked when the request settles, either normally or with an error. Returns the
 * connection to the pool: if the failure indicates the underlying connection is broken,
 * the connection is first marked failed so the pool discards it; in every case the
 * connection wrapper is closed.
 */
private void requestCompleteCallback(CompletableFuture<ConnectionWrapper> connectionFuture, Throwable e) {
    // Decide up front whether the error (if any) means the connection itself is unusable.
    final boolean connectionBroken = e != null && hasConnectionFailed(Exceptions.unwrap(e));
    connectionFuture.thenAccept(connection -> {
        if (connectionBroken) {
            // Mark the connection failed so the pool will not reuse it.
            connection.failConnection();
        }
        connection.close();
    });
}
The request-complete callback is invoked when the request finishes, either by sending the request and receiving a response from the segment store, or by way of a connection failure. It is responsible for returning the connection back to the connection pool.
23,395
/**
 * Creates a thread factory whose threads are daemons named
 * {@code groupName-1}, {@code groupName-2}, ... in creation order.
 *
 * @param groupName Prefix for the names of threads created by this factory.
 * @return A new {@link ThreadFactory}.
 */
public static ThreadFactory getThreadFactory(String groupName) {
    // Each factory carries its own counter, so numbering restarts per factory.
    final AtomicInteger nextThreadId = new AtomicInteger();
    return runnable -> {
        Thread thread = new Thread(runnable, groupName + "-" + nextThreadId.incrementAndGet());
        thread.setDaemon(true);
        return thread;
    };
}
Creates and returns a thread factory that will create threads with the given name prefix .
23,396
/**
 * Creates a new {@link ScheduledExecutorService} of the given size whose threads are
 * daemons named after {@code poolName}, and which stops executing queued periodic and
 * delayed tasks once shutdown is requested.
 *
 * @param size     Number of threads in the pool.
 * @param poolName Prefix used when naming the pool's threads.
 * @return The configured executor service.
 */
public static ScheduledExecutorService newScheduledThreadPool(int size, String poolName) {
    final ScheduledThreadPoolExecutor executor =
            new ScheduledThreadPoolExecutor(size, getThreadFactory(poolName), new CallerRuns());
    // Do not keep running or start queued work after shutdown has been requested.
    executor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
    executor.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
    // Remove cancelled tasks from the work queue immediately instead of at execution time.
    executor.setRemoveOnCancelPolicy(true);
    return executor;
}
Creates a new ScheduledExecutorService that will use daemon threads with appropriate names for the threads.
23,397
/**
 * Gets a point-in-time snapshot (queue size, active thread count, pool size) of the
 * given ExecutorService.
 *
 * @param service The executor to inspect; must not be null.
 * @return A {@code Snapshot}, or null when the executor is neither a
 *         {@link ThreadPoolExecutor} nor a {@link ForkJoinPool} and so exposes no metrics.
 */
public static Snapshot getSnapshot(ExecutorService service) {
    Preconditions.checkNotNull(service, "service");
    if (service instanceof ThreadPoolExecutor) {
        ThreadPoolExecutor pool = (ThreadPoolExecutor) service;
        return new Snapshot(pool.getQueue().size(), pool.getActiveCount(), pool.getPoolSize());
    }
    if (service instanceof ForkJoinPool) {
        ForkJoinPool pool = (ForkJoinPool) service;
        return new Snapshot(pool.getQueuedSubmissionCount(), pool.getActiveThreadCount(), pool.getPoolSize());
    }
    // Unknown executor implementation: no metrics available.
    return null;
}
Gets a snapshot of the given ExecutorService .
23,398
/**
 * Operates like {@code Executors.newCachedThreadPool} but with a custom idle-thread
 * timeout and pool name: the pool starts empty, grows on demand up to
 * {@code maxThreadCount}, and shrinks back as threads stay idle past the timeout.
 *
 * @param maxThreadCount Maximum number of threads the pool may create.
 * @param threadTimeout  Idle timeout, in milliseconds, after which a thread is reclaimed.
 * @param poolName       Prefix used when naming the pool's threads.
 * @return The configured {@link ThreadPoolExecutor}.
 */
public static ThreadPoolExecutor getShrinkingExecutor(int maxThreadCount, int threadTimeout, String poolName) {
    final BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>();
    final ThreadFactory factory = getThreadFactory(poolName);
    // Core size 0 + keep-alive timeout lets the pool shrink to nothing when idle.
    return new ThreadPoolExecutor(0, maxThreadCount, threadTimeout, TimeUnit.MILLISECONDS,
            workQueue, factory, new CallerRuns());
}
Operates like Executors.newCachedThreadPool, but with a custom thread timeout and pool name.
23,399
/**
 * Executes the given task on the given Executor. Exceptions thrown by the task are routed
 * to {@code exceptionHandler} (unless they must be rethrown), and {@code runFinally} is
 * guaranteed to run exactly once: after the task body when scheduling succeeded, or
 * immediately when scheduling itself failed (e.g. RejectedExecutionException).
 *
 * @param task             The task to run; must not be null.
 * @param exceptionHandler Invoked with any non-fatal exception the task throws; must not be null.
 * @param runFinally       Cleanup callback invoked exactly once in all cases; must not be null.
 * @param executor         The executor to schedule the task on; must not be null.
 */
public static void execute(RunnableWithException task, Consumer<Throwable> exceptionHandler, Runnable runFinally, Executor executor) {
    Preconditions.checkNotNull(task, "task");
    Preconditions.checkNotNull(exceptionHandler, "exceptionHandler");
    Preconditions.checkNotNull(runFinally, "runFinally");
    // Consistency fix: executor was the only argument not validated up front; a null
    // executor previously surfaced as an NPE from executor.execute() below.
    Preconditions.checkNotNull(executor, "executor");
    boolean scheduledSuccess = false;
    try {
        executor.execute(() -> {
            try {
                task.run();
            } catch (Throwable ex) {
                // Fatal errors (per Exceptions.mustRethrow) propagate; everything else
                // is handed to the caller's handler.
                if (!Exceptions.mustRethrow(ex)) {
                    exceptionHandler.accept(ex);
                }
            } finally {
                runFinally.run();
            }
        });
        scheduledSuccess = true;
    } finally {
        if (!scheduledSuccess) {
            // Scheduling failed, so the task body (and its finally) will never run;
            // invoke the cleanup callback here before the scheduling exception propagates.
            runFinally.run();
        }
    }
}
Executes the given task on the given Executor .