idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
23,400 | public static DelimitedStringParser parser ( String pairDelimiter , String keyValueDelimiter ) { Exceptions . checkNotNullOrEmpty ( pairDelimiter , "pairDelimiter" ) ; Exceptions . checkNotNullOrEmpty ( keyValueDelimiter , "keyValueDelimiter" ) ; Preconditions . checkArgument ( ! pairDelimiter . equals ( keyValueDelimiter ) , "pairDelimiter (%s) cannot be the same as keyValueDelimiter (%s)" , pairDelimiter , keyValueDelimiter ) ; return new DelimitedStringParser ( pairDelimiter , keyValueDelimiter ) ; } | Creates a new DelimitedStringParser with the given Pair and KeyValue delimiters . |
23,401 | public void parse ( String s ) { Preconditions . checkNotNull ( s , "s" ) ; if ( s . length ( ) == 0 ) { return ; } val pairs = s . split ( pairDelimiter ) ; for ( String pair : pairs ) { int delimiterPos = pair . indexOf ( keyValueDelimiter ) ; if ( delimiterPos < 0 ) { throw new IllegalArgumentException ( String . format ( "Invalid pair '%s' (missing key-value delimiter)." , pair ) ) ; } String key = pair . substring ( 0 , delimiterPos ) ; String value ; if ( delimiterPos == pair . length ( ) - 1 ) { value = "" ; } else { value = pair . substring ( delimiterPos + 1 ) ; } Extractor < ? > e = this . extractors . get ( key ) ; Preconditions . checkArgument ( e != null , String . format ( "No extractor provided for key '%s'." , key ) ) ; e . process ( value ) ; } } | Parses the given string using the configuration set on this parser . |
23,402 | public < T > CompletableFuture < VersionedMetadata < T > > getCachedData ( String table , String key , Function < byte [ ] , T > fromBytes ) { return cache . getCachedData ( new TableCacheKey < > ( table , key , fromBytes ) ) . thenApply ( this :: getVersionedMetadata ) ; } | Api to read cached value for the specified key from the requested table . |
23,403 | public void invalidateCache ( String table , String key ) { cache . invalidateCache ( new TableCacheKey < > ( table , key , x -> null ) ) ; } | Method to invalidate cached value in the cache for the specified table . |
23,404 | public CompletableFuture < Void > createTable ( String tableName ) { log . debug ( "create table called for table: {}" , tableName ) ; return Futures . toVoid ( withRetries ( ( ) -> segmentHelper . createTableSegment ( tableName , authToken . get ( ) , RequestTag . NON_EXISTENT_ID ) , ( ) -> String . format ( "create table: %s" , tableName ) ) ) . whenCompleteAsync ( ( r , e ) -> { if ( e != null ) { log . warn ( "create table {} threw exception" , tableName , e ) ; } else { log . debug ( "table {} created successfully" , tableName ) ; } } , executor ) ; } | Method to create a new Table . If the table already exists segment helper responds with success . |
23,405 | public CompletableFuture < Void > addNewEntriesIfAbsent ( String tableName , Map < String , byte [ ] > toAdd ) { List < TableEntry < byte [ ] , byte [ ] > > entries = toAdd . entrySet ( ) . stream ( ) . map ( x -> new TableEntryImpl < > ( new TableKeyImpl < > ( x . getKey ( ) . getBytes ( Charsets . UTF_8 ) , KeyVersion . NOT_EXISTS ) , x . getValue ( ) ) ) . collect ( Collectors . toList ( ) ) ; Supplier < String > errorMessage = ( ) -> String . format ( "addNewEntriesIfAbsent: table: %s" , tableName ) ; return expectingDataExists ( withRetries ( ( ) -> segmentHelper . updateTableEntries ( tableName , entries , authToken . get ( ) , RequestTag . NON_EXISTENT_ID ) , errorMessage ) . handle ( ( r , e ) -> { if ( e != null ) { Throwable unwrap = Exceptions . unwrap ( e ) ; if ( unwrap instanceof StoreException . WriteConflictException ) { throw StoreException . create ( StoreException . Type . DATA_EXISTS , errorMessage . get ( ) ) ; } else { log . debug ( "add new entries to {} threw exception {} {}" , tableName , unwrap . getClass ( ) , unwrap . getMessage ( ) ) ; throw new CompletionException ( e ) ; } } else { log . trace ( "entries added to table {}" , tableName ) ; return null ; } } ) , null ) ; } | Method to add a batch of entries if absent . Table implementation on segment store guarantees that either all or none of the entries are added . If segment store responds with success then it is guaranteed that all entries are added to the store . However it is important to note that the segment store could respond with Data Exists even if one of the entries exists . In such case this method will ignore data exist and respond with success for the entire batch . It does not verify if all entries existed or one of the entries existed . Callers should use this only if they are guaranteed to never create the requested entries outside of the requested batch . |
23,406 | public CompletableFuture < Version > updateEntry ( String tableName , String key , byte [ ] value , Version ver ) { log . trace ( "updateEntry entry called for : {} key : {} version {}" , tableName , key , ver . asLongVersion ( ) . getLongValue ( ) ) ; KeyVersionImpl version = new KeyVersionImpl ( ver . asLongVersion ( ) . getLongValue ( ) ) ; List < TableEntry < byte [ ] , byte [ ] > > entries = Collections . singletonList ( new TableEntryImpl < > ( new TableKeyImpl < > ( key . getBytes ( Charsets . UTF_8 ) , version ) , value ) ) ; return withRetries ( ( ) -> segmentHelper . updateTableEntries ( tableName , entries , authToken . get ( ) , RequestTag . NON_EXISTENT_ID ) , ( ) -> String . format ( "updateEntry: key: %s table: %s" , key , tableName ) ) . thenApplyAsync ( x -> { KeyVersion first = x . get ( 0 ) ; log . trace ( "entry for key {} updated to table {} with new version {}" , key , tableName , first . getSegmentVersion ( ) ) ; return new Version . LongVersion ( first . getSegmentVersion ( ) ) ; } , executor ) ; } | Method to update a single entry . |
23,407 | public CompletableFuture < Void > removeEntry ( String tableName , String key ) { log . trace ( "remove entry called for : {} key : {}" , tableName , key ) ; List < TableKey < byte [ ] > > keys = Collections . singletonList ( new TableKeyImpl < > ( key . getBytes ( Charsets . UTF_8 ) , null ) ) ; return expectingDataNotFound ( withRetries ( ( ) -> segmentHelper . removeTableKeys ( tableName , keys , authToken . get ( ) , RequestTag . NON_EXISTENT_ID ) , ( ) -> String . format ( "remove entry: key: %s table: %s" , key , tableName ) ) , null ) . thenAcceptAsync ( v -> log . trace ( "entry for key {} removed from table {}" , key , tableName ) , executor ) ; } | Method to remove entry from the store . |
23,408 | public CompletableFuture < Void > removeEntries ( String tableName , Collection < String > keys ) { log . trace ( "remove entry called for : {} keys : {}" , tableName , keys ) ; List < TableKey < byte [ ] > > listOfKeys = keys . stream ( ) . map ( x -> new TableKeyImpl < > ( x . getBytes ( Charsets . UTF_8 ) , null ) ) . collect ( Collectors . toList ( ) ) ; return expectingDataNotFound ( withRetries ( ( ) -> segmentHelper . removeTableKeys ( tableName , listOfKeys , authToken . get ( ) , RequestTag . NON_EXISTENT_ID ) , ( ) -> String . format ( "remove entries: keys: %s table: %s" , keys . toString ( ) , tableName ) ) , null ) . thenAcceptAsync ( v -> log . trace ( "entry for keys {} removed from table {}" , keys , tableName ) , executor ) ; } | Removes a batch of entries from the table store . Ignores data not found exception and treats it as success . If table store throws dataNotFound for a subset of entries there is no way for this method to disambiguate . So it is the responsibility of the caller to use this api if they are guaranteed to always attempt to remove same batch entries . |
23,409 | public CompletableFuture < Map . Entry < ByteBuf , List < String > > > getKeysPaginated ( String tableName , ByteBuf continuationToken , int limit ) { log . trace ( "get keys paginated called for : {}" , tableName ) ; return withRetries ( ( ) -> segmentHelper . readTableKeys ( tableName , limit , IteratorState . fromBytes ( continuationToken ) , authToken . get ( ) , RequestTag . NON_EXISTENT_ID ) , ( ) -> String . format ( "get keys paginated for table: %s" , tableName ) ) . thenApplyAsync ( result -> { List < String > items = result . getItems ( ) . stream ( ) . map ( x -> new String ( x . getKey ( ) , Charsets . UTF_8 ) ) . collect ( Collectors . toList ( ) ) ; log . trace ( "get keys paginated on table {} returned items {}" , tableName , items ) ; return new AbstractMap . SimpleEntry < > ( result . getState ( ) . toBytes ( ) , items ) ; } , executor ) ; } | Method to get paginated list of keys with a continuation token . |
23,410 | public < T > CompletableFuture < Map . Entry < ByteBuf , List < Map . Entry < String , VersionedMetadata < T > > > > > getEntriesPaginated ( String tableName , ByteBuf continuationToken , int limit , Function < byte [ ] , T > fromBytes ) { log . trace ( "get entries paginated called for : {}" , tableName ) ; return withRetries ( ( ) -> segmentHelper . readTableEntries ( tableName , limit , IteratorState . fromBytes ( continuationToken ) , authToken . get ( ) , RequestTag . NON_EXISTENT_ID ) , ( ) -> String . format ( "get entries paginated for table: %s" , tableName ) ) . thenApplyAsync ( result -> { List < Map . Entry < String , VersionedMetadata < T > > > items = result . getItems ( ) . stream ( ) . map ( x -> { String key = new String ( x . getKey ( ) . getKey ( ) , Charsets . UTF_8 ) ; T deserialized = fromBytes . apply ( x . getValue ( ) ) ; VersionedMetadata < T > value = new VersionedMetadata < > ( deserialized , new Version . LongVersion ( x . getKey ( ) . getVersion ( ) . getSegmentVersion ( ) ) ) ; return new AbstractMap . SimpleEntry < > ( key , value ) ; } ) . collect ( Collectors . toList ( ) ) ; log . trace ( "get keys paginated on table {} returned number of items {}" , tableName , items . size ( ) ) ; return new AbstractMap . SimpleEntry < > ( result . getState ( ) . toBytes ( ) , items ) ; } , executor ) ; } | Method to get paginated list of entries with a continuation token . |
23,411 | public < K , V > CompletableFuture < Map < K , V > > getEntriesWithFilter ( String table , Function < String , K > fromStringKey , Function < byte [ ] , V > fromBytesValue , BiFunction < K , V , Boolean > filter , int limit ) { Map < K , V > result = new ConcurrentHashMap < > ( ) ; AtomicBoolean canContinue = new AtomicBoolean ( true ) ; AtomicReference < ByteBuf > token = new AtomicReference < > ( IteratorState . EMPTY . toBytes ( ) ) ; return Futures . exceptionallyExpecting ( Futures . loop ( canContinue :: get , ( ) -> getEntriesPaginated ( table , token . get ( ) , limit , fromBytesValue ) . thenAccept ( v -> { List < Map . Entry < String , VersionedMetadata < V > > > pair = v . getValue ( ) ; for ( Map . Entry < String , VersionedMetadata < V > > val : pair ) { K key = fromStringKey . apply ( val . getKey ( ) ) ; V value = val . getValue ( ) . getObject ( ) ; if ( filter . apply ( key , value ) ) { result . put ( key , value ) ; if ( result . size ( ) == limit ) { break ; } } } canContinue . set ( ! ( v . getValue ( ) . size ( ) < limit || result . size ( ) >= limit ) ) ; token . get ( ) . release ( ) ; if ( canContinue . get ( ) ) { token . set ( v . getKey ( ) ) ; } } ) , executor ) . thenApply ( x -> result ) , DATA_NOT_FOUND_PREDICATE , Collections . emptyMap ( ) ) ; } | Method to retrieve a collection of entries bounded by the specified limit size that satisfy the supplied filter . This function makes calls into segment store and includes entries that satisfy the supplied predicate . It makes repeated paginated calls into segment store until it has either collected desired number of entries or it has exhausted all entries in the store . |
23,412 | public AsyncIterator < String > getAllKeys ( String tableName ) { return new ContinuationTokenAsyncIterator < > ( token -> getKeysPaginated ( tableName , token , 1000 ) . thenApplyAsync ( result -> { token . release ( ) ; return new AbstractMap . SimpleEntry < > ( result . getKey ( ) , result . getValue ( ) ) ; } , executor ) , IteratorState . EMPTY . toBytes ( ) ) ; } | Method to retrieve all keys in the table . It returns an asyncIterator which can be used to iterate over the returned keys . |
23,413 | public < T > AsyncIterator < Map . Entry < String , VersionedMetadata < T > > > getAllEntries ( String tableName , Function < byte [ ] , T > fromBytes ) { return new ContinuationTokenAsyncIterator < > ( token -> getEntriesPaginated ( tableName , token , 1000 , fromBytes ) . thenApplyAsync ( result -> { token . release ( ) ; return new AbstractMap . SimpleEntry < > ( result . getKey ( ) , result . getValue ( ) ) ; } , executor ) , IteratorState . EMPTY . toBytes ( ) ) ; } | Method to retrieve all entries in the table . It returns an asyncIterator which can be used to iterate over the returned entries . |
23,414 | public void initialize ( ) throws DurableDataLogException { Preconditions . checkState ( this . bookKeeper . get ( ) == null , "BookKeeperLogFactory is already initialized." ) ; try { this . bookKeeper . set ( startBookKeeperClient ( ) ) ; } catch ( IllegalArgumentException | NullPointerException ex ) { close ( ) ; throw ex ; } catch ( Throwable ex ) { if ( ! Exceptions . mustRethrow ( ex ) ) { close ( ) ; } throw new DataLogNotAvailableException ( "Unable to establish connection to ZooKeeper or BookKeeper." , ex ) ; } } | region DurableDataLogFactory Implementation |
23,415 | public DebugLogWrapper createDebugLogWrapper ( int logId ) { Preconditions . checkState ( this . bookKeeper . get ( ) != null , "BookKeeperLogFactory is not initialized." ) ; return new DebugLogWrapper ( logId , this . zkClient , this . bookKeeper . get ( ) , this . config , this . executor ) ; } | Creates a new DebugLogWrapper that can be used for debugging purposes . This should not be used for regular operations . |
23,416 | private static String getSegmentBaseName ( String segmentQualifiedName ) { int endOfStreamNamePos = segmentQualifiedName . lastIndexOf ( TRANSACTION_DELIMITER ) ; if ( endOfStreamNamePos < 0 || endOfStreamNamePos + TRANSACTION_DELIMITER . length ( ) + TRANSACTION_ID_LENGTH > segmentQualifiedName . length ( ) ) { return segmentQualifiedName ; } return segmentQualifiedName . substring ( 0 , endOfStreamNamePos ) ; } | Get base name of segment with the potential transaction delimiter removed . |
23,417 | public static String [ ] createHostTag ( ) { String [ ] hostTag = { MetricsTags . TAG_HOST , null } ; try { hostTag [ 1 ] = InetAddress . getLocalHost ( ) . getHostName ( ) ; } catch ( UnknownHostException e ) { hostTag [ 1 ] = "unknown" ; } return hostTag ; } | Create host tag based on the local host . |
23,418 | public void append ( long streamSegmentId , long offset , byte [ ] data ) throws StreamSegmentNotExistsException { Exceptions . checkNotClosed ( this . closed . get ( ) , this ) ; log . debug ( "{}: append (StreamSegmentId = {}, Offset = {}, DataLength = {})." , this . traceObjectId , streamSegmentId , offset , data . length ) ; StreamSegmentReadIndex index = getOrCreateIndex ( streamSegmentId ) ; Exceptions . checkArgument ( ! index . isMerged ( ) , "streamSegmentId" , "StreamSegment is merged. Cannot append to it anymore." ) ; index . append ( offset , data ) ; } | region ReadIndex Implementation |
23,419 | private StreamSegmentReadIndex getOrCreateIndex ( long streamSegmentId ) throws StreamSegmentNotExistsException { StreamSegmentReadIndex index ; synchronized ( this . lock ) { index = getIndex ( streamSegmentId ) ; if ( index == null ) { SegmentMetadata segmentMetadata = this . metadata . getStreamSegmentMetadata ( streamSegmentId ) ; Exceptions . checkArgument ( segmentMetadata != null , "streamSegmentId" , "StreamSegmentId %s does not exist in the metadata." , streamSegmentId ) ; if ( segmentMetadata . isDeleted ( ) ) { throw new StreamSegmentNotExistsException ( segmentMetadata . getName ( ) ) ; } index = new StreamSegmentReadIndex ( this . config , segmentMetadata , this . cache , this . storage , this . executor , isRecoveryMode ( ) ) ; this . cacheManager . register ( index ) ; this . readIndices . put ( streamSegmentId , index ) ; } } return index ; } | Gets a reference to the existing StreamSegmentRead index for the given StreamSegment Id . Creates a new one if necessary . |
23,420 | public void add ( int value ) { int newIndex = ( this . lastIndex + 1 ) % this . samples . length ; if ( this . count >= this . samples . length ) { this . sum -= this . samples [ newIndex ] ; } else { this . count ++ ; } this . samples [ newIndex ] = value ; this . sum += value ; this . lastIndex = newIndex ; } | Adds the given value to the moving average . If the moving average is already at capacity this will overwrite the oldest value in the series . |
23,421 | private void executeStorageRead ( Request request ) { try { byte [ ] buffer = new byte [ request . length ] ; getHandle ( ) . thenComposeAsync ( handle -> this . storage . read ( handle , request . offset , buffer , 0 , buffer . length , request . getTimeout ( ) ) , this . executor ) . thenAcceptAsync ( bytesRead -> request . complete ( new ByteArraySegment ( buffer , 0 , bytesRead ) ) , this . executor ) . whenComplete ( ( r , ex ) -> { if ( ex != null ) { request . fail ( ex ) ; } finalizeRequest ( request ) ; } ) ; } catch ( Throwable ex ) { if ( Exceptions . mustRethrow ( ex ) ) { throw ex ; } request . fail ( ex ) ; finalizeRequest ( request ) ; } } | Executes the Storage Read for the given request . |
23,422 | @ GuardedBy ( "lock" ) private Request findOverlappingRequest ( Request request ) { Map . Entry < Long , Request > previousEntry = this . pendingRequests . floorEntry ( request . getOffset ( ) ) ; if ( previousEntry != null && request . getOffset ( ) < previousEntry . getValue ( ) . getEndOffset ( ) ) { return previousEntry . getValue ( ) ; } return null ; } | Finds a pending Request that overlaps with the given request based on the given request s Offset . |
23,423 | public void serialize ( RevisionDataOutput dataOutput , T object ) throws IOException { serialize ( dataOutput . getBaseStream ( ) , object ) ; } | Serializes the given object to the given RevisionDataOutput . This overload is usually invoked for serializing nested classes or collections . |
23,424 | void processHeader ( InputStream dataInput ) throws IOException { int formatVersion = dataInput . read ( ) ; if ( formatVersion < 0 ) { throw new EOFException ( ) ; } ensureCondition ( formatVersion == SERIALIZER_VERSION , "Unsupported format version %d." , formatVersion ) ; } | Reads a single unsigned byte from the given InputStream and interprets it as a Serializer Format Version after which it validates that it is supported . |
23,425 | void ensureCondition ( boolean condition , String messageFormat , Object ... args ) throws SerializationException { if ( ! condition ) { throw new SerializationException ( String . format ( messageFormat , args ) ) ; } } | Verifies that the given condition is true . If not throws a SerializationException . |
23,426 | private void removeDuplicates ( NavigableMap < Double , SegmentWithRange > result ) { Segment last = null ; for ( Iterator < SegmentWithRange > iterator = result . descendingMap ( ) . values ( ) . iterator ( ) ; iterator . hasNext ( ) ; ) { SegmentWithRange current = iterator . next ( ) ; if ( current . getSegment ( ) . equals ( last ) ) { iterator . remove ( ) ; } last = current . getSegment ( ) ; } } | This combines consecutive entries in the map that refer to the same segment . This happens following a merge because the preceding segments are replaced one at a time . |
23,427 | private void verifyReplacementRange ( SegmentWithRange replacedSegment , StreamSegmentsWithPredecessors replacementSegments ) { log . debug ( "Verification of replacement segments {} with the current segments {}" , replacementSegments , segments ) ; Map < Long , List < SegmentWithRange > > replacementRanges = replacementSegments . getReplacementRanges ( ) ; List < SegmentWithRange > replacements = replacementRanges . get ( replacedSegment . getSegment ( ) . getSegmentId ( ) ) ; Preconditions . checkArgument ( replacements != null , "Replacement segments did not contain replacements for segment being replaced" ) ; if ( replacementRanges . size ( ) == 1 ) { Preconditions . checkArgument ( replacedSegment . getHigh ( ) == getUpperBound ( replacements ) ) ; Preconditions . checkArgument ( replacedSegment . getLow ( ) == getLowerBound ( replacements ) ) ; } else { Preconditions . checkArgument ( replacedSegment . getHigh ( ) <= getUpperBound ( replacements ) ) ; Preconditions . checkArgument ( replacedSegment . getLow ( ) >= getLowerBound ( replacements ) ) ; } for ( Entry < Long , List < SegmentWithRange > > ranges : replacementRanges . entrySet ( ) ) { Entry < Double , SegmentWithRange > upperReplacedSegment = segments . floorEntry ( getUpperBound ( ranges . getValue ( ) ) ) ; Entry < Double , SegmentWithRange > lowerReplacedSegment = segments . higherEntry ( getLowerBound ( ranges . getValue ( ) ) ) ; Preconditions . checkArgument ( upperReplacedSegment != null , "Missing replaced replacement segments %s" , replacementSegments ) ; Preconditions . checkArgument ( lowerReplacedSegment != null , "Missing replaced replacement segments %s" , replacementSegments ) ; } } | Checks that replacementSegments provided are consistent with the segments that are currently being used . |
23,428 | public static Map < UUID , Long > getCoreNonNullAttributes ( Map < UUID , Long > attributes ) { return attributes . entrySet ( ) . stream ( ) . filter ( e -> Attributes . isCoreAttribute ( e . getKey ( ) ) && e . getValue ( ) != NULL_ATTRIBUTE_VALUE ) . collect ( Collectors . toMap ( Map . Entry :: getKey , Map . Entry :: getValue ) ) ; } | Returns a new Map of Attribute Ids to Values containing only those Core Attributes from the given Map that do not have a null value . |
23,429 | static SegmentChunk forSegment ( String segmentName , long startOffset ) { return new SegmentChunk ( StreamSegmentNameUtils . getSegmentChunkName ( segmentName , startOffset ) , startOffset ) ; } | Creates a new instance of the SegmentChunk class . |
23,430 | SegmentChunk withNewOffset ( long newOffset ) { SegmentChunk ns = new SegmentChunk ( this . name , newOffset ) ; ns . setLength ( getLength ( ) ) ; if ( isSealed ( ) ) { ns . markSealed ( ) ; } if ( ! exists ( ) ) { ns . markInexistent ( ) ; } return ns ; } | Creates a new instance of the SegmentChunk class with the same information as this one but with a new offset . |
23,431 | synchronized void increaseLength ( long delta ) { Preconditions . checkState ( ! this . sealed , "Cannot increase the length of a sealed SegmentChunk." ) ; Preconditions . checkArgument ( delta >= 0 , "Cannot decrease the length of a SegmentChunk." ) ; this . length += delta ; } | Increases the length of the SegmentChunk by the given delta . |
23,432 | synchronized void setLength ( long length ) { Preconditions . checkState ( ! this . sealed , "Cannot increase the length of a sealed SegmentChunk." ) ; Preconditions . checkArgument ( length >= 0 , "length must be a non-negative number." ) ; this . length = length ; } | Sets the length of the SegmentChunk . |
23,433 | public void registerHost ( Host host ) { Preconditions . checkNotNull ( host , "host" ) ; Exceptions . checkArgument ( ! entryMap . containsKey ( host ) , "host" , "host is already registered to cluster." ) ; String hostPath = ZKPaths . makePath ( getPathPrefix ( ) , host . toString ( ) ) ; PersistentNode node = new PersistentNode ( client , CreateMode . EPHEMERAL , false , hostPath , host . toBytes ( ) ) ; node . start ( ) ; entryMap . put ( host , node ) ; } | Register Host to cluster . |
23,434 | public void deregisterHost ( Host host ) { Preconditions . checkNotNull ( host , "host" ) ; PersistentNode node = entryMap . get ( host ) ; Preconditions . checkNotNull ( node , "Host is not present in cluster." ) ; entryMap . remove ( host ) ; close ( node ) ; } | Remove Host from cluster . |
23,435 | public void addListener ( ClusterListener listener ) { Preconditions . checkNotNull ( listener , "listener" ) ; if ( ! cache . isPresent ( ) ) { initializeCache ( ) ; } cache . get ( ) . getListenable ( ) . addListener ( pathChildrenCacheListener ( listener ) ) ; } | Add Listener to the cluster . |
23,436 | public Set < Host > getClusterMembers ( ) { if ( ! cache . isPresent ( ) ) { initializeCache ( ) ; } List < ChildData > data = cache . get ( ) . getCurrentData ( ) ; return data . stream ( ) . map ( d -> Host . fromBytes ( d . getData ( ) ) ) . collect ( Collectors . toSet ( ) ) ; } | Get the current cluster members . |
23,437 | public static boolean validateInputRange ( final List < Long > segmentsToSeal , final List < Map . Entry < Double , Double > > newRanges , final EpochRecord currentEpoch ) { boolean newRangesCheck = newRanges . stream ( ) . noneMatch ( x -> x . getKey ( ) >= x . getValue ( ) && x . getValue ( ) > 0 ) ; if ( newRangesCheck ) { List < Map . Entry < Double , Double > > oldRanges = segmentsToSeal . stream ( ) . map ( segmentId -> { StreamSegmentRecord segment = currentEpoch . getSegment ( segmentId ) ; if ( segment != null ) { return new AbstractMap . SimpleEntry < > ( segment . getKeyStart ( ) , segment . getKeyEnd ( ) ) ; } else { return null ; } } ) . filter ( Objects :: nonNull ) . collect ( Collectors . toList ( ) ) ; return reduce ( oldRanges ) . equals ( reduce ( newRanges ) ) ; } return false ; } | Method to validate supplied scale input . It performs a check that new ranges are identical to sealed ranges . |
23,438 | public static boolean canScaleFor ( final List < Long > segmentsToSeal , final EpochRecord currentEpoch ) { return segmentsToSeal . stream ( ) . allMatch ( x -> currentEpoch . getSegment ( x ) != null ) ; } | Method to check that segments to seal are present in current epoch . |
23,439 | public static boolean verifyRecordMatchesInput ( List < Long > segmentsToSeal , List < Map . Entry < Double , Double > > newRanges , boolean isManualScale , EpochTransitionRecord record ) { boolean newRangeMatch = newRanges . stream ( ) . allMatch ( x -> record . getNewSegmentsWithRange ( ) . values ( ) . stream ( ) . anyMatch ( y -> y . getKey ( ) . equals ( x . getKey ( ) ) && y . getValue ( ) . equals ( x . getValue ( ) ) ) ) ; if ( newRangeMatch ) { final Set < Integer > segmentNumbersToSeal = isManualScale ? segmentsToSeal . stream ( ) . map ( StreamSegmentNameUtils :: getSegmentNumber ) . collect ( Collectors . toSet ( ) ) : null ; return segmentsToSeal . stream ( ) . allMatch ( segmentId -> { if ( isManualScale ) { return segmentNumbersToSeal . contains ( StreamSegmentNameUtils . getSegmentNumber ( segmentId ) ) ; } else { return record . getSegmentsToSeal ( ) . contains ( segmentId ) ; } } ) ; } return false ; } | Method to verify if supplied epoch transition record matches the supplied input which includes segments to seal new ranges to create . For manual scale it will verify that segments to seal match and epoch transition record share the same segment number . |
23,440 | public static EpochTransitionRecord computeEpochTransition ( EpochRecord currentEpoch , List < Long > segmentsToSeal , List < Map . Entry < Double , Double > > newRanges , long scaleTimestamp ) { Preconditions . checkState ( segmentsToSeal . stream ( ) . allMatch ( currentEpoch :: containsSegment ) , "Invalid epoch transition request" ) ; int newEpoch = currentEpoch . getEpoch ( ) + 1 ; int nextSegmentNumber = currentEpoch . getSegments ( ) . stream ( ) . mapToInt ( StreamSegmentRecord :: getSegmentNumber ) . max ( ) . getAsInt ( ) + 1 ; ImmutableMap . Builder < Long , Map . Entry < Double , Double > > newSegments = ImmutableMap . builder ( ) ; for ( int i = 0 ; i < newRanges . size ( ) ; i ++ ) { newSegments . put ( computeSegmentId ( nextSegmentNumber + i , newEpoch ) , newRanges . get ( i ) ) ; } return new EpochTransitionRecord ( currentEpoch . getEpoch ( ) , scaleTimestamp , ImmutableSet . copyOf ( segmentsToSeal ) , newSegments . build ( ) ) ; } | Method to compute epoch transition record . It takes segments to seal and new ranges and all the tables and computes the next epoch transition record . |
23,441 | public static UUID generateTxnId ( int epoch , int msb , long lsb ) { long msb64Bit = ( long ) epoch << 32 | msb & 0xFFFFFFFFL ; return new UUID ( msb64Bit , lsb ) ; } | region transaction id |
23,442 | public static boolean streamCutComparator ( Map < Long , Long > streamCut1 , Map < StreamSegmentRecord , Integer > span1 , Map < Long , Long > streamCut2 , Map < StreamSegmentRecord , Integer > span2 ) { return span1 . entrySet ( ) . stream ( ) . allMatch ( e1 -> span2 . entrySet ( ) . stream ( ) . noneMatch ( e2 -> ( e2 . getKey ( ) . segmentId ( ) == e1 . getKey ( ) . segmentId ( ) && streamCut1 . get ( e1 . getKey ( ) . segmentId ( ) ) < streamCut2 . get ( e2 . getKey ( ) . segmentId ( ) ) ) || ( e2 . getKey ( ) . overlaps ( e1 . getKey ( ) ) && e1 . getValue ( ) < e2 . getValue ( ) ) ) ) ; } | Method to compare two stream cuts given their spans . |
23,443 | private static List < Map . Entry < Double , Double > > reduce ( List < Map . Entry < Double , Double > > input ) { List < Map . Entry < Double , Double > > ranges = new ArrayList < > ( input ) ; ranges . sort ( Comparator . comparingDouble ( Map . Entry :: getKey ) ) ; List < Map . Entry < Double , Double > > result = new ArrayList < > ( ) ; double low = - 1.0 ; double high = - 1.0 ; for ( Map . Entry < Double , Double > range : ranges ) { if ( high < range . getKey ( ) ) { if ( low != - 1.0 && high != - 1.0 ) { result . add ( new AbstractMap . SimpleEntry < > ( low , high ) ) ; } low = range . getKey ( ) ; high = range . getValue ( ) ; } else if ( high == range . getKey ( ) ) { high = range . getValue ( ) ; } else { assert low >= 0 ; assert high > 0 ; result . add ( new AbstractMap . SimpleEntry < > ( low , high ) ) ; low = range . getKey ( ) ; high = range . getValue ( ) ; } } if ( low != - 1.0 && high != - 1.0 ) { result . add ( new AbstractMap . SimpleEntry < > ( low , high ) ) ; } return result ; } | Helper method to compute list of continuous ranges . For example two neighbouring key ranges where range1 . high == range2 . low then they are considered neighbours . This method reduces input range into distinct continuous blocks . |
23,444 | public long getStreamSegmentId ( String streamSegmentName , boolean updateLastUsed ) { synchronized ( this . lock ) { StreamSegmentMetadata metadata = this . metadataByName . getOrDefault ( streamSegmentName , null ) ; if ( updateLastUsed && metadata != null ) { metadata . setLastUsed ( getOperationSequenceNumber ( ) ) ; } return metadata != null ? metadata . getId ( ) : NO_STREAM_SEGMENT_ID ; } } | region SegmentMetadataCollection Implementation |
23,445 | public Collection < SegmentMetadata > getEvictionCandidates ( long sequenceNumberCutoff , int maxCount ) { long adjustedCutoff = Math . min ( sequenceNumberCutoff , this . lastTruncatedSequenceNumber . get ( ) ) ; List < SegmentMetadata > candidates ; synchronized ( this . lock ) { candidates = this . metadataById . values ( ) . stream ( ) . filter ( m -> isEligibleForEviction ( m , adjustedCutoff ) ) . collect ( Collectors . toList ( ) ) ; } if ( candidates . size ( ) > maxCount ) { candidates . sort ( Comparator . comparingLong ( SegmentMetadata :: getLastUsed ) ) ; candidates = candidates . subList ( 0 , maxCount ) ; } return candidates ; } | region EvictableMetadata Implementation |
23,446 | public void recordTruncationMarker ( long operationSequenceNumber , LogAddress address ) { Exceptions . checkArgument ( operationSequenceNumber >= 0 , "operationSequenceNumber" , "Operation Sequence Number must be a positive number." ) ; Preconditions . checkNotNull ( address , "address" ) ; synchronized ( this . truncationMarkers ) { LogAddress existing = this . truncationMarkers . getOrDefault ( operationSequenceNumber , null ) ; if ( existing == null || existing . getSequence ( ) < address . getSequence ( ) ) { this . truncationMarkers . put ( operationSequenceNumber , address ) ; } } } | region TruncationMarkerRepository Implementation |
/**
 * Closes this bounded stream by skipping over any unread bytes within the bound, leaving the
 * underlying stream positioned just past this stream's allotted range. Note that the
 * underlying InputStream is NOT closed here. (region InputStream Implementation)
 *
 * @throws SerializationException If fewer bytes could be skipped than remained, or if more
 *                                bytes than the bound were already consumed (remaining < 0).
 * @throws IOException            If the skip itself fails.
 */
public void close() throws IOException {
    if (this.remaining > 0) {
        int toSkip = this.remaining;
        // NOTE(review): a single skip() call may legally skip fewer bytes than requested per the
        // InputStream contract; presumably the skip() implementation used here skips fully or
        // updates 'remaining' itself -- confirm, otherwise this should loop until exhausted.
        long skipped = skip(toSkip);
        if (skipped != toSkip) {
            throw new SerializationException(String.format("Read %d fewer byte(s) than expected only able to skip %d.", toSkip, skipped));
        }
    } else if (this.remaining < 0) {
        // Over-read: the caller consumed past the declared bound.
        throw new SerializationException(String.format("Read more bytes than expected (%d).", -this.remaining));
    }
}
/**
 * Creates a new BoundedInputStream wrapping the same InputStream as this one, starting at the
 * current position, with the given bound. The bound is deducted from this stream's remaining
 * count, so this stream treats the sub-stream's range as consumed.
 *
 * @param bound Length of the sub-stream; must be in [0, remaining].
 * @return The new BoundedInputStream.
 */
public BoundedInputStream subStream(int bound) {
    Preconditions.checkArgument(bound >= 0 && bound <= this.remaining, "bound must be a non-negative integer and less than or equal to the remaining length.");
    this.remaining -= bound;
    return new BoundedInputStream(this.in, bound);
}
/**
 * Starts the gRPC server on the configured port.
 *
 * @throws Exception If the server fails to start.
 */
protected void startUp() throws Exception {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.objectId, "startUp");
    try {
        log.info("Starting gRPC server listening on port: {}", this.config.getPort());
        this.server.start();
    } finally {
        // Always emit the trace-leave marker, even if start() throws.
        LoggerHelpers.traceLeave(log, this.objectId, "startUp", traceId);
    }
}
/**
 * Gracefully stops the gRPC server: initiates shutdown, then blocks until termination completes.
 *
 * @throws Exception If awaiting termination is interrupted.
 */
protected void shutDown() throws Exception {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.objectId, "shutDown");
    try {
        log.info("Stopping gRPC server listening on port: {}", this.config.getPort());
        this.server.shutdown();
        log.info("Awaiting termination of gRPC server");
        this.server.awaitTermination();
        log.info("gRPC server terminated");
    } finally {
        LoggerHelpers.traceLeave(log, this.objectId, "shutDown", traceId);
    }
}
/**
 * Gets a value indicating whether the Flush thresholds (size or time since last flush) are
 * exceeded for this SegmentAggregator, based on the first outstanding append operation.
 *
 * @return True if a flush should be triggered.
 */
private boolean exceedsThresholds() {
    boolean isFirstAppend = this.operations.size() > 0 && isAppendOperation(this.operations.getFirst());
    long length = isFirstAppend ? this.operations.getFirst().getLength() : 0;
    if (isFirstAppend && length == 0 && !((AggregatedAppendOperation) this.operations.getFirst()).attributes.isEmpty()) {
        // Attribute-only append: treat it as having 1 byte so the time-based check below can fire.
        length = 1;
    }
    return length >= this.config.getFlushThresholdBytes()
            || (length > 0 && getElapsedSinceLastFlush().compareTo(this.config.getFlushThresholdTime()) >= 0);
}
23,452 | private boolean isReconciling ( ) { AggregatorState currentState = this . state . get ( ) ; return currentState == AggregatorState . ReconciliationNeeded || currentState == AggregatorState . Reconciling ; } | Gets a value indicating whether the SegmentAggregator is currently in a Reconciliation state . |
/**
 * Adds the given SegmentOperation to the Aggregator. Deletes are always processed; all other
 * operations are ignored once the Segment is marked as deleted in the metadata.
 *
 * @param operation The operation to add.
 * @throws DataCorruptionException If the operation disagrees with the accumulated state.
 */
public void add(SegmentOperation operation) throws DataCorruptionException {
    ensureInitializedAndNotClosed();
    checkValidOperation(operation);
    boolean isDelete = isDeleteOperation(operation);
    if (isDelete) {
        addDeleteOperation((DeleteSegmentOperation) operation);
        log.debug("{}: Add {}.", this.traceObjectId, operation);
    } else if (!this.metadata.isDeleted()) {
        if (operation instanceof StorageOperation) {
            addStorageOperation((StorageOperation) operation);
        } else if (operation instanceof UpdateAttributesOperation) {
            addUpdateAttributesOperation((UpdateAttributesOperation) operation);
        } else {
            // Unrecognized operation type: nothing to aggregate, and nothing to log.
            return;
        }
        log.debug("{}: Add {}; OpCount={}, MergeCount={}, Seal={}.", this.traceObjectId, operation, this.operations.size(), this.mergeTransactionCount, this.hasSealPending);
    }
}
/**
 * Processes an UpdateAttributesOperation: folds its extended attributes into an aggregated
 * append positioned at the current Storage Length. No-op if the segment is already sealed in
 * Storage or the operation carries no extended attributes.
 *
 * @param operation The operation to process.
 */
private void addUpdateAttributesOperation(UpdateAttributesOperation operation) {
    if (!this.metadata.isSealedInStorage()) {
        Map<UUID, Long> attributes = getExtendedAttributes(operation);
        if (!attributes.isEmpty()) {
            // Piggy-back the attributes on an (possibly zero-length) aggregated append at the
            // current Storage Length so they get flushed alongside the data.
            AggregatedAppendOperation aggregatedAppend = getOrCreateAggregatedAppend(this.metadata.getStorageLength(), operation.getSequenceNumber());
            aggregatedAppend.includeAttributes(attributes);
        }
    }
}
/**
 * Processes an operation that would result in a change to the underlying Storage, queueing it
 * and updating the appropriate pending-work counters/flags.
 *
 * @param operation The operation to process.
 */
private void processNewOperation(StorageOperation operation) {
    if (operation instanceof MergeSegmentOperation) {
        this.operations.add(operation);
        this.mergeTransactionCount.incrementAndGet();
    } else if (operation instanceof StreamSegmentSealOperation) {
        this.operations.add(operation);
        this.hasSealPending.set(true);
    } else if (operation instanceof StreamSegmentTruncateOperation) {
        this.operations.add(operation);
        this.truncateCount.incrementAndGet();
    } else if (operation instanceof CachedStreamSegmentAppendOperation) {
        // Appends are coalesced into an AggregatedAppendOperation rather than queued individually.
        AggregatedAppendOperation aggregatedAppend = getOrCreateAggregatedAppend(operation.getStreamSegmentOffset(), operation.getSequenceNumber());
        aggregateAppendOperation((CachedStreamSegmentAppendOperation) operation, aggregatedAppend);
    }
}
/**
 * Flushes the contents of the Aggregator to the Storage, dispatching on the current
 * AggregatorState (normal write vs. one of the reconciliation phases).
 *
 * @param timeout Timeout for the operation.
 * @return A CompletableFuture with the WriterFlushResult, or failed with the causing exception.
 */
public CompletableFuture<WriterFlushResult> flush(Duration timeout) {
    ensureInitializedAndNotClosed();
    if (this.metadata.isDeletedInStorage()) {
        // Segment no longer exists in Storage: nothing to flush.
        return CompletableFuture.completedFuture(new WriterFlushResult());
    }
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flush");
    TimeoutTimer timer = new TimeoutTimer(timeout);
    CompletableFuture<WriterFlushResult> result;
    try {
        switch (this.state.get()) {
            case Writing:
                result = flushNormally(timer);
                break;
            case ReconciliationNeeded:
                result = beginReconciliation(timer).thenComposeAsync(v -> reconcile(timer), this.executor);
                break;
            case Reconciling:
                result = reconcile(timer);
                break;
            default:
                result = Futures.failedFuture(new IllegalStateException(String.format("Unexpected state for SegmentAggregator (%s) for segment '%s'.", this.state, this.metadata.getName())));
                break;
        }
    } catch (Exception ex) {
        // Surface synchronous failures through the returned future rather than throwing.
        result = Futures.failedFuture(ex);
    }
    return result.thenApply(r -> {
        LoggerHelpers.traceLeave(log, this.traceObjectId, "flush", traceId, r);
        return r;
    });
}
/**
 * Repeatedly flushes the contents of the Aggregator to the Storage for as long as each pass
 * makes progress (flushed or merged bytes &gt; 0), accumulating the per-pass results.
 *
 * @param timer Timer for the operation.
 * @return A CompletableFuture with the combined WriterFlushResult.
 */
private CompletableFuture<WriterFlushResult> flushNormally(TimeoutTimer timer) {
    assert this.state.get() == AggregatorState.Writing : "flushNormally cannot be called if state == " + this.state;
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flushNormally", this.operations.size());
    WriterFlushResult result = new WriterFlushResult();
    AtomicBoolean canContinue = new AtomicBoolean(true);
    return Futures.loop(
            canContinue::get,
            () -> flushOnce(timer),
            partialResult -> {
                // Keep looping only while the previous pass actually moved bytes.
                canContinue.set(partialResult.getFlushedBytes() + partialResult.getMergedBytes() > 0);
                result.withFlushResult(partialResult);
            },
            this.executor)
            .thenApply(v -> {
                LoggerHelpers.traceLeave(log, this.traceObjectId, "flushNormally", traceId, result);
                return result;
            });
}
/**
 * Flushes a pending StreamSegmentTruncateOperation, if that is the next pending operation and
 * the Storage supports truncation; otherwise passes the given result through unchanged.
 *
 * @param flushResult The result to return/augment.
 * @param timeout     Timeout for the Storage truncate call.
 * @return A CompletableFuture with the (unchanged) WriterFlushResult.
 */
private CompletableFuture<WriterFlushResult> flushPendingTruncate(WriterFlushResult flushResult, Duration timeout) {
    StorageOperation op = this.operations.getFirst();
    if (!isTruncateOperation(op) || !this.storage.supportsTruncation()) {
        return CompletableFuture.completedFuture(flushResult);
    }
    CompletableFuture<Void> truncateTask;
    if (this.handle.get() == null) {
        // Segment not yet created in Storage: nothing to truncate, but still advance our state below.
        assert this.metadata.getStorageLength() == 0 : "handle is null but Metadata.getStorageLength is non-zero";
        truncateTask = CompletableFuture.completedFuture(null);
    } else {
        // Never truncate beyond what has actually been written to Storage.
        long truncateOffset = Math.min(this.metadata.getStorageLength(), op.getStreamSegmentOffset());
        truncateTask = this.storage.truncate(this.handle.get(), truncateOffset, timeout);
    }
    return truncateTask.thenApplyAsync(v -> {
        updateStatePostTruncate();
        return flushResult;
    }, this.executor);
}
/**
 * Flushes all Append Operations that can be flushed, up to the maximum allowed flush size:
 * writes the aggregated data (if any) to Storage, then persists any associated attributes.
 * On a BadOffsetException the aggregator is switched to ReconciliationNeeded.
 *
 * @param timeout Timeout for the operation.
 * @return A CompletableFuture with the WriterFlushResult.
 */
private CompletableFuture<WriterFlushResult> flushPendingAppends(Duration timeout) {
    FlushArgs flushArgs;
    try {
        flushArgs = getFlushArgs();
    } catch (DataCorruptionException ex) {
        return Futures.failedFuture(ex);
    }
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flushPendingAppends");
    if (flushArgs.getLength() == 0 && flushArgs.getAttributes().isEmpty()) {
        // Nothing at all to flush.
        WriterFlushResult result = new WriterFlushResult();
        LoggerHelpers.traceLeave(log, this.traceObjectId, "flushPendingAppends", traceId, result);
        return CompletableFuture.completedFuture(result);
    }
    TimeoutTimer timer = new TimeoutTimer(timeout);
    CompletableFuture<Void> flush;
    if (flushArgs.getLength() == 0) {
        // Attribute-only flush: skip the Storage write entirely.
        flush = CompletableFuture.completedFuture(null);
    } else {
        flush = createSegmentIfNecessary(
                () -> this.storage.write(this.handle.get(), this.metadata.getStorageLength(), flushArgs.getStream(), flushArgs.getLength(), timer.getRemaining()),
                timer.getRemaining());
    }
    if (!flushArgs.getAttributes().isEmpty()) {
        // Persist attributes only after the data write succeeds.
        flush = flush.thenComposeAsync(v -> handleAttributeException(
                this.dataSource.persistAttributes(this.metadata.getId(), flushArgs.attributes, timer.getRemaining())));
    }
    return flush.thenApplyAsync(v -> {
        WriterFlushResult result = updateStatePostFlush(flushArgs);
        LoggerHelpers.traceLeave(log, this.traceObjectId, "flushPendingAppends", traceId, result);
        return result;
    }, this.executor).exceptionally(ex -> {
        if (Exceptions.unwrap(ex) instanceof BadOffsetException) {
            // Storage disagrees about the write offset: we need to reconcile before continuing.
            setState(AggregatorState.ReconciliationNeeded);
        }
        throw new CompletionException(ex);
    });
}
/**
 * Returns a FlushArgs containing the data (and attributes) that need to be flushed to Storage,
 * derived from the first outstanding operation if it is an AggregatedAppendOperation.
 *
 * @return The FlushArgs (empty if the first operation is not an aggregated append).
 * @throws DataCorruptionException If the append's cached data is missing (and the segment is
 *                                 not deleted), or if the append has neither data nor attributes.
 */
private FlushArgs getFlushArgs() throws DataCorruptionException {
    StorageOperation first = this.operations.getFirst();
    if (!(first instanceof AggregatedAppendOperation)) {
        return new FlushArgs(null, 0, Collections.emptyMap());
    }
    AggregatedAppendOperation appendOp = (AggregatedAppendOperation) first;
    int length = (int) appendOp.getLength();
    InputStream data;
    if (length > 0) {
        data = this.dataSource.getAppendData(appendOp.getStreamSegmentId(), appendOp.getStreamSegmentOffset(), length);
        if (data == null) {
            if (this.metadata.isDeleted()) {
                // Deleted segments may have had their cache contents evicted; that's benign.
                return new FlushArgs(null, 0, Collections.emptyMap());
            }
            throw new DataCorruptionException(String.format("Unable to retrieve CacheContents for '%s'.", appendOp));
        }
    } else {
        if (appendOp.attributes.isEmpty()) {
            throw new DataCorruptionException(String.format("Found AggregatedAppendOperation with no data or attributes: '%s'.", appendOp));
        }
        data = null;
    }
    // Seal the aggregated append so no further appends are coalesced into it while flushing.
    appendOp.seal();
    return new FlushArgs(data, length, appendOp.attributes);
}
/**
 * Merges the Transaction StreamSegment with the given metadata into this one at the current
 * offset: validates state, performs the Storage merge, updates metadata, and deletes the
 * transaction's attributes. Sets ReconciliationNeeded on BadOffset/NotExists failures.
 *
 * @param transactionMetadata Metadata of the transaction (source) segment.
 * @param mergeOp             The merge operation being applied.
 * @param timer               Timer for the operation.
 * @return A CompletableFuture with the WriterFlushResult.
 */
private CompletableFuture<WriterFlushResult> mergeWith(UpdateableSegmentMetadata transactionMetadata, MergeSegmentOperation mergeOp, TimeoutTimer timer) {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "mergeWith", transactionMetadata.getId(), transactionMetadata.getName(), transactionMetadata.isSealedInStorage());
    boolean emptySourceSegment = transactionMetadata.getLength() == 0;
    if (transactionMetadata.isDeleted() && !emptySourceSegment) {
        // A deleted, non-empty source means our view of the world is stale: reconcile.
        setState(AggregatorState.ReconciliationNeeded);
        return Futures.failedFuture(new StreamSegmentNotExistsException(transactionMetadata.getName()));
    }
    WriterFlushResult result = new WriterFlushResult();
    CompletableFuture<SegmentProperties> merge;
    if (emptySourceSegment) {
        // Empty source: nothing to move in Storage; skip straight to the post-merge bookkeeping.
        log.warn("{}: Not applying '{}' because source segment is missing or empty.", this.traceObjectId, mergeOp);
        merge = CompletableFuture.completedFuture(this.metadata);
    } else if (!transactionMetadata.isSealedInStorage() || transactionMetadata.getLength() > transactionMetadata.getStorageLength()) {
        // Source not yet fully persisted/sealed in Storage: cannot merge now; retry later.
        LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeWith", traceId, result);
        return CompletableFuture.completedFuture(result);
    } else {
        merge = mergeInStorage(transactionMetadata, mergeOp, timer);
    }
    return merge
            .thenAcceptAsync(segmentProperties -> mergeCompleted(segmentProperties, transactionMetadata, mergeOp), this.executor)
            .thenComposeAsync(v -> this.dataSource.deleteAllAttributes(transactionMetadata, timer.getRemaining()), this.executor)
            .thenApply(v -> {
                this.lastFlush.set(this.timer.getElapsed());
                result.withMergedBytes(mergeOp.getLength());
                LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeWith", traceId, result);
                return result;
            }).exceptionally(ex -> {
                Throwable realEx = Exceptions.unwrap(ex);
                if (realEx instanceof BadOffsetException || realEx instanceof StreamSegmentNotExistsException) {
                    setState(AggregatorState.ReconciliationNeeded);
                }
                throw new CompletionException(ex);
            });
}
/**
 * Executes the merge of the Source StreamSegment with the given metadata into this one, in
 * Storage: verifies the source's Storage length against both metadata and the operation,
 * performs the concat, and returns this segment's refreshed SegmentProperties.
 *
 * @param transactionMetadata Metadata of the source segment.
 * @param mergeOp             The merge operation being applied.
 * @param timer               Timer for the operation.
 * @return A CompletableFuture with this segment's post-merge SegmentProperties.
 */
private CompletableFuture<SegmentProperties> mergeInStorage(SegmentMetadata transactionMetadata, MergeSegmentOperation mergeOp, TimeoutTimer timer) {
    return this.storage
            .getStreamSegmentInfo(transactionMetadata.getName(), timer.getRemaining())
            .thenAcceptAsync(transProperties -> {
                // Cross-check the source's actual Storage length against our metadata and the operation.
                if (transProperties.getLength() != transactionMetadata.getStorageLength()) {
                    throw new CompletionException(new DataCorruptionException(String.format(
                            "Transaction Segment '%s' cannot be merged into parent '%s' because its metadata disagrees with the Storage. Metadata.StorageLength=%d, Storage.StorageLength=%d",
                            transactionMetadata.getName(), this.metadata.getName(), transactionMetadata.getStorageLength(), transProperties.getLength())));
                }
                if (transProperties.getLength() != mergeOp.getLength()) {
                    throw new CompletionException(new DataCorruptionException(String.format(
                            "Transaction Segment '%s' cannot be merged into parent '%s' because the declared length in the operation disagrees with the Storage. Operation.Length=%d, Storage.StorageLength=%d",
                            transactionMetadata.getName(), this.metadata.getName(), mergeOp.getLength(), transProperties.getLength())));
                }
            }, this.executor)
            .thenComposeAsync(v -> createSegmentIfNecessary(
                    () -> storage.concat(this.handle.get(), mergeOp.getStreamSegmentOffset(), transactionMetadata.getName(), timer.getRemaining()),
                    timer.getRemaining()), this.executor)
            .exceptionally(ex -> {
                ex = Exceptions.unwrap(ex);
                if (transactionMetadata.getLength() == 0
                        && ex instanceof StreamSegmentNotExistsException
                        && ((StreamSegmentNotExistsException) ex).getStreamSegmentName().equals(transactionMetadata.getName())) {
                    // An empty source that is already gone from Storage is benign: nothing was lost.
                    log.warn("{}: Not applying '{}' because source segment is missing (storage) and had no data.", this.traceObjectId, mergeOp);
                    return null;
                } else {
                    throw new CompletionException(ex);
                }
            })
            .thenComposeAsync(v -> storage.getStreamSegmentInfo(this.metadata.getName(), timer.getRemaining()), this.executor);
}
/**
 * Executes post-Storage-merge tasks: dequeues the merge operation, validates the parent's new
 * Storage length, and updates both the parent's and the transaction's metadata.
 *
 * @param segmentProperties   This segment's Storage properties after the merge.
 * @param transactionMetadata Metadata of the merged (source) segment.
 * @param mergeOp             The merge operation that was applied.
 */
private void mergeCompleted(SegmentProperties segmentProperties, UpdateableSegmentMetadata transactionMetadata, MergeSegmentOperation mergeOp) {
    StorageOperation processedOperation = this.operations.removeFirst();
    assert processedOperation != null && processedOperation instanceof MergeSegmentOperation : "First outstanding operation was not a MergeSegmentOperation";
    MergeSegmentOperation mop = (MergeSegmentOperation) processedOperation;
    assert mop.getSourceSegmentId() == transactionMetadata.getId() : "First outstanding operation was a MergeSegmentOperation for the wrong Transaction id.";
    int newCount = this.mergeTransactionCount.decrementAndGet();
    assert newCount >= 0 : "Negative value for mergeTransactionCount";
    // The parent must have grown by exactly the merged segment's length.
    long expectedNewLength = this.metadata.getStorageLength() + mergeOp.getLength();
    if (segmentProperties.getLength() != expectedNewLength) {
        throw new CompletionException(new DataCorruptionException(String.format(
                "Transaction Segment '%s' was merged into parent '%s' but the parent segment has an unexpected StorageLength after the merger. Previous=%d, MergeLength=%d, Expected=%d, Actual=%d",
                transactionMetadata.getName(), this.metadata.getName(), segmentProperties.getLength(), mergeOp.getLength(), expectedNewLength, segmentProperties.getLength())));
    }
    updateMetadata(segmentProperties);
    updateMetadataForTransactionPostMerger(transactionMetadata, mop.getStreamSegmentId());
}
/**
 * Seals the StreamSegment in Storage, if necessary (a seal is pending and is the next queued
 * operation), then seals the Attribute Index and updates internal state.
 *
 * @param flushResult The result to return/pass through.
 * @param timer       Timer for the operation.
 * @return A CompletableFuture with the given WriterFlushResult.
 */
private CompletableFuture<WriterFlushResult> sealIfNecessary(WriterFlushResult flushResult, TimeoutTimer timer) {
    if (!this.hasSealPending.get() || !(this.operations.getFirst() instanceof StreamSegmentSealOperation)) {
        return CompletableFuture.completedFuture(flushResult);
    }
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "sealIfNecessary");
    CompletableFuture<Void> sealTask;
    if (this.handle.get() == null) {
        // Segment was never created in Storage: nothing to seal there.
        assert this.metadata.getStorageLength() == 0 : "handle is null but Metadata.StorageLength is non-zero";
        sealTask = CompletableFuture.completedFuture(null);
    } else {
        sealTask = this.storage.seal(this.handle.get(), timer.getRemaining());
    }
    return sealTask
            .thenComposeAsync(v -> sealAttributes(timer.getRemaining()), this.executor)
            .handleAsync((v, ex) -> {
                ex = Exceptions.unwrap(ex);
                // An already-sealed segment is fine; anything else is rethrown (NotExists also
                // flips us into reconciliation).
                if (ex != null && !(ex instanceof StreamSegmentSealedException)) {
                    if (ex instanceof StreamSegmentNotExistsException) {
                        setState(AggregatorState.ReconciliationNeeded);
                    }
                    throw new CompletionException(ex);
                }
                updateStatePostSeal();
                LoggerHelpers.traceLeave(log, this.traceObjectId, "sealIfNecessary", traceId, flushResult);
                return flushResult;
            }, this.executor);
}
23,465 | private CompletableFuture < Void > sealAttributes ( Duration timeout ) { return handleAttributeException ( this . dataSource . sealAttributes ( this . metadata . getId ( ) , timeout ) ) ; } | Seals the Attribute Index for this Segment . |
/**
 * Attempts to reconcile the given StorageOperation against the current Storage state,
 * dispatching on the operation type.
 *
 * @param op          The operation to reconcile.
 * @param storageInfo The segment's current Storage properties.
 * @param timer       Timer for the operation.
 * @return A CompletableFuture with the WriterFlushResult; fails with
 *         ReconciliationFailureException for unsupported operation types.
 */
private CompletableFuture<WriterFlushResult> reconcileOperation(StorageOperation op, SegmentProperties storageInfo, TimeoutTimer timer) {
    CompletableFuture<WriterFlushResult> result;
    if (isAppendOperation(op)) {
        result = reconcileAppendOperation((AggregatedAppendOperation) op, storageInfo, timer);
    } else if (op instanceof MergeSegmentOperation) {
        result = reconcileMergeOperation((MergeSegmentOperation) op, storageInfo, timer);
    } else if (op instanceof StreamSegmentSealOperation) {
        result = reconcileSealOperation(storageInfo, timer.getRemaining());
    } else if (isTruncateOperation(op)) {
        // Truncates need no Storage verification; just advance internal state.
        updateStatePostTruncate();
        result = CompletableFuture.completedFuture(new WriterFlushResult());
    } else {
        result = Futures.failedFuture(new ReconciliationFailureException(String.format("Operation '%s' is not supported for reconciliation.", op), this.metadata, storageInfo));
    }
    return result;
}
/**
 * Attempts to reconcile data and attributes for the given AggregatedAppendOperation. Since
 * Append Operations can be partially flushed, reconciliation may cover the full operation or
 * only part of it; the operation is dequeued only when fully reconciled.
 *
 * @param op          The aggregated append to reconcile.
 * @param storageInfo The segment's current Storage properties.
 * @param timer       Timer for the operation.
 * @return A CompletableFuture with the WriterFlushResult.
 */
private CompletableFuture<WriterFlushResult> reconcileAppendOperation(AggregatedAppendOperation op, SegmentProperties storageInfo, TimeoutTimer timer) {
    CompletableFuture<Boolean> reconcileResult;
    WriterFlushResult flushResult = new WriterFlushResult();
    if (op.getLength() > 0) {
        reconcileResult = reconcileData(op, storageInfo, timer).thenApply(reconciledBytes -> {
            flushResult.withFlushedBytes(reconciledBytes);
            // Fully reconciled only if all bytes matched and the op fits within Storage's length.
            return reconciledBytes >= op.getLength() && op.getLastStreamSegmentOffset() <= storageInfo.getLength();
        });
    } else {
        // Attribute-only append: no data to verify.
        reconcileResult = CompletableFuture.completedFuture(true);
    }
    if (!op.attributes.isEmpty()) {
        reconcileResult = reconcileResult.thenComposeAsync(fullyReconciledData -> {
            if (fullyReconciledData) {
                // Re-persist attributes only once all the data has been accounted for.
                return reconcileAttributes(op, timer).thenApply(v -> {
                    flushResult.withFlushedAttributes(op.attributes.size());
                    return fullyReconciledData;
                });
            } else {
                return CompletableFuture.completedFuture(fullyReconciledData);
            }
        }, this.executor);
    }
    return reconcileResult.thenApplyAsync(fullyReconciled -> {
        if (fullyReconciled) {
            StorageOperation removedOp = this.operations.removeFirst();
            assert op == removedOp : "Reconciled operation is not the same as removed operation";
        }
        return flushResult;
    });
}
/**
 * Attempts to reconcile the data for the given AggregatedAppendOperation by reading back from
 * Storage and comparing against the cached append data. Since Append Operations can be
 * partially flushed, reconciliation may cover the full operation or only part of it.
 *
 * @param op          The aggregated append whose data to verify.
 * @param storageInfo The segment's current Storage properties.
 * @param timer       Timer for the operation.
 * @return A CompletableFuture with the number of reconciled bytes.
 */
private CompletableFuture<Integer> reconcileData(AggregatedAppendOperation op, SegmentProperties storageInfo, TimeoutTimer timer) {
    InputStream appendStream = this.dataSource.getAppendData(op.getStreamSegmentId(), op.getStreamSegmentOffset(), (int) op.getLength());
    if (appendStream == null) {
        return Futures.failedFuture(new ReconciliationFailureException(
                String.format("Unable to reconcile operation '%s' because no append data is associated with it.", op), this.metadata, storageInfo));
    }
    // Only read up to what Storage actually has (the append may be partially flushed).
    long readLength = Math.min(op.getLastStreamSegmentOffset(), storageInfo.getLength()) - op.getStreamSegmentOffset();
    assert readLength > 0 : "Append Operation to be reconciled is beyond the Segment's StorageLength (" + storageInfo.getLength() + "): " + op;
    byte[] storageData = new byte[(int) readLength];
    AtomicInteger reconciledBytes = new AtomicInteger();
    return Futures.loop(
            () -> reconciledBytes.get() < readLength,
            () -> this.storage.read(this.handle.get(), op.getStreamSegmentOffset() + reconciledBytes.get(), storageData, reconciledBytes.get(), (int) readLength - reconciledBytes.get(), timer.getRemaining()),
            bytesRead -> {
                assert bytesRead > 0 : String.format("Unable to make any read progress when reconciling operation '%s' after reading %s bytes.", op, reconciledBytes);
                reconciledBytes.addAndGet(bytesRead);
            },
            this.executor)
            .thenApplyAsync(v -> {
                // Byte-for-byte comparison of cached data vs. what Storage holds.
                verifySame(appendStream, storageData, op, storageInfo);
                return reconciledBytes.get();
            }, this.executor);
}
/**
 * Attempts to reconcile the given MergeSegmentOperation: verifies the source segment's
 * metadata exists, that the merge is fully reflected in Storage (the source no longer exists
 * and the target is long enough), then finalizes metadata and counters.
 *
 * @param op          The merge operation to reconcile.
 * @param storageInfo The target segment's current Storage properties.
 * @param timer       Timer for the operation.
 * @return A CompletableFuture with the WriterFlushResult (merged bytes recorded).
 */
private CompletableFuture<WriterFlushResult> reconcileMergeOperation(MergeSegmentOperation op, SegmentProperties storageInfo, TimeoutTimer timer) {
    UpdateableSegmentMetadata transactionMeta = this.dataSource.getStreamSegmentMetadata(op.getSourceSegmentId());
    if (transactionMeta == null) {
        return Futures.failedFuture(new ReconciliationFailureException(
                String.format("Cannot reconcile operation '%s' because the source segment is missing from the metadata.", op), this.metadata, storageInfo));
    }
    if (op.getLastStreamSegmentOffset() > storageInfo.getLength()) {
        return Futures.failedFuture(new ReconciliationFailureException(
                String.format("Cannot reconcile operation '%s' because the source segment is not fully merged into the target.", op), this.metadata, storageInfo));
    }
    return this.storage
            .exists(transactionMeta.getName(), timer.getRemaining())
            .thenComposeAsync(exists -> {
                if (exists) {
                    // A completed merge implies the source segment is gone from Storage.
                    return Futures.failedFuture(new ReconciliationFailureException(
                            String.format("Cannot reconcile operation '%s' because the transaction segment still exists in Storage.", op), this.metadata, storageInfo));
                }
                return this.dataSource.deleteAllAttributes(transactionMeta, timer.getRemaining());
            }, this.executor)
            .thenApplyAsync(v -> {
                StorageOperation processedOperation = this.operations.removeFirst();
                assert processedOperation != null && processedOperation instanceof MergeSegmentOperation : "First outstanding operation was not a MergeSegmentOperation";
                int newCount = this.mergeTransactionCount.decrementAndGet();
                assert newCount >= 0 : "Negative value for mergeTransactionCount";
                // Ensure our recorded StorageLength covers at least the end of the merged range.
                long minStorageLength = processedOperation.getLastStreamSegmentOffset();
                if (this.metadata.getStorageLength() < minStorageLength) {
                    this.metadata.setStorageLength(minStorageLength);
                }
                updateMetadataForTransactionPostMerger(transactionMeta, processedOperation.getStreamSegmentId());
                return new WriterFlushResult().withMergedBytes(op.getLength());
            }, this.executor);
}
/**
 * Attempts to reconcile a StreamSegmentSealOperation: accepted if the segment is already
 * sealed in Storage (or is empty, in which case it may never have been created there).
 *
 * @param storageInfo The segment's current Storage properties.
 * @param timeout     Timeout for the operation.
 * @return A CompletableFuture with an empty WriterFlushResult, or failed with
 *         ReconciliationFailureException if the segment is unexpectedly unsealed.
 */
private CompletableFuture<WriterFlushResult> reconcileSealOperation(SegmentProperties storageInfo, Duration timeout) {
    if (storageInfo.isSealed() || storageInfo.getLength() == 0) {
        return sealAttributes(timeout)
                .thenApplyAsync(v -> {
                    updateStatePostSeal();
                    return new WriterFlushResult();
                }, this.executor);
    } else {
        return Futures.failedFuture(new ReconciliationFailureException("Segment was supposed to be sealed in storage but it is not.", this.metadata, storageInfo));
    }
}
23,471 | private CompletableFuture < Void > reconcileAttributes ( AggregatedAppendOperation op , TimeoutTimer timer ) { return handleAttributeException ( this . dataSource . persistAttributes ( this . metadata . getId ( ) , op . attributes , timer . getRemaining ( ) ) ) ; } | Attempts to reconcile the attributes for a given operation . There is no verification done ; this operation simply re - writes all the attributes to the index which is much more efficient than trying to read the values from the index and then comparing them . |
/**
 * Handles expected Attribute-related exceptions. Since the attribute index is a separate
 * segment from the main one, it may get temporarily out of sync, causing spurious
 * StreamSegmentSealedExceptions or StreamSegmentNotExists/MergedExceptions. If such an
 * exception is consistent with our current metadata state it is safely swallowed; otherwise
 * it is rethrown.
 *
 * @param future The attribute operation to guard.
 * @return A CompletableFuture that absorbs the expected exceptions.
 */
private CompletableFuture<Void> handleAttributeException(CompletableFuture<Void> future) {
    return Futures.exceptionallyExpecting(
            future,
            ex -> (ex instanceof StreamSegmentSealedException && this.metadata.isSealedInStorage())
                    || ((ex instanceof StreamSegmentNotExistsException || ex instanceof StreamSegmentMergedException)
                            && (this.metadata.isMerged() || this.metadata.isDeleted())),
            null);
}
23,473 | private void checkValidStorageOperation ( StorageOperation operation ) throws DataCorruptionException { Preconditions . checkArgument ( ! ( operation instanceof StreamSegmentAppendOperation ) , "SegmentAggregator cannot process StreamSegmentAppendOperations." ) ; long offset = operation . getStreamSegmentOffset ( ) ; long length = operation . getLength ( ) ; Preconditions . checkArgument ( offset >= 0 , "Operation '%s' has an invalid offset (%s)." , operation , operation . getStreamSegmentOffset ( ) ) ; Preconditions . checkArgument ( length >= 0 , "Operation '%s' has an invalid length (%s)." , operation , operation . getLength ( ) ) ; if ( isTruncateOperation ( operation ) ) { if ( this . metadata . getStartOffset ( ) < operation . getStreamSegmentOffset ( ) ) { throw new DataCorruptionException ( String . format ( "StreamSegmentTruncateOperation '%s' has a truncation offset beyond the one in the Segment's Metadata. Expected: at most %d, actual: %d." , operation , this . metadata . getStartOffset ( ) , offset ) ) ; } } else { long lastOffset = this . lastAddedOffset . get ( ) ; if ( lastOffset >= 0 && offset != lastOffset ) { throw new DataCorruptionException ( String . format ( "Wrong offset for Operation '%s'. Expected: %s, actual: %d." , operation , this . lastAddedOffset , offset ) ) ; } } if ( offset + length > this . metadata . getLength ( ) ) { throw new DataCorruptionException ( String . format ( "Operation '%s' has at least one byte beyond its Length. Offset = %d, Length = %d, Length = %d." , operation , offset , length , this . metadata . getLength ( ) ) ) ; } if ( operation instanceof StreamSegmentSealOperation ) { if ( this . metadata . getLength ( ) != offset ) { throw new DataCorruptionException ( String . format ( "Wrong offset for Operation '%s'. Expected: %d (Length), actual: %d." , operation , this . metadata . getLength ( ) , offset ) ) ; } if ( ! this . metadata . isSealed ( ) ) { throw new DataCorruptionException ( String . 
format ( "Received Operation '%s' for a non-sealed segment." , operation ) ) ; } } } | Validates that the given StorageOperation can be processed given the current accumulated state of the Segment . |
/**
 * Updates the metadata and the internal state after a flush was completed: advances the
 * Storage Length, dequeues all fully-flushed append operations, and records the flush time.
 *
 * @param flushArgs The arguments that were used for the flush.
 * @return A WriterFlushResult describing the flushed bytes and attributes.
 */
private WriterFlushResult updateStatePostFlush(FlushArgs flushArgs) {
    long newLength = this.metadata.getStorageLength() + flushArgs.getLength();
    this.metadata.setStorageLength(newLength);
    boolean reachedEnd = false;
    while (this.operations.size() > 0 && !reachedEnd) {
        StorageOperation first = this.operations.getFirst();
        long lastOffset = first.getLastStreamSegmentOffset();
        // Stop once we reach an operation extending past what was just flushed.
        reachedEnd = lastOffset >= newLength;
        assert reachedEnd || isAppendOperation(first) : "Flushed operation was not an Append.";
        if (lastOffset <= newLength && isAppendOperation(first)) {
            this.operations.removeFirst();
        }
    }
    this.lastFlush.set(this.timer.getElapsed());
    return new WriterFlushResult().withFlushedBytes(flushArgs.getLength()).withFlushedAttributes(flushArgs.getAttributes().size());
}
/**
 * Updates the metadata and the internal state after a Seal was completed: marks the segment
 * sealed in Storage, dequeues the Seal operation and clears the pending-seal flag.
 */
private void updateStatePostSeal() {
    this.metadata.markSealedInStorage();
    this.operations.removeFirst();
    // After a Seal, only truncate operations may legitimately remain queued.
    assert this.operations.size() - this.truncateCount.get() == 0 : "there are outstanding non-truncate operations after a Seal";
    this.hasSealPending.set(false);
}
23,476 | private void updateMetadata ( SegmentProperties segmentProperties ) { this . metadata . setStorageLength ( segmentProperties . getLength ( ) ) ; if ( segmentProperties . isSealed ( ) && ! this . metadata . isSealedInStorage ( ) ) { this . metadata . markSealed ( ) ; this . metadata . markSealedInStorage ( ) ; } } | Updates the metadata and based on the given SegmentProperties object . |
/**
 * Opens the given segment for writing: stores the obtained handle in handleRef, then fetches
 * the segment's current Storage properties.
 *
 * @param segmentName Name of the segment to open.
 * @param handleRef   Receives the write handle on success.
 * @param timeout     Timeout for the getStreamSegmentInfo call.
 * @return A CompletableFuture with the segment's SegmentProperties.
 */
private CompletableFuture<SegmentProperties> openWrite(String segmentName, AtomicReference<SegmentHandle> handleRef, Duration timeout) {
    return this.storage
            .openWrite(segmentName)
            .thenComposeAsync(handle -> {
                handleRef.set(handle);
                return this.storage.getStreamSegmentInfo(segmentName, timeout);
            }, this.executor);
}
23,478 | private Map < UUID , Long > getExtendedAttributes ( AttributeUpdaterOperation operation ) { Collection < AttributeUpdate > updates = operation . getAttributeUpdates ( ) ; if ( updates == null ) { return Collections . emptyMap ( ) ; } return updates . stream ( ) . filter ( au -> ! Attributes . isCoreAttribute ( au . getAttributeId ( ) ) ) . collect ( Collectors . toMap ( AttributeUpdate :: getAttributeId , AttributeUpdate :: getValue ) ) ; } | Collects the extended Attributes from the AttributeUpdates of the given operation . |
/**
 * Executes one instance of the task, or queues it up (at most once) should the task be
 * currently running.
 * The running/runAgain flags are only mutated under the monitor; the actual work is
 * invoked outside the lock.
 *
 * @throws ObjectClosedException if this object has been closed (via checkNotClosed).
 */
void runAsync() {
    synchronized (this) {
        Exceptions.checkNotClosed(this.closed, this);
        if (this.running) {
            // A run is in progress; request one more pass. NOTE(review): presumably
            // runInternal() honors runAgain when it finishes — not visible here, confirm.
            this.runAgain = true;
            return;
        }

        this.running = true;
    }

    // Run outside the lock so the task body cannot deadlock against concurrent callers.
    runInternal();
}
/**
 * Translates the REST request object CreateStreamRequest into the internal object
 * StreamConfiguration.
 *
 * @param createStreamRequest the incoming REST request.
 * @return the equivalent StreamConfiguration.
 */
public static final StreamConfiguration getCreateStreamConfig(final CreateStreamRequest createStreamRequest) {
    ScalingPolicy scalingPolicy;
    if (createStreamRequest.getScalingPolicy().getType() == ScalingConfig.TypeEnum.FIXED_NUM_SEGMENTS) {
        scalingPolicy = ScalingPolicy.fixed(createStreamRequest.getScalingPolicy().getMinSegments());
    } else if (createStreamRequest.getScalingPolicy().getType() == ScalingConfig.TypeEnum.BY_RATE_IN_EVENTS_PER_SEC) {
        scalingPolicy = ScalingPolicy.byEventRate(
                createStreamRequest.getScalingPolicy().getTargetRate(),
                createStreamRequest.getScalingPolicy().getScaleFactor(),
                createStreamRequest.getScalingPolicy().getMinSegments());
    } else {
        // Any other type falls through to data-rate based scaling.
        scalingPolicy = ScalingPolicy.byDataRate(
                createStreamRequest.getScalingPolicy().getTargetRate(),
                createStreamRequest.getScalingPolicy().getScaleFactor(),
                createStreamRequest.getScalingPolicy().getMinSegments());
    }

    // Retention is optional; absent (or unrecognized) types leave the policy null.
    RetentionPolicy retentionPolicy = null;
    if (createStreamRequest.getRetentionPolicy() != null) {
        switch (createStreamRequest.getRetentionPolicy().getType()) {
            case LIMITED_SIZE_MB:
                // REST expresses the limit in MB; the internal policy uses bytes.
                retentionPolicy = RetentionPolicy.bySizeBytes(createStreamRequest.getRetentionPolicy().getValue() * 1024 * 1024);
                break;
            case LIMITED_DAYS:
                retentionPolicy = RetentionPolicy.byTime(Duration.ofDays(createStreamRequest.getRetentionPolicy().getValue()));
                break;
        }
    }

    return StreamConfiguration.builder()
            .scalingPolicy(scalingPolicy)
            .retentionPolicy(retentionPolicy)
            .build();
}
/**
 * Translates the REST request object UpdateStreamRequest into the internal object
 * StreamConfiguration.
 *
 * @param updateStreamRequest the incoming REST request.
 * @return the equivalent StreamConfiguration.
 */
public static final StreamConfiguration getUpdateStreamConfig(final UpdateStreamRequest updateStreamRequest) {
    ScalingPolicy scalingPolicy;
    if (updateStreamRequest.getScalingPolicy().getType() == ScalingConfig.TypeEnum.FIXED_NUM_SEGMENTS) {
        scalingPolicy = ScalingPolicy.fixed(updateStreamRequest.getScalingPolicy().getMinSegments());
    } else if (updateStreamRequest.getScalingPolicy().getType() == ScalingConfig.TypeEnum.BY_RATE_IN_EVENTS_PER_SEC) {
        scalingPolicy = ScalingPolicy.byEventRate(
                updateStreamRequest.getScalingPolicy().getTargetRate(),
                updateStreamRequest.getScalingPolicy().getScaleFactor(),
                updateStreamRequest.getScalingPolicy().getMinSegments());
    } else {
        // Any other type falls through to data-rate based scaling.
        scalingPolicy = ScalingPolicy.byDataRate(
                updateStreamRequest.getScalingPolicy().getTargetRate(),
                updateStreamRequest.getScalingPolicy().getScaleFactor(),
                updateStreamRequest.getScalingPolicy().getMinSegments());
    }

    // Retention is optional; absent (or unrecognized) types leave the policy null.
    RetentionPolicy retentionPolicy = null;
    if (updateStreamRequest.getRetentionPolicy() != null) {
        switch (updateStreamRequest.getRetentionPolicy().getType()) {
            case LIMITED_SIZE_MB:
                // REST expresses the limit in MB; the internal policy uses bytes.
                retentionPolicy = RetentionPolicy.bySizeBytes(updateStreamRequest.getRetentionPolicy().getValue() * 1024 * 1024);
                break;
            case LIMITED_DAYS:
                retentionPolicy = RetentionPolicy.byTime(Duration.ofDays(updateStreamRequest.getRetentionPolicy().getValue()));
                break;
        }
    }

    return StreamConfiguration.builder()
            .scalingPolicy(scalingPolicy)
            .retentionPolicy(retentionPolicy)
            .build();
}
/**
 * Translates the internal object StreamConfiguration into the REST response object
 * StreamProperty.
 *
 * @param scope               the Stream's scope name.
 * @param streamName          the Stream's name.
 * @param streamConfiguration the internal configuration to encode.
 * @return the populated StreamProperty REST object.
 */
public static final StreamProperty encodeStreamResponse(String scope, String streamName, final StreamConfiguration streamConfiguration) {
    ScalingConfig scalingPolicy = new ScalingConfig();
    if (streamConfiguration.getScalingPolicy().getScaleType() == ScalingPolicy.ScaleType.FIXED_NUM_SEGMENTS) {
        scalingPolicy.setType(ScalingConfig.TypeEnum.valueOf(streamConfiguration.getScalingPolicy().getScaleType().name()));
        scalingPolicy.setMinSegments(streamConfiguration.getScalingPolicy().getMinNumSegments());
    } else {
        // Rate-based policies additionally carry the target rate and scale factor.
        scalingPolicy.setType(ScalingConfig.TypeEnum.valueOf(streamConfiguration.getScalingPolicy().getScaleType().name()));
        scalingPolicy.setTargetRate(streamConfiguration.getScalingPolicy().getTargetRate());
        scalingPolicy.setScaleFactor(streamConfiguration.getScalingPolicy().getScaleFactor());
        scalingPolicy.setMinSegments(streamConfiguration.getScalingPolicy().getMinNumSegments());
    }

    // Retention is optional; when absent, the REST object carries a null retention policy.
    RetentionConfig retentionConfig = null;
    if (streamConfiguration.getRetentionPolicy() != null) {
        retentionConfig = new RetentionConfig();
        switch (streamConfiguration.getRetentionPolicy().getRetentionType()) {
            case SIZE:
                // Internal policy stores bytes; REST exposes MB.
                retentionConfig.setType(RetentionConfig.TypeEnum.LIMITED_SIZE_MB);
                retentionConfig.setValue(streamConfiguration.getRetentionPolicy().getRetentionParam() / (1024 * 1024));
                break;
            case TIME:
                // Internal policy stores milliseconds; REST exposes whole days.
                retentionConfig.setType(RetentionConfig.TypeEnum.LIMITED_DAYS);
                retentionConfig.setValue(Duration.ofMillis(streamConfiguration.getRetentionPolicy().getRetentionParam()).toDays());
                break;
        }
    }

    StreamProperty streamProperty = new StreamProperty();
    streamProperty.setScopeName(scope);
    streamProperty.setStreamName(streamName);
    streamProperty.setScalingPolicy(scalingPolicy);
    streamProperty.setRetentionPolicy(retentionConfig);
    return streamProperty;
}
23,483 | public < T > T getConfig ( Supplier < ? extends ConfigBuilder < ? extends T > > constructor ) { return constructor . get ( ) . rebase ( this . properties ) . build ( ) ; } | Gets a new instance of a Configuration for this builder . |
23,484 | public void store ( File file ) throws IOException { try ( val s = new FileOutputStream ( file , false ) ) { this . properties . store ( s , "" ) ; } } | Stores the contents of the ServiceBuilderConfig into the given File . |
23,485 | public static ServiceBuilderConfig getDefaultConfig ( ) { return new Builder ( ) . include ( ServiceConfig . builder ( ) . with ( ServiceConfig . CONTAINER_COUNT , 1 ) ) . build ( ) ; } | Gets a default set of configuration values in absence of any real configuration . These configuration values are the default ones from all component configurations except that it will create only one container to host segments . |
23,486 | public < V > ConfigBuilder < T > with ( Property < V > property , V value ) { String key = String . format ( "%s.%s" , this . namespace , property . getName ( ) ) ; this . properties . setProperty ( key , value == null ? "" : value . toString ( ) ) ; return this ; } | Includes the given property and its value in the builder . |
23,487 | public ConfigBuilder < T > withUnsafe ( Property < ? > property , Object value ) { String key = String . format ( "%s.%s" , this . namespace , property . getName ( ) ) ; this . properties . setProperty ( key , value . toString ( ) ) ; return this ; } | Includes the given property and its value in the builder without Property - Value type - enforcement . |
23,488 | protected void outputLogSummary ( int logId , ReadOnlyLogMetadata m ) { if ( m == null ) { output ( "Log %d: No metadata." , logId ) ; } else { output ( "Log %d: Epoch=%d, Version=%d, Enabled=%s, Ledgers=%d, Truncation={%s}" , logId , m . getEpoch ( ) , m . getUpdateVersion ( ) , m . isEnabled ( ) , m . getLedgers ( ) . size ( ) , m . getTruncationAddress ( ) ) ; } } | Outputs a summary for the given Log . |
23,489 | protected Context createContext ( ) throws DurableDataLogException { val serviceConfig = getServiceConfig ( ) ; val bkConfig = getCommandArgs ( ) . getState ( ) . getConfigBuilder ( ) . include ( BookKeeperConfig . builder ( ) . with ( BookKeeperConfig . ZK_ADDRESS , serviceConfig . getZkURL ( ) ) ) . build ( ) . getConfig ( BookKeeperConfig :: builder ) ; val zkClient = createZKClient ( ) ; val factory = new BookKeeperLogFactory ( bkConfig , zkClient , getCommandArgs ( ) . getState ( ) . getExecutor ( ) ) ; try { factory . initialize ( ) ; } catch ( DurableDataLogException ex ) { zkClient . close ( ) ; throw ex ; } val bkAdmin = new BookKeeperAdmin ( factory . getBookKeeperClient ( ) ) ; return new Context ( serviceConfig , bkConfig , zkClient , factory , bkAdmin ) ; } | Creates a new Context to be used by the BookKeeper command . |
23,490 | void add ( TableKey key , UUID hash , int length ) { Item item = new Item ( key , hash , this . length ) ; this . items . add ( item ) ; this . length += length ; if ( key . hasVersion ( ) ) { this . versionedItems . add ( item ) ; } } | Adds a new Item to this TableKeyBatch . |
23,491 | private void processWritesSync ( ) { if ( this . closed . get ( ) ) { return ; } if ( getWriteLedger ( ) . ledger . isClosed ( ) ) { this . rolloverProcessor . runAsync ( ) ; } else if ( ! processPendingWrites ( ) && ! this . closed . get ( ) ) { this . writeProcessor . runAsync ( ) ; } } | Write Processor main loop . This method is not thread safe and should only be invoked as part of the Write Processor . |
/**
 * Executes pending Writes to BookKeeper. Not thread safe: must only be invoked as part
 * of the Write Processor.
 *
 * @return true if the pass completed normally (or the queue was empty); false if a write
 * failed (log is closed) or the issued batch did not execute successfully.
 */
private boolean processPendingWrites() {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "processPendingWrites");

    // Purge already-finished writes first; this also detects a previously-failed write.
    val cs = this.writes.removeFinishedWrites();
    if (cs == WriteQueue.CleanupStatus.WriteFailed) {
        // A write failed permanently: this log can no longer be used.
        close();
        LoggerHelpers.traceLeave(log, this.traceObjectId, "processPendingWrites", traceId, cs);
        return false;
    } else if (cs == WriteQueue.CleanupStatus.QueueEmpty) {
        LoggerHelpers.traceLeave(log, this.traceObjectId, "processPendingWrites", traceId, cs);
        return true;
    }

    // Pick the next batch and issue it to BookKeeper.
    List<Write> toExecute = getWritesToExecute();
    boolean success = true;
    if (!toExecute.isEmpty()) {
        success = executeWrites(toExecute);
        if (success) {
            // Writes were issued; check whether the active ledger needs rolling over.
            this.rolloverProcessor.runAsync();
        }
    }

    LoggerHelpers.traceLeave(log, this.traceObjectId, "processPendingWrites", traceId, toExecute.size(), success);
    return success;
}
23,493 | private List < Write > getWritesToExecute ( ) { final long maxTotalSize = this . config . getBkLedgerMaxSize ( ) - getWriteLedger ( ) . ledger . getLength ( ) ; List < Write > toExecute = this . writes . getWritesToExecute ( maxTotalSize ) ; if ( handleClosedLedgers ( toExecute ) ) { toExecute = this . writes . getWritesToExecute ( maxTotalSize ) ; } return toExecute ; } | Collects an ordered list of Writes to execute to BookKeeper . |
/**
 * Executes the given Writes to BookKeeper (asynchronously; addCallback completes them).
 * If any write fails to be issued, every subsequent write in the batch is failed as well,
 * preserving write order.
 *
 * @param toExecute the ordered batch of Writes to issue.
 * @return true if all writes were issued; false if the batch was aborted.
 */
private boolean executeWrites(List<Write> toExecute) {
    log.debug("{}: Executing {} writes.", this.traceObjectId, toExecute.size());
    for (int i = 0; i < toExecute.size(); i++) {
        Write w = toExecute.get(i);
        try {
            // Record the attempt; give up once the configured retry budget is exhausted.
            int attemptCount = w.beginAttempt();
            if (attemptCount > this.config.getMaxWriteAttempts()) {
                throw new RetriesExhaustedException(w.getFailureCause());
            }

            // Issue the append; addCallback() completes (or fails) the Write later.
            w.getWriteLedger().ledger.asyncAddEntry(w.data.array(), w.data.arrayOffset(), w.data.getLength(), this::addCallback, w);
        } catch (Throwable ex) {
            // Fail this write, then cascade the failure to everything after it in the
            // batch so that write order is never violated.
            boolean isFinal = !isRetryable(ex);
            w.fail(ex, isFinal);
            for (int j = i + 1; j < toExecute.size(); j++) {
                toExecute.get(j).fail(new DurableDataLogException("Previous write failed.", ex), isFinal);
            }

            return false;
        }
    }

    return true;
}
/**
 * Reliably gets the LastAddConfirmed for the given WriteLedger, caching results in the
 * provided map so each ledger is only queried once per pass.
 * Declared via @SneakyThrows: a DurableDataLogException from the BookKeeper read is
 * rethrown without appearing in the signature.
 *
 * @param writeLedger       the ledger to query.
 * @param lastAddsConfirmed cache of LedgerId to LastAddConfirmed; updated on a miss.
 * @return the LastAddConfirmed for the ledger.
 */
@SneakyThrows(DurableDataLogException.class)
private long fetchLastAddConfirmed(WriteLedger writeLedger, Map<Long, Long> lastAddsConfirmed) {
    long ledgerId = writeLedger.ledger.getId();
    // -1 signals "not cached yet".
    long lac = lastAddsConfirmed.getOrDefault(ledgerId, -1L);
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "fetchLastAddConfirmed", ledgerId, lac);
    if (lac < 0) {
        if (writeLedger.isRolledOver()) {
            // Rolled-over ledger: its in-memory LastAddConfirmed is final.
            lac = writeLedger.ledger.getLastAddConfirmed();
        } else {
            // Otherwise read the authoritative value from BookKeeper.
            lac = Ledgers.readLastAddConfirmed(ledgerId, this.bookKeeper, this.config);
        }

        lastAddsConfirmed.put(ledgerId, lac);
        log.info("{}: Fetched actual LastAddConfirmed ({}) for LedgerId {}.", this.traceObjectId, lac, ledgerId);
    }

    LoggerHelpers.traceLeave(log, this.traceObjectId, "fetchLastAddConfirmed", traceId, ledgerId, lac);
    return lac;
}
/**
 * Callback for BookKeeper appends. Completes the Write on success, otherwise converts
 * the response code into an exception and fails the Write. In all cases, re-triggers
 * the Write Processor so cleanup/retries happen promptly.
 *
 * @param rc      BookKeeper response code (0 indicates success).
 * @param handle  the LedgerHandle the entry was appended to.
 * @param entryId the Id assigned to the appended entry.
 * @param ctx     the Write instance associated with this append.
 */
private void addCallback(int rc, LedgerHandle handle, long entryId, Object ctx) {
    Write write = (Write) ctx;
    try {
        assert handle.getId() == write.getWriteLedger().ledger.getId()
                : "Handle.Id mismatch: " + write.getWriteLedger().ledger.getId() + " vs " + handle.getId();
        write.setEntryId(entryId);
        if (rc == 0) {
            // Success: complete the Write.
            completeWrite(write);
            return;
        }

        // Failure: translate the response code and fail the Write accordingly.
        handleWriteException(rc, write);
    } catch (Throwable ex) {
        // Anything thrown while handling the callback fails the Write itself.
        write.fail(ex, !isRetryable(ex));
    } finally {
        // Always poke the Write Processor, even on failure paths.
        try {
            this.writeProcessor.runAsync();
        } catch (ObjectClosedException ex) {
            log.warn("{}: Not running WriteProcessor as part of callback due to BookKeeperLog being closed.", this.traceObjectId, ex);
        }
    }
}
23,497 | private void completeWrite ( Write write ) { Timer t = write . complete ( ) ; if ( t != null ) { this . metrics . bookKeeperWriteCompleted ( write . data . getLength ( ) , t . getElapsed ( ) ) ; } } | Completes the given Write and makes any necessary internal updates . |
23,498 | private void handleWriteException ( Throwable ex ) { if ( ex instanceof ObjectClosedException && ! this . closed . get ( ) ) { log . warn ( "{}: Caught ObjectClosedException but not closed; closing now." , this . traceObjectId , ex ) ; close ( ) ; } } | Handles a general Write exception . |
/**
 * Handles an exception after a Write operation: converts the BookKeeper response code
 * into the appropriate Pravega exception and fails the given Write with it.
 *
 * @param responseCode the BookKeeper response code (must not be OK).
 * @param write        the Write to fail.
 */
private void handleWriteException(int responseCode, Write write) {
    assert responseCode != BKException.Code.OK : "cannot handle an exception when responseCode == " + BKException.Code.OK;
    Exception ex = BKException.create(responseCode);
    try {
        if (ex instanceof BKException.BKLedgerFencedException) {
            // Another writer fenced us out: we are no longer the primary for this log.
            ex = new DataLogWriterNotPrimaryException("BookKeeperLog is not primary anymore.", ex);
        } else if (ex instanceof BKException.BKNotEnoughBookiesException) {
            // Cluster cannot satisfy the ensemble: retryable availability problem.
            ex = new DataLogNotAvailableException("BookKeeperLog is not available.", ex);
        } else if (ex instanceof BKException.BKLedgerClosedException) {
            ex = new WriteFailureException("Active Ledger is closed.", ex);
        } else if (ex instanceof BKException.BKWriteException) {
            ex = new WriteFailureException("Unable to write to active Ledger.", ex);
        } else if (ex instanceof BKException.BKClientClosedException) {
            ex = new ObjectClosedException(this, ex);
        } else {
            // Anything else: wrap generically.
            ex = new DurableDataLogException("General exception while accessing BookKeeper.", ex);
        }
    } finally {
        // The Write is always failed, with finality decided by retryability.
        write.fail(ex, !isRetryable(ex));
    }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.