idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
8,200
private int preparedStatement ( Cassandra . Client client ) { Integer itemId = preparedStatements . get ( client ) ; if ( itemId == null ) { CqlPreparedResult result ; try { result = client . prepare_cql3_query ( ByteBufferUtil . bytes ( cql ) , Compression . NONE ) ; } catch ( InvalidRequestException e ) { throw new RuntimeException ( "failed to prepare cql query " + cql , e ) ; } catch ( TException e ) { throw new RuntimeException ( "failed to prepare cql query " + cql , e ) ; } Integer previousId = preparedStatements . putIfAbsent ( client , Integer . valueOf ( result . itemId ) ) ; itemId = previousId == null ? result . itemId : previousId ; } return itemId ; }
get prepared statement id from cache otherwise prepare it from Cassandra server
8,201
public void complete ( ) { completeTree ( ) ; StageManager . getStage ( Stage . ANTI_ENTROPY ) . execute ( this ) ; if ( logger . isDebugEnabled ( ) ) { logger . debug ( "Validated {} partitions for {}. Partitions per leaf are:" , validated , desc . sessionId ) ; tree . histogramOfRowCountPerLeaf ( ) . log ( logger ) ; logger . debug ( "Validated {} partitions for {}. Partition sizes are:" , validated , desc . sessionId ) ; tree . histogramOfRowSizePerLeaf ( ) . log ( logger ) ; } }
Registers the newly created tree for rendezvous in Stage . ANTI_ENTROPY .
8,202
public void fail ( ) { logger . error ( "Failed creating a merkle tree for {}, {} (see log for details)" , desc , initiator ) ; MessagingService . instance ( ) . sendOneWay ( new ValidationComplete ( desc ) . createMessage ( ) , initiator ) ; }
Called when some error during the validation happened . This sends RepairStatus to inform the initiator that the validation has failed . The actual reason for failure should be looked up in the log of the host calling this function .
8,203
public void run ( ) { if ( ! initiator . equals ( FBUtilities . getBroadcastAddress ( ) ) ) logger . info ( String . format ( "[repair #%s] Sending completed merkle tree to %s for %s/%s" , desc . sessionId , initiator , desc . keyspace , desc . columnFamily ) ) ; MessagingService . instance ( ) . sendOneWay ( new ValidationComplete ( desc , tree ) . createMessage ( ) , initiator ) ; }
Called after the validation lifecycle to respond with the now valid tree . Runs in Stage . ANTI_ENTROPY .
8,204
boolean maybeSchedule ( ) { if ( pool . spinningCount . get ( ) > 0 || ! takeWorkPermit ( true ) ) return false ; pool . schedule ( new Work ( this ) ) ; return true ; }
will self - assign to it in the immediate future
8,205
void returnWorkPermit ( ) { while ( true ) { long current = permits . get ( ) ; int workPermits = workPermits ( current ) ; if ( permits . compareAndSet ( current , updateWorkPermits ( current , workPermits + 1 ) ) ) return ; } }
gives up a work permit
8,206
private static boolean areTokensValid ( Token ... tokens ) { for ( Token token : tokens ) { if ( ! isTokenValid ( token ) ) return false ; } return true ; }
Checks if the specified tokens are valid .
8,207
private static String highlightToken ( String line , Token token ) { String newLine = insertChar ( line , getLastCharPositionInLine ( token ) , ']' ) ; return insertChar ( newLine , token . getCharPositionInLine ( ) , '[' ) ; }
Puts the specified token within square brackets .
8,208
public void run ( ) { differences . addAll ( MerkleTree . difference ( r1 . tree , r2 . tree ) ) ; String format = String . format ( "[repair #%s] Endpoints %s and %s %%s for %s" , desc . sessionId , r1 . endpoint , r2 . endpoint , desc . columnFamily ) ; if ( differences . isEmpty ( ) ) { logger . info ( String . format ( format , "are consistent" ) ) ; MessagingService . instance ( ) . sendOneWay ( new SyncComplete ( desc , r1 . endpoint , r2 . endpoint , true ) . createMessage ( ) , FBUtilities . getLocalAddress ( ) ) ; return ; } logger . info ( String . format ( format , "have " + differences . size ( ) + " range(s) out of sync" ) ) ; performStreamingRepair ( ) ; }
Compares our trees and triggers repairs for any ranges that mismatch .
8,209
private InetAddress minEndpoint ( ) { return FBUtilities . compareUnsigned ( r1 . endpoint . getAddress ( ) , r2 . endpoint . getAddress ( ) ) < 0 ? r1 . endpoint : r2 . endpoint ; }
So we just order endpoints deterministically to simplify this
8,210
public static CompressionMetadata create ( String dataFilePath ) { Descriptor desc = Descriptor . fromFilename ( dataFilePath ) ; return new CompressionMetadata ( desc . filenameFor ( Component . COMPRESSION_INFO ) , new File ( dataFilePath ) . length ( ) , desc . version . hasPostCompressionAdlerChecksums ) ; }
Create metadata about given compressed file including uncompressed data length chunk size and list of the chunk offsets of the compressed data .
8,211
boolean reset ( double useChance , int targetCount , boolean isWrite ) { this . isWrite = isWrite ; if ( this . useChance < 1d ) { Arrays . fill ( rollmodifier , 1d ) ; Arrays . fill ( chancemodifier , 1d ) ; } generator . clusteringComponents . get ( 0 ) . setSeed ( idseed ) ; int firstComponentCount = ( int ) generator . clusteringComponents . get ( 0 ) . clusteringDistribution . next ( ) ; int expectedRowCount ; int position = seed . position ( ) ; if ( isWrite ) expectedRowCount = firstComponentCount * generator . clusteringDescendantAverages [ 0 ] ; else if ( position != 0 ) expectedRowCount = setLastRow ( position - 1 ) ; else expectedRowCount = setNoLastRow ( firstComponentCount ) ; if ( Double . isNaN ( useChance ) ) useChance = Math . max ( 0d , Math . min ( 1d , targetCount / ( double ) expectedRowCount ) ) ; this . useChance = useChance ; while ( true ) { for ( Queue < ? > q : clusteringComponents ) q . clear ( ) ; clusteringSeeds [ 0 ] = idseed ; fill ( clusteringComponents [ 0 ] , firstComponentCount , generator . clusteringComponents . get ( 0 ) ) ; if ( ! isWrite ) { if ( seek ( 0 ) != State . SUCCESS ) throw new IllegalStateException ( ) ; return true ; } int count = Math . max ( 1 , expectedRowCount / seed . visits ) ; position = seed . moveForwards ( count ) ; isFirstWrite = position == 0 ; setLastRow ( position + count - 1 ) ; switch ( seek ( position ) ) { case END_OF_PARTITION : return false ; case SUCCESS : return true ; } } }
initialise the iterator state
8,212
private int setNoLastRow ( int firstComponentCount ) { Arrays . fill ( lastRow , Integer . MAX_VALUE ) ; return firstComponentCount * generator . clusteringDescendantAverages [ 0 ] ; }
returns expected row count
8,213
private int setLastRow ( int position ) { if ( position < 0 ) throw new IllegalStateException ( ) ; decompose ( position , lastRow ) ; int expectedRowCount = 0 ; for ( int i = 0 ; i < lastRow . length ; i ++ ) { int l = lastRow [ i ] ; expectedRowCount += l * generator . clusteringDescendantAverages [ i ] ; } return expectedRowCount + 1 ; }
returns expected distance from zero
8,214
private int compareToLastRow ( int depth ) { for ( int i = 0 ; i <= depth ; i ++ ) { int p = currentRow [ i ] , l = lastRow [ i ] , r = clusteringComponents [ i ] . size ( ) ; if ( ( p == l ) | ( r == 1 ) ) continue ; return p - l ; } return 0 ; }
OR if that row does not exist it is the last row prior to it
8,215
private void decompose ( int scalar , int [ ] decomposed ) { for ( int i = 0 ; i < decomposed . length ; i ++ ) { int avg = generator . clusteringDescendantAverages [ i ] ; decomposed [ i ] = scalar / avg ; scalar %= avg ; } for ( int i = lastRow . length - 1 ; i > 0 ; i -- ) { int avg = generator . clusteringComponentAverages [ i ] ; if ( decomposed [ i ] >= avg ) { decomposed [ i - 1 ] += decomposed [ i ] / avg ; decomposed [ i ] %= avg ; } } }
Translate the scalar position into a tiered position based on mean expected counts
8,216
private State seek ( int scalar ) { if ( scalar == 0 ) { this . currentRow [ 0 ] = - 1 ; clusteringComponents [ 0 ] . addFirst ( this ) ; return setHasNext ( advance ( 0 , true ) ) ; } int [ ] position = this . currentRow ; decompose ( scalar , position ) ; for ( int i = 0 ; i < position . length ; i ++ ) { if ( i != 0 ) fill ( i ) ; for ( int c = position [ i ] ; c > 0 ; c -- ) clusteringComponents [ i ] . poll ( ) ; if ( clusteringComponents [ i ] . isEmpty ( ) ) { int j = i ; while ( true ) { if ( -- j < 0 ) return setHasNext ( false ) ; clusteringComponents [ j ] . poll ( ) ; if ( ! clusteringComponents [ j ] . isEmpty ( ) ) break ; } position [ j ] ++ ; Arrays . fill ( position , j + 1 , position . length , 0 ) ; while ( j < i ) fill ( ++ j ) ; } row . row [ i ] = clusteringComponents [ i ] . peek ( ) ; } if ( compareToLastRow ( currentRow . length - 1 ) > 0 ) return setHasNext ( false ) ; position [ position . length - 1 ] -- ; clusteringComponents [ position . length - 1 ] . addFirst ( this ) ; return setHasNext ( advance ( position . length - 1 , true ) ) ; }
seek to the provided position to initialise the iterator
8,217
void advance ( ) { int depth = clusteringComponents . length - 1 ; long parentSeed = clusteringSeeds [ depth ] ; long rowSeed = seed ( clusteringComponents [ depth ] . peek ( ) , generator . clusteringComponents . get ( depth ) . type , parentSeed ) ; for ( int i = clusteringSeeds . length ; i < row . row . length ; i ++ ) { Generator gen = generator . valueComponents . get ( i - clusteringSeeds . length ) ; gen . setSeed ( rowSeed ) ; row . row [ i ] = gen . generate ( ) ; } setHasNext ( advance ( depth , false ) ) ; }
to move the iterator to the next item
8,218
void fill ( int depth ) { long seed = clusteringSeeds [ depth - 1 ] ; Generator gen = generator . clusteringComponents . get ( depth ) ; gen . setSeed ( seed ) ; clusteringSeeds [ depth ] = seed ( clusteringComponents [ depth - 1 ] . peek ( ) , generator . clusteringComponents . get ( depth - 1 ) . type , seed ) ; fill ( clusteringComponents [ depth ] , ( int ) gen . clusteringDistribution . next ( ) , gen ) ; }
to have been generated and their seeds populated into clusteringSeeds
8,219
void fill ( Queue < Object > queue , int count , Generator generator ) { if ( count == 1 ) { queue . add ( generator . generate ( ) ) ; return ; } switch ( this . generator . order ) { case SORTED : if ( Comparable . class . isAssignableFrom ( generator . clazz ) ) { tosort . clear ( ) ; for ( int i = 0 ; i < count ; i ++ ) tosort . add ( generator . generate ( ) ) ; Collections . sort ( ( List < Comparable > ) ( List < ? > ) tosort ) ; for ( int i = 0 ; i < count ; i ++ ) if ( i == 0 || ( ( Comparable ) tosort . get ( i - 1 ) ) . compareTo ( i ) < 0 ) queue . add ( tosort . get ( i ) ) ; break ; } case ARBITRARY : unique . clear ( ) ; for ( int i = 0 ; i < count ; i ++ ) { Object next = generator . generate ( ) ; if ( unique . add ( next ) ) queue . add ( next ) ; } break ; case SHUFFLED : unique . clear ( ) ; tosort . clear ( ) ; ThreadLocalRandom rand = ThreadLocalRandom . current ( ) ; for ( int i = 0 ; i < count ; i ++ ) { Object next = generator . generate ( ) ; if ( unique . add ( next ) ) tosort . add ( next ) ; } for ( int i = 0 ; i < tosort . size ( ) ; i ++ ) { int index = rand . nextInt ( i , tosort . size ( ) ) ; Object obj = tosort . get ( index ) ; tosort . set ( index , tosort . get ( i ) ) ; queue . add ( obj ) ; } break ; default : throw new IllegalStateException ( ) ; } }
generate the clustering components into the queue
8,220
public WaitQueue . Signal requestExtraSync ( ) { WaitQueue . Signal signal = syncComplete . register ( ) ; haveWork . release ( 1 ) ; return signal ; }
Sync immediately but don't block for the sync to complete
8,221
static URL getStorageConfigURL ( ) throws ConfigurationException { String configUrl = System . getProperty ( "cassandra.config" ) ; if ( configUrl == null ) configUrl = DEFAULT_CONFIGURATION ; URL url ; try { url = new URL ( configUrl ) ; url . openStream ( ) . close ( ) ; } catch ( Exception e ) { ClassLoader loader = DatabaseDescriptor . class . getClassLoader ( ) ; url = loader . getResource ( configUrl ) ; if ( url == null ) { String required = "file:" + File . separator + File . separator ; if ( ! configUrl . startsWith ( required ) ) throw new ConfigurationException ( "Expecting URI in variable: [cassandra.config]. Please prefix the file with " + required + File . separator + " for local files or " + required + "<server>" + File . separator + " for remote files. Aborting. If you are executing this from an external tool, it needs to set Config.setClientMode(true) to avoid loading configuration." ) ; throw new ConfigurationException ( "Cannot locate " + configUrl + ". If this is a local file, please confirm you've provided " + required + File . separator + " as a URI prefix." ) ; } } return url ; }
Inspect the classpath to find storage configuration file
8,222
public void deleteFromSchema ( Mutation mutation , long timestamp ) { ColumnFamily cf = mutation . addOrGet ( CFMetaData . SchemaColumnsCf ) ; int ldt = ( int ) ( System . currentTimeMillis ( ) / 1000 ) ; Composite prefix = CFMetaData . SchemaColumnsCf . comparator . make ( cfName , name . toString ( ) ) ; cf . addAtom ( new RangeTombstone ( prefix , prefix . end ( ) , timestamp , ldt ) ) ; }
Drop specified column from the schema using given mutation .
8,223
public static List < ColumnDefinition > fromSchema ( UntypedResultSet serializedColumns , String ksName , String cfName , AbstractType < ? > rawComparator , boolean isSuper ) { List < ColumnDefinition > cds = new ArrayList < > ( ) ; for ( UntypedResultSet . Row row : serializedColumns ) { Kind kind = row . has ( KIND ) ? Kind . deserialize ( row . getString ( KIND ) ) : Kind . REGULAR ; Integer componentIndex = null ; if ( row . has ( COMPONENT_INDEX ) ) componentIndex = row . getInt ( COMPONENT_INDEX ) ; else if ( kind == Kind . CLUSTERING_COLUMN && isSuper ) componentIndex = 1 ; AbstractType < ? > comparator = getComponentComparator ( rawComparator , componentIndex , kind ) ; ColumnIdentifier name = new ColumnIdentifier ( comparator . fromString ( row . getString ( COLUMN_NAME ) ) , comparator ) ; AbstractType < ? > validator ; try { validator = TypeParser . parse ( row . getString ( TYPE ) ) ; } catch ( RequestValidationException e ) { throw new RuntimeException ( e ) ; } IndexType indexType = null ; if ( row . has ( INDEX_TYPE ) ) indexType = IndexType . valueOf ( row . getString ( INDEX_TYPE ) ) ; Map < String , String > indexOptions = null ; if ( row . has ( INDEX_OPTIONS ) ) indexOptions = FBUtilities . fromJsonMap ( row . getString ( INDEX_OPTIONS ) ) ; String indexName = null ; if ( row . has ( INDEX_NAME ) ) indexName = row . getString ( INDEX_NAME ) ; cds . add ( new ColumnDefinition ( ksName , cfName , name , validator , indexType , indexOptions , indexName , componentIndex , kind ) ) ; } return cds ; }
Deserialize columns from storage - level representation
8,224
public ByteBuffer getSerializedValue ( ByteBuffer serializedMap , ByteBuffer serializedKey , AbstractType keyType ) { try { ByteBuffer input = serializedMap . duplicate ( ) ; int n = readCollectionSize ( input , Server . VERSION_3 ) ; for ( int i = 0 ; i < n ; i ++ ) { ByteBuffer kbb = readValue ( input , Server . VERSION_3 ) ; ByteBuffer vbb = readValue ( input , Server . VERSION_3 ) ; int comparison = keyType . compare ( kbb , serializedKey ) ; if ( comparison == 0 ) return vbb ; else if ( comparison > 0 ) return null ; } return null ; } catch ( BufferUnderflowException e ) { throw new MarshalException ( "Not enough bytes to read a map" ) ; } }
Given a serialized map gets the value associated with a given key .
8,225
public static < V > Object [ ] build ( Iterable < V > source , int size , Comparator < V > comparator , boolean sorted , UpdateFunction < V > updateF ) { if ( size < FAN_FACTOR ) { V [ ] values = ( V [ ] ) new Object [ size + ( size & 1 ) ] ; { int i = 0 ; for ( V v : source ) values [ i ++ ] = v ; } if ( ! sorted ) Arrays . sort ( values , 0 , size , comparator ) ; if ( updateF != null ) { for ( int i = 0 ; i < size ; i ++ ) values [ i ] = updateF . apply ( values [ i ] ) ; updateF . allocated ( ObjectSizes . sizeOfArray ( values ) ) ; } return values ; } if ( ! sorted ) source = sorted ( source , comparator , size ) ; Queue < Builder > queue = modifier . get ( ) ; Builder builder = queue . poll ( ) ; if ( builder == null ) builder = new Builder ( ) ; Object [ ] btree = builder . build ( source , updateF , size ) ; queue . add ( builder ) ; return btree ; }
Creates a BTree containing all of the objects in the provided collection
8,226
public static < V > Cursor < V , V > slice ( Object [ ] btree , boolean forwards ) { Cursor < V , V > r = new Cursor < > ( ) ; r . reset ( btree , forwards ) ; return r ; }
Returns an Iterator over the entire tree
8,227
public static < K , V extends K > Cursor < K , V > slice ( Object [ ] btree , Comparator < K > comparator , K start , boolean startInclusive , K end , boolean endInclusive , boolean forwards ) { Cursor < K , V > r = new Cursor < > ( ) ; r . reset ( btree , comparator , start , startInclusive , end , endInclusive , forwards ) ; return r ; }
Returns an Iterator over a sub - range of the tree
8,228
static int getLeafKeyEnd ( Object [ ] node ) { int len = node . length ; if ( len == 0 ) return 0 ; else if ( node [ len - 1 ] == null ) return len - 1 ; else return len ; }
get the last index that is non - null in the leaf node
8,229
private static < V > Collection < V > sorted ( Iterable < V > source , Comparator < V > comparator , int size ) { V [ ] vs = ( V [ ] ) new Object [ size ] ; int i = 0 ; for ( V v : source ) vs [ i ++ ] = v ; Arrays . sort ( vs , comparator ) ; return Arrays . asList ( vs ) ; }
return a sorted collection
8,230
public int completed ( InetAddress request ) { String dc = DatabaseDescriptor . getEndpointSnitch ( ) . getDatacenter ( request ) ; Queue < InetAddress > requests = requestsByDatacenter . get ( dc ) ; assert requests != null ; assert request . equals ( requests . peek ( ) ) ; requests . poll ( ) ; if ( ! requests . isEmpty ( ) ) processor . process ( requests . peek ( ) ) ; return -- remaining ; }
Returns how many requests remain
8,231
public static DataResource fromName ( String name ) { String [ ] parts = StringUtils . split ( name , '/' ) ; if ( ! parts [ 0 ] . equals ( ROOT_NAME ) || parts . length > 3 ) throw new IllegalArgumentException ( String . format ( "%s is not a valid data resource name" , name ) ) ; if ( parts . length == 1 ) return root ( ) ; if ( parts . length == 2 ) return keyspace ( parts [ 1 ] ) ; return columnFamily ( parts [ 1 ] , parts [ 2 ] ) ; }
Parses a data resource name into a DataResource instance .
8,232
protected < T extends Number > Gauge < T > createColumnFamilyGauge ( final String name , Gauge < T > gauge ) { return createColumnFamilyGauge ( name , gauge , new Gauge < Long > ( ) { public Long value ( ) { long total = 0 ; for ( Metric cfGauge : allColumnFamilyMetrics . get ( name ) ) { total = total + ( ( Gauge < ? extends Number > ) cfGauge ) . value ( ) . longValue ( ) ; } return total ; } } ) ; }
Create a gauge that will be part of a merged version of all column families . The global gauge will merge each CF gauge by adding their values
8,233
protected < G , T > Gauge < T > createColumnFamilyGauge ( String name , Gauge < T > gauge , Gauge < G > globalGauge ) { Gauge < T > cfGauge = Metrics . newGauge ( factory . createMetricName ( name ) , gauge ) ; if ( register ( name , cfGauge ) ) { Metrics . newGauge ( globalNameFactory . createMetricName ( name ) , globalGauge ) ; } return cfGauge ; }
Create a gauge that will be part of a merged version of all column families . The global gauge is defined as the globalGauge parameter
8,234
protected Counter createColumnFamilyCounter ( final String name ) { Counter cfCounter = Metrics . newCounter ( factory . createMetricName ( name ) ) ; if ( register ( name , cfCounter ) ) { Metrics . newGauge ( globalNameFactory . createMetricName ( name ) , new Gauge < Long > ( ) { public Long value ( ) { long total = 0 ; for ( Metric cfGauge : allColumnFamilyMetrics . get ( name ) ) { total += ( ( Counter ) cfGauge ) . count ( ) ; } return total ; } } ) ; } return cfCounter ; }
Creates a counter that will also have a global counter that's the sum of all counters across different column families
8,235
protected ColumnFamilyHistogram createColumnFamilyHistogram ( String name , Histogram keyspaceHistogram ) { Histogram cfHistogram = Metrics . newHistogram ( factory . createMetricName ( name ) , true ) ; register ( name , cfHistogram ) ; return new ColumnFamilyHistogram ( cfHistogram , keyspaceHistogram , Metrics . newHistogram ( globalNameFactory . createMetricName ( name ) , true ) ) ; }
Create a histogram - like interface that will register both a CF keyspace and global level histogram and forward any updates to both
8,236
private boolean register ( String name , Metric metric ) { boolean ret = allColumnFamilyMetrics . putIfAbsent ( name , new HashSet < Metric > ( ) ) == null ; allColumnFamilyMetrics . get ( name ) . add ( metric ) ; all . add ( name ) ; return ret ; }
Registers a metric to be removed when unloading CF .
8,237
public static void setOutputKeyspace ( Configuration conf , String keyspace ) { if ( keyspace == null ) throw new UnsupportedOperationException ( "keyspace may not be null" ) ; conf . set ( OUTPUT_KEYSPACE_CONFIG , keyspace ) ; }
Set the keyspace for the output of this job .
8,238
public static void setOutputColumnFamily ( Configuration conf , String keyspace , String columnFamily ) { setOutputKeyspace ( conf , keyspace ) ; setOutputColumnFamily ( conf , columnFamily ) ; }
Set the column family for the output of this job .
8,239
public static void setInputSlicePredicate ( Configuration conf , SlicePredicate predicate ) { conf . set ( INPUT_PREDICATE_CONFIG , thriftToString ( predicate ) ) ; }
Set the predicate that determines what columns will be selected from each row .
8,240
public static KeyRange getInputKeyRange ( Configuration conf ) { String str = conf . get ( INPUT_KEYRANGE_CONFIG ) ; return str == null ? null : keyRangeFromString ( str ) ; }
may be null if unset
8,241
public OnDiskAtom readNext ( ) throws IOException { Composite name = nameDeserializer . readNext ( ) ; assert ! name . isEmpty ( ) ; nextFlags = nextFlags == Integer . MIN_VALUE ? in . readUnsignedByte ( ) : nextFlags ; OnDiskAtom atom = ( nextFlags & ColumnSerializer . RANGE_TOMBSTONE_MASK ) != 0 ? type . rangeTombstoneSerializer ( ) . deserializeBody ( in , name , version ) : type . columnSerializer ( ) . deserializeColumnBody ( in , ( CellName ) name , nextFlags , flag , expireBefore ) ; nextFlags = Integer . MIN_VALUE ; return atom ; }
Returns the next atom .
8,242
public void skipNext ( ) throws IOException { nameDeserializer . skipNext ( ) ; nextFlags = nextFlags == Integer . MIN_VALUE ? in . readUnsignedByte ( ) : nextFlags ; if ( ( nextFlags & ColumnSerializer . RANGE_TOMBSTONE_MASK ) != 0 ) type . rangeTombstoneSerializer ( ) . skipBody ( in , version ) ; else type . columnSerializer ( ) . skipColumnBody ( in , nextFlags ) ; nextFlags = Integer . MIN_VALUE ; }
Skips the next atom .
8,243
public void connectionComplete ( SocketAddress socket ) { assert socket != null ; activeSocketSessions . remove ( socket ) ; if ( logger . isTraceEnabled ( ) ) logger . trace ( "ClientState removed for socket addr {}" , socket ) ; }
The connection associated with
8,244
public void delete ( final DecoratedKey partitionKey ) { if ( indexQueue == null ) { deleteInner ( partitionKey ) ; } else { indexQueue . submitAsynchronous ( partitionKey , new Runnable ( ) { public void run ( ) { deleteInner ( partitionKey ) ; } } ) ; } }
Deletes the partition identified by the specified partition key . This operation is performed asynchronously .
8,245
public final void commit ( ) { if ( indexQueue == null ) { luceneIndex . commit ( ) ; } else { indexQueue . submitSynchronous ( new Runnable ( ) { public void run ( ) { luceneIndex . commit ( ) ; } } ) ; } }
Commits the pending changes . This operation is performed asynchronously .
8,246
public static void printOptions ( PrintStream out , String command , GroupedOptions ... groupings ) { out . println ( ) ; boolean firstRow = true ; for ( GroupedOptions grouping : groupings ) { if ( ! firstRow ) { out . println ( " OR " ) ; } firstRow = false ; StringBuilder sb = new StringBuilder ( "Usage: " + command ) ; for ( Option option : grouping . options ( ) ) { sb . append ( " " ) ; sb . append ( option . shortDisplay ( ) ) ; } out . println ( sb . toString ( ) ) ; } out . println ( ) ; final Set < Option > printed = new HashSet < > ( ) ; for ( GroupedOptions grouping : groupings ) { for ( Option option : grouping . options ( ) ) { if ( printed . add ( option ) ) { if ( option . longDisplay ( ) != null ) { out . println ( " " + option . longDisplay ( ) ) ; for ( String row : option . multiLineDisplay ( ) ) out . println ( " " + row ) ; } } } } }
pretty prints all of the option groupings
8,247
public ByteBuffer createMessage ( boolean compress , int version ) { int header = 0 ; if ( compress ) header |= 4 ; header |= 8 ; header |= ( version << 8 ) ; byte [ ] bytes ; try { int size = ( int ) StreamInitMessage . serializer . serializedSize ( this , version ) ; DataOutputBuffer buffer = new DataOutputBuffer ( size ) ; StreamInitMessage . serializer . serialize ( this , buffer , version ) ; bytes = buffer . getData ( ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } assert bytes . length > 0 ; ByteBuffer buffer = ByteBuffer . allocate ( 4 + 4 + bytes . length ) ; buffer . putInt ( MessagingService . PROTOCOL_MAGIC ) ; buffer . putInt ( header ) ; buffer . put ( bytes ) ; buffer . flip ( ) ; return buffer ; }
Create serialized message .
8,248
public RowIndexEntry tryAppend ( AbstractCompactedRow row ) { writer . mark ( ) ; try { return append ( row ) ; } catch ( Throwable t ) { writer . resetAndTruncate ( ) ; throw t ; } }
attempts to append the row ; if it fails , resets the writer position
8,249
private void moveStarts ( SSTableReader newReader , DecoratedKey lowerbound , boolean reset ) { if ( isOffline ) return ; List < SSTableReader > toReplace = new ArrayList < > ( ) ; List < SSTableReader > replaceWith = new ArrayList < > ( ) ; final List < DecoratedKey > invalidateKeys = new ArrayList < > ( ) ; if ( ! reset ) { invalidateKeys . addAll ( cachedKeys . keySet ( ) ) ; for ( Map . Entry < DecoratedKey , RowIndexEntry > cacheKey : cachedKeys . entrySet ( ) ) newReader . cacheKey ( cacheKey . getKey ( ) , cacheKey . getValue ( ) ) ; } cachedKeys = new HashMap < > ( ) ; for ( SSTableReader sstable : ImmutableList . copyOf ( rewriting ) ) { final SSTableReader latest = sstable . getCurrentReplacement ( ) ; SSTableReader replacement ; if ( reset ) { DecoratedKey newStart = originalStarts . get ( sstable . descriptor ) ; replacement = latest . cloneWithNewStart ( newStart , null ) ; } else { if ( latest . openReason == SSTableReader . OpenReason . SHADOWED ) continue ; if ( latest . first . compareTo ( lowerbound ) > 0 ) continue ; final Runnable runOnClose = new Runnable ( ) { public void run ( ) { for ( DecoratedKey key : invalidateKeys ) latest . invalidateCacheKey ( key ) ; } } ; if ( lowerbound . compareTo ( latest . last ) >= 0 ) { replacement = latest . cloneAsShadowed ( runOnClose ) ; } else { DecoratedKey newStart = latest . firstKeyBeyond ( lowerbound ) ; assert newStart != null ; replacement = latest . cloneWithNewStart ( newStart , runOnClose ) ; } } toReplace . add ( latest ) ; replaceWith . add ( replacement ) ; rewriting . remove ( sstable ) ; rewriting . add ( replacement ) ; } cfs . getDataTracker ( ) . replaceWithNewInstances ( toReplace , replaceWith ) ; }
Replace the readers we are rewriting with cloneWithNewStart reclaiming any page cache that is no longer needed and transferring any key cache entries over to the new reader expiring them from the old . if reset is true we are instead restoring the starts of the readers from before the rewriting began
8,250
private void replaceWithFinishedReaders ( List < SSTableReader > finished ) { if ( isOffline ) { for ( SSTableReader reader : discard ) { if ( reader . getCurrentReplacement ( ) == reader ) reader . markObsolete ( ) ; reader . selfRef ( ) . release ( ) ; } } else { dataTracker . replaceEarlyOpenedFiles ( discard , finished ) ; dataTracker . unmarkCompacting ( discard ) ; } discard . clear ( ) ; }
cleanup all our temporary readers and swap in our new ones
8,251
public CQLSSTableWriter rawAddRow ( ByteBuffer ... values ) throws InvalidRequestException , IOException { return rawAddRow ( Arrays . asList ( values ) ) ; }
Adds a new row to the writer given already serialized values .
8,252
public static void calculatePendingRanges ( AbstractReplicationStrategy strategy , String keyspaceName ) { TokenMetadata tm = StorageService . instance . getTokenMetadata ( ) ; Multimap < Range < Token > , InetAddress > pendingRanges = HashMultimap . create ( ) ; BiMultiValMap < Token , InetAddress > bootstrapTokens = tm . getBootstrapTokens ( ) ; Set < InetAddress > leavingEndpoints = tm . getLeavingEndpoints ( ) ; if ( bootstrapTokens . isEmpty ( ) && leavingEndpoints . isEmpty ( ) && tm . getMovingEndpoints ( ) . isEmpty ( ) ) { if ( logger . isDebugEnabled ( ) ) logger . debug ( "No bootstrapping, leaving or moving nodes, and no relocating tokens -> empty pending ranges for {}" , keyspaceName ) ; tm . setPendingRanges ( keyspaceName , pendingRanges ) ; return ; } Multimap < InetAddress , Range < Token > > addressRanges = strategy . getAddressRanges ( ) ; TokenMetadata allLeftMetadata = tm . cloneAfterAllLeft ( ) ; Set < Range < Token > > affectedRanges = new HashSet < Range < Token > > ( ) ; for ( InetAddress endpoint : leavingEndpoints ) affectedRanges . addAll ( addressRanges . get ( endpoint ) ) ; TokenMetadata metadata = tm . cloneOnlyTokenMap ( ) ; for ( Range < Token > range : affectedRanges ) { Set < InetAddress > currentEndpoints = ImmutableSet . copyOf ( strategy . calculateNaturalEndpoints ( range . right , metadata ) ) ; Set < InetAddress > newEndpoints = ImmutableSet . copyOf ( strategy . calculateNaturalEndpoints ( range . right , allLeftMetadata ) ) ; pendingRanges . putAll ( range , Sets . difference ( newEndpoints , currentEndpoints ) ) ; } Multimap < InetAddress , Token > bootstrapAddresses = bootstrapTokens . inverse ( ) ; for ( InetAddress endpoint : bootstrapAddresses . keySet ( ) ) { Collection < Token > tokens = bootstrapAddresses . get ( endpoint ) ; allLeftMetadata . updateNormalTokens ( tokens , endpoint ) ; for ( Range < Token > range : strategy . getAddressRanges ( allLeftMetadata ) . get ( endpoint ) ) pendingRanges . 
put ( range , endpoint ) ; allLeftMetadata . removeEndpoint ( endpoint ) ; } for ( Pair < Token , InetAddress > moving : tm . getMovingEndpoints ( ) ) { InetAddress endpoint = moving . right ; allLeftMetadata . updateNormalToken ( moving . left , endpoint ) ; for ( Range < Token > range : strategy . getAddressRanges ( allLeftMetadata ) . get ( endpoint ) ) { pendingRanges . put ( range , endpoint ) ; } allLeftMetadata . removeEndpoint ( endpoint ) ; } tm . setPendingRanges ( keyspaceName , pendingRanges ) ; if ( logger . isDebugEnabled ( ) ) logger . debug ( "Pending ranges:\n" + ( pendingRanges . isEmpty ( ) ? "<empty>" : tm . printPendingRanges ( ) ) ) ; }
public & static for testing purposes
8,253
public synchronized void gossiperStarting ( ) { gossiperInitialized = true ; StorageService . instance . gossipSnitchInfo ( ) ; Gossiper . instance . register ( new ReconnectableSnitchHelper ( this , localNodeData . datacenter , true ) ) ; }
Called in preparation for the initiation of the gossip loop .
8,254
public synchronized ScheduledFuture scheduleTimeout ( final int sequenceNumber , long time , TimeUnit unit ) { if ( ! files . containsKey ( sequenceNumber ) ) return null ; ScheduledFuture future = timeoutExecutor . schedule ( new Runnable ( ) { public void run ( ) { synchronized ( StreamTransferTask . this ) { timeoutTasks . remove ( sequenceNumber ) ; StreamTransferTask . this . complete ( sequenceNumber ) ; } } } , time , unit ) ; ScheduledFuture prev = timeoutTasks . put ( sequenceNumber , future ) ; assert prev == null ; return future ; }
Schedules a timeout task that releases the reference for a sent file. If no ACK is received from the receiver within the given time, the task releases the reference.
8,255
public void add ( Composite start , Composite end , long markedAt , int delTime ) { if ( isEmpty ( ) ) { addInternal ( 0 , start , end , markedAt , delTime ) ; return ; } int c = comparator . compare ( ends [ size - 1 ] , start ) ; if ( c <= 0 ) { addInternal ( size , start , end , markedAt , delTime ) ; } else { int pos = Arrays . binarySearch ( ends , 0 , size , start , comparator ) ; insertFrom ( ( pos >= 0 ? pos : - pos - 1 ) , start , end , markedAt , delTime ) ; } boundaryHeapSize += start . unsharedHeapSize ( ) + end . unsharedHeapSize ( ) ; }
Adds a new range tombstone .
8,256
public void purge ( int gcBefore ) { int j = 0 ; for ( int i = 0 ; i < size ; i ++ ) { if ( delTimes [ i ] >= gcBefore ) setInternal ( j ++ , starts [ i ] , ends [ i ] , markedAts [ i ] , delTimes [ i ] ) ; } size = j ; }
Removes all range tombstones whose local deletion time is older than gcBefore .
8,257
public void updateDigest ( MessageDigest digest ) { ByteBuffer longBuffer = ByteBuffer . allocate ( 8 ) ; for ( int i = 0 ; i < size ; i ++ ) { for ( int j = 0 ; j < starts [ i ] . size ( ) ; j ++ ) digest . update ( starts [ i ] . get ( j ) . duplicate ( ) ) ; for ( int j = 0 ; j < ends [ i ] . size ( ) ; j ++ ) digest . update ( ends [ i ] . get ( j ) . duplicate ( ) ) ; longBuffer . putLong ( 0 , markedAts [ i ] ) ; digest . update ( longBuffer . array ( ) , 0 , 8 ) ; } }
Calculates digest for triggering read repair on mismatch
8,258
public long [ ] getHashBuckets ( ByteBuffer key , int hashCount , long max ) { long [ ] hash = new long [ 2 ] ; hash ( key , key . position ( ) , key . remaining ( ) , 0L , hash ) ; long [ ] indexes = new long [ hashCount ] ; setIndexes ( hash [ 0 ] , hash [ 1 ] , hashCount , max , indexes ) ; return indexes ; }
Computes hash buckets using freshly allocated arrays, rather than using the thread-local buffer as in production.
8,259
private long [ ] indexes ( ByteBuffer key ) { long [ ] indexes = reusableIndexes . get ( ) ; hash ( key , key . position ( ) , key . remaining ( ) , 0L , indexes ) ; setIndexes ( indexes [ 0 ] , indexes [ 1 ] , hashCount , bitset . capacity ( ) , indexes ) ; return indexes ; }
Reuses the thread-local array for both the hash output and the bucket indexes, avoiding a second thread-local lookup.
8,260
public final void addFields ( Document document , CellName cellName ) { String serializedKey = ByteBufferUtils . toString ( cellName . toByteBuffer ( ) ) ; Field field = new StringField ( FIELD_NAME , serializedKey , Field . Store . YES ) ; document . add ( field ) ; }
Adds to the specified document the clustering key contained in the specified cell name .
8,261
public final List < CellName > clusteringKeys ( ColumnFamily columnFamily ) { List < CellName > clusteringKeys = new ArrayList < > ( ) ; CellName lastClusteringKey = null ; for ( Cell cell : columnFamily ) { CellName cellName = cell . name ( ) ; if ( ! isStatic ( cellName ) ) { CellName clusteringKey = extractClusteringKey ( cellName ) ; if ( lastClusteringKey == null || ! lastClusteringKey . isSameCQL3RowAs ( cellNameType , clusteringKey ) ) { lastClusteringKey = clusteringKey ; clusteringKeys . add ( clusteringKey ) ; } } } return sort ( clusteringKeys ) ; }
Returns the common clustering keys of the specified column family .
8,262
public final CellName makeCellName ( CellName cellName , ColumnDefinition columnDefinition ) { return cellNameType . create ( start ( cellName ) , columnDefinition ) ; }
Returns the storage engine column name for the specified column identifier using the specified clustering key .
8,263
public final CellName clusteringKey ( BytesRef bytesRef ) { String string = bytesRef . utf8ToString ( ) ; ByteBuffer bb = ByteBufferUtils . fromString ( string ) ; return cellNameType . cellFromByteBuffer ( bb ) ; }
Returns the clustering key contained in the specified Lucene field value .
8,264
public final Composite start ( CellName cellName ) { CBuilder builder = cellNameType . builder ( ) ; for ( int i = 0 ; i < cellName . clusteringSize ( ) ; i ++ ) { ByteBuffer component = cellName . get ( i ) ; builder . add ( component ) ; } return builder . build ( ) ; }
Returns the first possible cell name of those having the same clustering key that the specified cell name .
8,265
public final Composite end ( CellName cellName ) { return start ( cellName ) . withEOC ( Composite . EOC . END ) ; }
Returns the last possible cell name of those having the same clustering key that the specified cell name .
8,266
public final List < CellName > sort ( List < CellName > clusteringKeys ) { List < CellName > result = new ArrayList < > ( clusteringKeys ) ; Collections . sort ( result , new Comparator < CellName > ( ) { public int compare ( CellName o1 , CellName o2 ) { return cellNameType . compare ( o1 , o2 ) ; } } ) ; return result ; }
Returns the specified list of clustering keys sorted according to the table cell name comparator .
8,267
public String get ( String propertyName , String defaultValue ) { return properties . getProperty ( propertyName , defaultValue ) ; }
Get a snitch property value or return defaultValue if not defined .
8,268
public static AbstractType < ? > parse ( String str ) throws SyntaxException , ConfigurationException { if ( str == null ) return BytesType . instance ; AbstractType < ? > type = cache . get ( str ) ; if ( type != null ) return type ; int i = 0 ; i = skipBlank ( str , i ) ; int j = i ; while ( ! isEOS ( str , i ) && isIdentifierChar ( str . charAt ( i ) ) ) ++ i ; if ( i == j ) return BytesType . instance ; String name = str . substring ( j , i ) ; i = skipBlank ( str , i ) ; if ( ! isEOS ( str , i ) && str . charAt ( i ) == '(' ) type = getAbstractType ( name , new TypeParser ( str , i ) ) ; else type = getAbstractType ( name ) ; cache . put ( str , type ) ; return type ; }
Parse a string containing an type definition .
8,269
public AbstractType < ? > parse ( ) throws SyntaxException , ConfigurationException { skipBlank ( ) ; String name = readNextIdentifier ( ) ; skipBlank ( ) ; if ( ! isEOS ( ) && str . charAt ( idx ) == '(' ) return getAbstractType ( name , this ) ; else return getAbstractType ( name ) ; }
Parse an AbstractType from current position of this parser .
8,270
private boolean skipBlankAndComma ( ) { boolean commaFound = false ; while ( ! isEOS ( ) ) { int c = str . charAt ( idx ) ; if ( c == ',' ) { if ( commaFound ) return true ; else commaFound = true ; } else if ( ! isBlank ( c ) ) { return true ; } ++ idx ; } return false ; }
Skips all blank characters and at most one comma; returns true if the end of the string has not been reached.
8,271
public String readNextIdentifier ( ) { int i = idx ; while ( ! isEOS ( ) && isIdentifierChar ( str . charAt ( idx ) ) ) ++ idx ; return str . substring ( i , idx ) ; }
Leaves idx positioned on the character that stopped the read.
8,272
private void seekToChunkStart ( ) { if ( getOnDiskFilePointer ( ) != chunkOffset ) { try { out . seek ( chunkOffset ) ; } catch ( IOException e ) { throw new FSReadError ( e , getPath ( ) ) ; } } }
Seek to the offset where next compressed data chunk should be stored .
8,273
public void validate ( CFMetaData metadata ) { for ( Map . Entry < String , ColumnMapper > entry : columnMappers . entrySet ( ) ) { String name = entry . getKey ( ) ; ColumnMapper columnMapper = entry . getValue ( ) ; ByteBuffer columnName = UTF8Type . instance . decompose ( name ) ; ColumnDefinition columnDefinition = metadata . getColumnDefinition ( columnName ) ; if ( columnDefinition == null ) { throw new RuntimeException ( "No column definition for mapper " + name ) ; } if ( columnDefinition . isStatic ( ) ) { throw new RuntimeException ( "Lucene indexes are not allowed on static columns as " + name ) ; } AbstractType < ? > type = columnDefinition . type ; if ( ! columnMapper . supports ( type ) ) { throw new RuntimeException ( String . format ( "Type '%s' is not supported by mapper '%s'" , type , name ) ) ; } } }
Checks if this is consistent with the specified column family metadata .
8,274
public void reload ( ) { Collection < ByteBuffer > indexedColumnNames = indexesByColumn . keySet ( ) ; for ( ByteBuffer indexedColumn : indexedColumnNames ) { ColumnDefinition def = baseCfs . metadata . getColumnDefinition ( indexedColumn ) ; if ( def == null || def . getIndexType ( ) == null ) removeIndexedColumn ( indexedColumn ) ; } for ( ColumnDefinition cdef : baseCfs . metadata . allColumns ( ) ) if ( cdef . getIndexType ( ) != null && ! indexedColumnNames . contains ( cdef . name . bytes ) ) addIndexedColumn ( cdef ) ; for ( SecondaryIndex index : allIndexes ) index . reload ( ) ; }
Drops and adds new indexes associated with the underlying CF
8,275
public void maybeBuildSecondaryIndexes ( Collection < SSTableReader > sstables , Set < String > idxNames ) { if ( idxNames . isEmpty ( ) ) return ; logger . info ( String . format ( "Submitting index build of %s for data in %s" , idxNames , StringUtils . join ( sstables , ", " ) ) ) ; SecondaryIndexBuilder builder = new SecondaryIndexBuilder ( baseCfs , idxNames , new ReducingKeyIterator ( sstables ) ) ; Future < ? > future = CompactionManager . instance . submitIndexBuild ( builder ) ; FBUtilities . waitOnFuture ( future ) ; flushIndexesBlocking ( ) ; logger . info ( "Index build of {} complete" , idxNames ) ; }
Does a full blocking rebuild of the indexes specified by columns from the sstables . Does nothing if columns is empty .
8,276
public void removeIndexedColumn ( ByteBuffer column ) { SecondaryIndex index = indexesByColumn . remove ( column ) ; if ( index == null ) return ; if ( index instanceof PerRowSecondaryIndex ) { index . removeColumnDef ( column ) ; if ( index . getColumnDefs ( ) . isEmpty ( ) ) { allIndexes . remove ( index ) ; rowLevelIndexMap . remove ( index . getClass ( ) ) ; } } else { allIndexes . remove ( index ) ; } index . removeIndex ( column ) ; SystemKeyspace . setIndexRemoved ( baseCfs . metadata . ksName , index . getNameForSystemKeyspace ( column ) ) ; }
Removes a existing index
8,277
public synchronized Future < ? > addIndexedColumn ( ColumnDefinition cdef ) { if ( indexesByColumn . containsKey ( cdef . name . bytes ) ) return null ; assert cdef . getIndexType ( ) != null ; SecondaryIndex index ; try { index = SecondaryIndex . createInstance ( baseCfs , cdef ) ; } catch ( ConfigurationException e ) { throw new RuntimeException ( e ) ; } if ( index instanceof PerRowSecondaryIndex ) { SecondaryIndex currentIndex = rowLevelIndexMap . get ( index . getClass ( ) ) ; if ( currentIndex == null ) { rowLevelIndexMap . put ( index . getClass ( ) , index ) ; index . init ( ) ; } else { index = currentIndex ; index . addColumnDef ( cdef ) ; logger . info ( "Creating new index : {}" , cdef ) ; } } else { if ( cdef . getIndexType ( ) == IndexType . CUSTOM && index instanceof AbstractSimplePerColumnSecondaryIndex ) throw new RuntimeException ( "Cannot use a subclass of AbstractSimplePerColumnSecondaryIndex as a CUSTOM index, as they assume they are CFS backed" ) ; index . init ( ) ; } indexesByColumn . put ( cdef . name . bytes , index ) ; allIndexes . add ( index ) ; if ( index . isIndexBuilt ( cdef . name . bytes ) ) return null ; return index . buildIndexAsync ( ) ; }
Adds and builds a index for a column
8,278
public void flushIndexesBlocking ( ) { List < Future < ? > > wait = new ArrayList < > ( ) ; synchronized ( baseCfs . getDataTracker ( ) ) { for ( SecondaryIndex index : allIndexes ) if ( index . getIndexCfs ( ) != null ) wait . add ( index . getIndexCfs ( ) . forceFlush ( ) ) ; } for ( SecondaryIndex index : allIndexes ) if ( index . getIndexCfs ( ) == null ) index . forceBlockingFlush ( ) ; FBUtilities . waitOnFutures ( wait ) ; }
Flush all indexes to disk
8,279
public void indexRow ( ByteBuffer key , ColumnFamily cf , OpOrder . Group opGroup ) { Set < Class < ? extends SecondaryIndex > > appliedRowLevelIndexes = null ; for ( SecondaryIndex index : allIndexes ) { if ( index instanceof PerRowSecondaryIndex ) { if ( appliedRowLevelIndexes == null ) appliedRowLevelIndexes = new HashSet < > ( ) ; if ( appliedRowLevelIndexes . add ( index . getClass ( ) ) ) ( ( PerRowSecondaryIndex ) index ) . index ( key , cf ) ; } else { for ( Cell cell : cf ) if ( cell . isLive ( ) && index . indexes ( cell . name ( ) ) ) ( ( PerColumnSecondaryIndex ) index ) . insert ( key , cell , opGroup ) ; } } }
When building an index against existing data, adds the given row to the index.
8,280
public void deleteFromIndexes ( DecoratedKey key , List < Cell > indexedColumnsInRow , OpOrder . Group opGroup ) { Set < Class < ? extends SecondaryIndex > > cleanedRowLevelIndexes = null ; for ( Cell cell : indexedColumnsInRow ) { for ( SecondaryIndex index : indexFor ( cell . name ( ) ) ) { if ( index instanceof PerRowSecondaryIndex ) { if ( cleanedRowLevelIndexes == null ) cleanedRowLevelIndexes = new HashSet < > ( ) ; if ( cleanedRowLevelIndexes . add ( index . getClass ( ) ) ) ( ( PerRowSecondaryIndex ) index ) . delete ( key , opGroup ) ; } else { ( ( PerColumnSecondaryIndex ) index ) . deleteForCleanup ( key . getKey ( ) , cell , opGroup ) ; } } } }
Delete all columns from all indexes for this row . For when cleanup rips a row out entirely .
8,281
public List < SecondaryIndexSearcher > getIndexSearchersForQuery ( List < IndexExpression > clause ) { Map < String , Set < ByteBuffer > > groupByIndexType = new HashMap < > ( ) ; for ( IndexExpression ix : clause ) { SecondaryIndex index = getIndexForColumn ( ix . column ) ; if ( index == null || ! index . supportsOperator ( ix . operator ) ) continue ; Set < ByteBuffer > columns = groupByIndexType . get ( index . indexTypeForGrouping ( ) ) ; if ( columns == null ) { columns = new HashSet < > ( ) ; groupByIndexType . put ( index . indexTypeForGrouping ( ) , columns ) ; } columns . add ( ix . column ) ; } List < SecondaryIndexSearcher > indexSearchers = new ArrayList < > ( groupByIndexType . size ( ) ) ; for ( Set < ByteBuffer > column : groupByIndexType . values ( ) ) indexSearchers . add ( getIndexForColumn ( column . iterator ( ) . next ( ) ) . createSecondaryIndexSearcher ( column ) ) ; return indexSearchers ; }
Get a list of IndexSearchers from the union of expression index types
8,282
public List < Row > search ( ExtendedFilter filter ) { SecondaryIndexSearcher mostSelective = getHighestSelectivityIndexSearcher ( filter . getClause ( ) ) ; if ( mostSelective == null ) return Collections . emptyList ( ) ; else return mostSelective . search ( filter ) ; }
Performs a search across a number of column indexes
8,283
public void write ( DataOutput out ) throws IOException { out . writeUTF ( startToken ) ; out . writeUTF ( endToken ) ; out . writeInt ( dataNodes . length ) ; for ( String endpoint : dataNodes ) { out . writeUTF ( endpoint ) ; } }
KeyspaceSplits as needed by the Writable interface .
8,284
public void afterExecute ( Runnable r , Throwable t ) { super . afterExecute ( r , t ) ; DebuggableThreadPoolExecutor . logExceptionsAfterExecute ( r , t ) ; }
We need this as well as the wrapper for the benefit of non - repeating tasks
8,285
public long snapshotCreationTime ( String snapshotName ) { for ( File dir : dataPaths ) { File snapshotDir = new File ( dir , join ( SNAPSHOT_SUBDIR , snapshotName ) ) ; if ( snapshotDir . exists ( ) ) return snapshotDir . lastModified ( ) ; } throw new RuntimeException ( "Snapshot " + snapshotName + " doesn't exist" ) ; }
The snapshot must exist
8,286
public static List < File > getKSChildDirectories ( String ksName ) { List < File > result = new ArrayList < > ( ) ; for ( DataDirectory dataDirectory : dataDirectories ) { File ksDir = new File ( dataDirectory . location , ksName ) ; File [ ] cfDirs = ksDir . listFiles ( ) ; if ( cfDirs == null ) continue ; for ( File cfDir : cfDirs ) { if ( cfDir . isDirectory ( ) ) result . add ( cfDir ) ; } } return result ; }
Recursively finds all the sub directories in the KS directory .
8,287
protected static String makeFilename ( File directory , final String keyspace , final String columnFamily ) { final Set < Descriptor > existing = new HashSet < Descriptor > ( ) ; directory . list ( new FilenameFilter ( ) { public boolean accept ( File dir , String name ) { Pair < Descriptor , Component > p = SSTable . tryComponentFromFilename ( dir , name ) ; Descriptor desc = p == null ? null : p . left ; if ( desc == null ) return false ; if ( desc . cfname . equals ( columnFamily ) ) existing . add ( desc ) ; return false ; } } ) ; int maxGen = generation . getAndIncrement ( ) ; for ( Descriptor desc : existing ) { while ( desc . generation > maxGen ) { maxGen = generation . getAndIncrement ( ) ; } } return new Descriptor ( directory , keyspace , columnFamily , maxGen + 1 , Descriptor . Type . TEMP ) . filenameFor ( Component . DATA ) ; }
Finds an available generation number and derives the filename from it.
8,288
private void setToRowStart ( RowIndexEntry rowEntry , FileDataInput in ) throws IOException { if ( in == null ) { this . file = sstable . getFileDataInput ( rowEntry . position ) ; } else { this . file = in ; in . seek ( rowEntry . position ) ; } sstable . partitioner . decorateKey ( ByteBufferUtil . readWithShortLength ( file ) ) ; }
Sets the seek position to the start of the row for column scanning .
8,289
public void collectMarkerSpecification ( VariableSpecifications boundNames ) { if ( collectionElement != null ) collectionElement . collectMarkerSpecification ( boundNames ) ; if ( operator . equals ( Operator . IN ) && inValues != null ) { for ( Term value : inValues ) value . collectMarkerSpecification ( boundNames ) ; } else { value . collectMarkerSpecification ( boundNames ) ; } }
Collects the column specification for the bind variables of this operation .
8,290
public Socket createConnection ( InetAddress peer ) throws IOException { int attempts = 0 ; while ( true ) { try { Socket socket = OutboundTcpConnectionPool . newSocket ( peer ) ; socket . setSoTimeout ( DatabaseDescriptor . getStreamingSocketTimeout ( ) ) ; socket . setKeepAlive ( true ) ; return socket ; } catch ( IOException e ) { if ( ++ attempts >= MAX_CONNECT_ATTEMPTS ) throw e ; long waitms = DatabaseDescriptor . getRpcTimeout ( ) * ( long ) Math . pow ( 2 , attempts ) ; logger . warn ( "Failed attempt " + attempts + " to connect to " + peer + ". Retrying in " + waitms + " ms. (" + e + ")" ) ; try { Thread . sleep ( waitms ) ; } catch ( InterruptedException wtf ) { throw new IOException ( "interrupted" , wtf ) ; } } } }
Connect to peer and start exchanging message . When connect attempt fails this retries for maximum of MAX_CONNECT_ATTEMPTS times .
8,291
public void init ( ) { byte sizedepth = ( byte ) ( Math . log10 ( maxsize ) / Math . log10 ( 2 ) ) ; byte depth = ( byte ) Math . min ( sizedepth , hashdepth ) ; root = initHelper ( fullRange . left , fullRange . right , ( byte ) 0 , depth ) ; size = ( long ) Math . pow ( 2 , depth ) ; }
Initializes this tree by splitting it until hashdepth is reached or until an additional level of splits would violate maxsize .
8,292
public TreeRange get ( Token t ) { return getHelper ( root , fullRange . left , fullRange . right , ( byte ) 0 , t ) ; }
For testing purposes . Gets the smallest range containing the token .
8,293
public boolean split ( Token t ) { if ( ! ( size < maxsize ) ) return false ; try { root = splitHelper ( root , fullRange . left , fullRange . right , ( byte ) 0 , t ) ; } catch ( StopRecursion . TooDeep e ) { return false ; } return true ; }
Splits the range containing the given token if no tree limits would be violated . If the range would be split to a depth below hashdepth or if the tree already contains maxsize subranges this operation will fail .
8,294
public static void inspectThrowable ( Throwable t ) { boolean isUnstable = false ; if ( t instanceof OutOfMemoryError ) isUnstable = true ; if ( DatabaseDescriptor . getDiskFailurePolicy ( ) == Config . DiskFailurePolicy . die ) if ( t instanceof FSError || t instanceof CorruptSSTableException ) isUnstable = true ; if ( t instanceof FileNotFoundException || t instanceof SocketException ) if ( t . getMessage ( ) . contains ( "Too many open files" ) ) isUnstable = true ; if ( isUnstable ) killer . killCurrentJVM ( t ) ; }
Certain Throwables and Exceptions represent Die conditions for the server .
8,295
private void reduceNameFilter ( QueryFilter filter , ColumnFamily container , long sstableTimestamp ) { if ( container == null ) return ; for ( Iterator < CellName > iterator = ( ( NamesQueryFilter ) filter . filter ) . columns . iterator ( ) ; iterator . hasNext ( ) ; ) { CellName filterColumn = iterator . next ( ) ; Cell cell = container . getColumn ( filterColumn ) ; if ( cell != null && cell . timestamp ( ) > sstableTimestamp ) iterator . remove ( ) ; } }
Removes from the name filter any column that is already present in the container with a timestamp newer than the sstable's timestamp.
8,296
public static CellNameType getIndexComparator ( CFMetaData baseMetadata , ColumnDefinition cfDef ) { if ( cfDef . type . isCollection ( ) && cfDef . type . isMultiCell ( ) ) { switch ( ( ( CollectionType ) cfDef . type ) . kind ) { case LIST : return CompositesIndexOnCollectionValue . buildIndexComparator ( baseMetadata , cfDef ) ; case SET : return CompositesIndexOnCollectionKey . buildIndexComparator ( baseMetadata , cfDef ) ; case MAP : return cfDef . hasIndexOption ( SecondaryIndex . INDEX_KEYS_OPTION_NAME ) ? CompositesIndexOnCollectionKey . buildIndexComparator ( baseMetadata , cfDef ) : CompositesIndexOnCollectionValue . buildIndexComparator ( baseMetadata , cfDef ) ; } } switch ( cfDef . kind ) { case CLUSTERING_COLUMN : return CompositesIndexOnClusteringKey . buildIndexComparator ( baseMetadata , cfDef ) ; case REGULAR : return CompositesIndexOnRegular . buildIndexComparator ( baseMetadata , cfDef ) ; case PARTITION_KEY : return CompositesIndexOnPartitionKey . buildIndexComparator ( baseMetadata , cfDef ) ; } throw new AssertionError ( ) ; }
Check SecondaryIndex . getIndexComparator if you want to know why this is static
8,297
private String getLocation ( ) { Collection < InetAddress > localAddresses = FBUtilities . getAllLocalAddresses ( ) ; for ( InetAddress address : localAddresses ) { for ( String location : split . getLocations ( ) ) { InetAddress locationAddress = null ; try { locationAddress = InetAddress . getByName ( location ) ; } catch ( UnknownHostException e ) { throw new AssertionError ( e ) ; } if ( address . equals ( locationAddress ) ) { return location ; } } } return split . getLocations ( ) [ 0 ] ; }
Returns a split location matching a local address when possible; Hadoop tasks may run on machines that are not necessarily Cassandra nodes. This should be adequate for single-DC clusters at least.
8,298
public static RuntimeException rethrow ( RequestExecutionException e ) throws UnavailableException , TimedOutException { if ( e instanceof RequestTimeoutException ) throw toThrift ( ( RequestTimeoutException ) e ) ; else throw new UnavailableException ( ) ; }
Declared with a RuntimeException return type so callers can write "throw rethrow(e)" in methods that have a return value.
8,299
OutboundTcpConnection getConnection ( MessageOut msg ) { Stage stage = msg . getStage ( ) ; return stage == Stage . REQUEST_RESPONSE || stage == Stage . INTERNAL_RESPONSE || stage == Stage . GOSSIP ? ackCon : cmdCon ; }
Returns the appropriate connection based on the message's stage: the ack connection for responses and gossip, the command connection otherwise.