idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
12,100
/**
 * Generate the token of a key. Note that we need to ensure all generated
 * tokens are strictly bigger than MINIMUM: in particular we don't want
 * MINIMUM to correspond to any key, because the range (MINIMUM, X] doesn't
 * include MINIMUM but we use such ranges to select all data whose token is
 * smaller than X.
 */
public LongToken getToken(ByteBuffer key)
{
    // An empty key maps to the minimum token by convention.
    if (key.remaining() == 0)
        return MINIMUM;

    long[] hash = new long[2];
    MurmurHash.hash3_x64_128(key, key.position(), key.remaining(), 0, hash);
    // normalize() keeps the result strictly above MINIMUM.
    return new LongToken(normalize(hash[0]));
}
Generate the token of a key. Note that we need to ensure all generated tokens are strictly bigger than MINIMUM; in particular, we don't want MINIMUM to correspond to any key, because the range (MINIMUM, X] doesn't include MINIMUM, but we use such ranges to select all data whose token is smaller than X.
84
70
12,101
/**
 * Copies the characters of the given string into a BigInteger, packing each
 * 16-bit char into consecutive 16-bit slots starting from the most
 * significant position (as defined by sigchars).
 */
private static BigInteger bigForString(String str, int sigchars)
{
    assert str.length() <= sigchars;

    BigInteger result = BigInteger.ZERO;
    for (int pos = 0; pos < str.length(); pos++)
    {
        // Bit offset of this character's 16-bit slot, counted from the top.
        int shift = 16 * (sigchars - (pos + 1));
        BigInteger charBits = BigInteger.valueOf(str.charAt(pos) & 0xFFFF);
        result = result.or(charBits.shiftLeft(shift));
    }
    return result;
}
Copies the characters of the given string into a BigInteger .
121
13
12,102
private void setupDefaultUser ( ) { try { // insert the default superuser if AUTH_KS.CREDENTIALS_CF is empty. if ( ! hasExistingUsers ( ) ) { process ( String . format ( "INSERT INTO %s.%s (username, salted_hash) VALUES ('%s', '%s') USING TIMESTAMP 0" , Auth . AUTH_KS , CREDENTIALS_CF , DEFAULT_USER_NAME , escape ( hashpw ( DEFAULT_USER_PASSWORD ) ) ) , ConsistencyLevel . ONE ) ; logger . info ( "PasswordAuthenticator created default user '{}'" , DEFAULT_USER_NAME ) ; } } catch ( RequestExecutionException e ) { logger . warn ( "PasswordAuthenticator skipped default user setup: some nodes were not ready" ) ; } }
If there are no users yet, add the default superuser.
188
12
12,103
/**
 * Process a received file: record the new sstable and, once every expected
 * file has arrived, mark completion and schedule the completion task
 * exactly once.
 */
public synchronized void received(SSTableWriter sstable)
{
    if (done)
        return;

    assert cfId.equals(sstable.metadata.cfId);

    sstables.add(sstable);
    boolean allReceived = sstables.size() == totalFiles;
    if (allReceived)
    {
        done = true;
        executor.submit(new OnCompletionRunnable(this));
    }
}
Process received file .
81
4
12,104
< V > void find ( Object [ ] node , Comparator < V > comparator , Object target , Op mode , boolean forwards ) { // TODO : should not require parameter 'forwards' - consider modifying index to represent both // child and key position, as opposed to just key position (which necessitates a different value depending // on which direction you're moving in. Prerequisite for making Path public and using to implement general // search depth = - 1 ; if ( target instanceof BTree . Special ) { if ( target == POSITIVE_INFINITY ) moveEnd ( node , forwards ) ; else if ( target == NEGATIVE_INFINITY ) moveStart ( node , forwards ) ; else throw new AssertionError ( ) ; return ; } while ( true ) { int keyEnd = getKeyEnd ( node ) ; // search for the target in the current node int i = BTree . find ( comparator , target , node , 0 , keyEnd ) ; if ( i >= 0 ) { // exact match. transform exclusive bounds into the correct index by moving back or forwards one push ( node , i ) ; switch ( mode ) { case HIGHER : successor ( ) ; break ; case LOWER : predecessor ( ) ; } return ; } i = - i - 1 ; // traverse into the appropriate child if ( ! isLeaf ( node ) ) { push ( node , forwards ? i - 1 : i ) ; node = ( Object [ ] ) node [ keyEnd + i ] ; continue ; } // bottom of the tree and still not found. pick the right index to satisfy Op switch ( mode ) { case FLOOR : case LOWER : i -- ; } if ( i < 0 ) { push ( node , 0 ) ; predecessor ( ) ; } else if ( i >= keyEnd ) { push ( node , keyEnd - 1 ) ; successor ( ) ; } else { push ( node , i ) ; } return ; } }
Find the provided key in the tree rooted at node and store the root to it in the path
412
19
12,105
void successor ( ) { Object [ ] node = currentNode ( ) ; int i = currentIndex ( ) ; if ( ! isLeaf ( node ) ) { // if we're on a key in a branch, we MUST have a descendant either side of us, // so we always go down the left-most child until we hit a leaf node = ( Object [ ] ) node [ getBranchKeyEnd ( node ) + i + 1 ] ; while ( ! isLeaf ( node ) ) { push ( node , - 1 ) ; node = ( Object [ ] ) node [ getBranchKeyEnd ( node ) ] ; } push ( node , 0 ) ; return ; } // if we haven't reached the end of this leaf, just increment our index and return i += 1 ; if ( i < getLeafKeyEnd ( node ) ) { // moved to the next key in the same leaf setIndex ( i ) ; return ; } // we've reached the end of this leaf, // so go up until we reach something we've not finished visiting while ( ! isRoot ( ) ) { pop ( ) ; i = currentIndex ( ) + 1 ; node = currentNode ( ) ; if ( i < getKeyEnd ( node ) ) { setIndex ( i ) ; return ; } } // we've visited the last key in the root node, so we're done setIndex ( getKeyEnd ( node ) ) ; }
move to the next key in the tree
300
8
12,106
/**
 * Deconstructs a composite type into a Pig Tuple, one field per component,
 * each converted with that component's own comparator.
 */
protected Tuple composeComposite(AbstractCompositeType comparator, ByteBuffer name) throws IOException
{
    List<CompositeComponent> components = comparator.deconstruct(name);
    Tuple tuple = TupleFactory.getInstance().newTuple(components.size());
    int field = 0;
    for (CompositeComponent component : components)
        setTupleValue(tuple, field++, cassandraToObj(component.comparator, component.value));
    return tuple;
}
Deconstructs a composite type to a Tuple .
121
12
12,107
protected Tuple columnToTuple ( Cell col , CfInfo cfInfo , AbstractType comparator ) throws IOException { CfDef cfDef = cfInfo . cfDef ; Tuple pair = TupleFactory . getInstance ( ) . newTuple ( 2 ) ; ByteBuffer colName = col . name ( ) . toByteBuffer ( ) ; // name if ( comparator instanceof AbstractCompositeType ) setTupleValue ( pair , 0 , composeComposite ( ( AbstractCompositeType ) comparator , colName ) ) ; else setTupleValue ( pair , 0 , cassandraToObj ( comparator , colName ) ) ; // value Map < ByteBuffer , AbstractType > validators = getValidatorMap ( cfDef ) ; if ( cfInfo . cql3Table && ! cfInfo . compactCqlTable ) { ByteBuffer [ ] names = ( ( AbstractCompositeType ) parseType ( cfDef . comparator_type ) ) . split ( colName ) ; colName = names [ names . length - 1 ] ; } if ( validators . get ( colName ) == null ) { Map < MarshallerType , AbstractType > marshallers = getDefaultMarshallers ( cfDef ) ; setTupleValue ( pair , 1 , cassandraToObj ( marshallers . get ( MarshallerType . DEFAULT_VALIDATOR ) , col . value ( ) ) ) ; } else setTupleValue ( pair , 1 , cassandraToObj ( validators . get ( colName ) , col . value ( ) ) ) ; return pair ; }
convert a column to a tuple
341
7
12,108
/**
 * Get the column family definition previously cached for the given
 * signature. The cached string is "<compact><cql3><hex CfDef>", where the
 * first two characters are '1'/'0' flags (written by initSchema, which
 * appends {@code cfInfo.compactCqlTable ? 1 : 0} and
 * {@code cfInfo.cql3Table ? 1 : 0}).
 */
protected CfInfo getCfInfo(String signature) throws IOException
{
    UDFContext context = UDFContext.getUDFContext();
    Properties property = context.getUDFProperties(AbstractCassandraStorage.class);
    String prop = property.getProperty(signature);
    CfInfo cfInfo = new CfInfo();
    cfInfo.cfDef = cfdefFromString(prop.substring(2));
    // BUG FIX: the flags are stored as '1'/'0' (see initSchema), so compare
    // against '1' — comparing against a space character could never match.
    cfInfo.compactCqlTable = prop.charAt(0) == '1';
    cfInfo.cql3Table = prop.charAt(1) == '1';
    return cfInfo;
}
get the columnfamily definition for the signature
143
8
12,109
/**
 * Construct a map from marshaller type to the corresponding Cassandra
 * AbstractType parsed from the column family definition.
 */
protected Map<MarshallerType, AbstractType> getDefaultMarshallers(CfDef cfDef) throws IOException
{
    // Parse in the same order as before so failure behavior is unchanged.
    AbstractType comparator = parseType(cfDef.getComparator_type());
    AbstractType subcomparator = parseType(cfDef.getSubcomparator_type());
    AbstractType defaultValidator = parseType(cfDef.getDefault_validation_class());
    AbstractType keyValidator = parseType(cfDef.getKey_validation_class());

    Map<MarshallerType, AbstractType> marshallers = new EnumMap<MarshallerType, AbstractType>(MarshallerType.class);
    marshallers.put(MarshallerType.COMPARATOR, comparator);
    marshallers.put(MarshallerType.DEFAULT_VALIDATOR, defaultValidator);
    marshallers.put(MarshallerType.KEY_VALIDATOR, keyValidator);
    marshallers.put(MarshallerType.SUBCOMPARATOR, subcomparator);
    return marshallers;
}
Construct a map to store the marshaller type to Cassandra data type mapping.
259
16
12,110
/**
 * Get the per-column validator map, keyed by raw column name.
 * Counter columns are marshalled as plain longs. Parse failures are wrapped
 * in IOException.
 */
protected Map<ByteBuffer, AbstractType> getValidatorMap(CfDef cfDef) throws IOException
{
    Map<ByteBuffer, AbstractType> validators = new HashMap<ByteBuffer, AbstractType>();
    for (ColumnDef cd : cfDef.getColumn_metadata())
    {
        // Skip columns with no declared validator.
        if (cd.getValidation_class() == null || cd.getValidation_class().isEmpty())
            continue;
        try
        {
            AbstractType validator = TypeParser.parse(cd.getValidation_class());
            // Counters are exposed as longs; CounterColumnType.compose is not what we need.
            if (validator instanceof CounterColumnType)
                validator = LongType.instance;
            validators.put(cd.name, validator);
        }
        catch (ConfigurationException | SyntaxException e)
        {
            // Java 7 multi-catch: both failure modes are handled identically.
            throw new IOException(e);
        }
    }
    return validators;
}
get the validators
195
4
12,111
protected AbstractType parseType ( String type ) throws IOException { try { // always treat counters like longs, specifically CCT.compose is not what we need if ( type != null && type . equals ( "org.apache.cassandra.db.marshal.CounterColumnType" ) ) return LongType . instance ; return TypeParser . parse ( type ) ; } catch ( ConfigurationException e ) { throw new IOException ( e ) ; } catch ( SyntaxException e ) { throw new IOException ( e ) ; } }
parse the string to a cassandra data type
115
9
12,112
/**
 * Decompose a query string ("k1=v1&k2=v2...") into a parameter map.
 * Values are URL-decoded as UTF-8. A parameter with no '=' maps to the
 * empty string, and '=' characters inside a value are preserved.
 *
 * @throws UnsupportedEncodingException never in practice (UTF-8 is mandatory)
 */
public static Map<String, String> getQueryMap(String query) throws UnsupportedEncodingException
{
    String[] params = query.split("&");
    Map<String, String> map = new HashMap<String, String>();
    for (String param : params)
    {
        // BUG FIX: limit the split to 2 so '=' inside the value survives,
        // and guard against parameters with no value at all (previously an
        // ArrayIndexOutOfBoundsException).
        String[] keyValue = param.split("=", 2);
        String value = keyValue.length > 1 ? URLDecoder.decode(keyValue[1], "UTF-8") : "";
        map.put(keyValue[0], value);
    }
    return map;
}
decompose the query to store the parameters in a map
112
12
12,113
protected byte getPigType ( AbstractType type ) { if ( type instanceof LongType || type instanceof DateType || type instanceof TimestampType ) // DateType is bad and it should feel bad return DataType . LONG ; else if ( type instanceof IntegerType || type instanceof Int32Type ) // IntegerType will overflow at 2**31, but is kept for compatibility until pig has a BigInteger return DataType . INTEGER ; else if ( type instanceof AsciiType || type instanceof UTF8Type || type instanceof DecimalType || type instanceof InetAddressType ) return DataType . CHARARRAY ; else if ( type instanceof FloatType ) return DataType . FLOAT ; else if ( type instanceof DoubleType ) return DataType . DOUBLE ; else if ( type instanceof AbstractCompositeType || type instanceof CollectionType ) return DataType . TUPLE ; return DataType . BYTEARRAY ; }
get pig type for the cassandra data type
205
9
12,114
protected ByteBuffer objToBB ( Object o ) { if ( o == null ) return nullToBB ( ) ; if ( o instanceof java . lang . String ) return ByteBuffer . wrap ( new DataByteArray ( ( String ) o ) . get ( ) ) ; if ( o instanceof Integer ) return Int32Type . instance . decompose ( ( Integer ) o ) ; if ( o instanceof Long ) return LongType . instance . decompose ( ( Long ) o ) ; if ( o instanceof Float ) return FloatType . instance . decompose ( ( Float ) o ) ; if ( o instanceof Double ) return DoubleType . instance . decompose ( ( Double ) o ) ; if ( o instanceof UUID ) return ByteBuffer . wrap ( UUIDGen . decompose ( ( UUID ) o ) ) ; if ( o instanceof Tuple ) { List < Object > objects = ( ( Tuple ) o ) . getAll ( ) ; //collections if ( objects . size ( ) > 0 && objects . get ( 0 ) instanceof String ) { String collectionType = ( String ) objects . get ( 0 ) ; if ( "set" . equalsIgnoreCase ( collectionType ) || "list" . equalsIgnoreCase ( collectionType ) ) return objToListOrSetBB ( objects . subList ( 1 , objects . size ( ) ) ) ; else if ( "map" . equalsIgnoreCase ( collectionType ) ) return objToMapBB ( objects . subList ( 1 , objects . size ( ) ) ) ; } return objToCompositeBB ( objects ) ; } return ByteBuffer . wrap ( ( ( DataByteArray ) o ) . get ( ) ) ; }
convert object to ByteBuffer
363
6
12,115
protected void initSchema ( String signature ) throws IOException { Properties properties = UDFContext . getUDFContext ( ) . getUDFProperties ( AbstractCassandraStorage . class ) ; // Only get the schema if we haven't already gotten it if ( ! properties . containsKey ( signature ) ) { try { Cassandra . Client client = ConfigHelper . getClientFromInputAddressList ( conf ) ; client . set_keyspace ( keyspace ) ; if ( username != null && password != null ) { Map < String , String > credentials = new HashMap < String , String > ( 2 ) ; credentials . put ( IAuthenticator . USERNAME_KEY , username ) ; credentials . put ( IAuthenticator . PASSWORD_KEY , password ) ; try { client . login ( new AuthenticationRequest ( credentials ) ) ; } catch ( AuthenticationException e ) { logger . error ( "Authentication exception: invalid username and/or password" ) ; throw new IOException ( e ) ; } catch ( AuthorizationException e ) { throw new AssertionError ( e ) ; // never actually throws AuthorizationException. } } // compose the CfDef for the columfamily CfInfo cfInfo = getCfInfo ( client ) ; if ( cfInfo . cfDef != null ) { StringBuilder sb = new StringBuilder ( ) ; sb . append ( cfInfo . compactCqlTable ? 1 : 0 ) . append ( cfInfo . cql3Table ? 1 : 0 ) . append ( cfdefToString ( cfInfo . cfDef ) ) ; properties . setProperty ( signature , sb . toString ( ) ) ; } else throw new IOException ( String . format ( "Column family '%s' not found in keyspace '%s'" , column_family , keyspace ) ) ; } catch ( Exception e ) { throw new IOException ( e ) ; } } }
Methods to get the column family schema from Cassandra
400
9
12,116
protected static String cfdefToString ( CfDef cfDef ) throws IOException { assert cfDef != null ; // this is so awful it's kind of cool! TSerializer serializer = new TSerializer ( new TBinaryProtocol . Factory ( ) ) ; try { return Hex . bytesToHex ( serializer . serialize ( cfDef ) ) ; } catch ( TException e ) { throw new IOException ( e ) ; } }
convert CfDef to string
95
6
12,117
/**
 * Convert a hex string (produced by cfdefToString) back into a CfDef.
 */
protected static CfDef cfdefFromString(String st) throws IOException
{
    assert st != null;

    CfDef cfDef = new CfDef();
    TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory());
    try
    {
        deserializer.deserialize(cfDef, Hex.hexToBytes(st));
    }
    catch (TException e)
    {
        throw new IOException(e);
    }
    return cfDef;
}
convert string back to CfDef
102
7
12,118
protected CfInfo getCfInfo ( Cassandra . Client client ) throws InvalidRequestException , UnavailableException , TimedOutException , SchemaDisagreementException , TException , NotFoundException , org . apache . cassandra . exceptions . InvalidRequestException , ConfigurationException , IOException { // get CF meta data String query = "SELECT type," + " comparator," + " subcomparator," + " default_validator," + " key_validator," + " key_aliases " + "FROM system.schema_columnfamilies " + "WHERE keyspace_name = '%s' " + " AND columnfamily_name = '%s' " ; CqlResult result = client . execute_cql3_query ( ByteBufferUtil . bytes ( String . format ( query , keyspace , column_family ) ) , Compression . NONE , ConsistencyLevel . ONE ) ; if ( result == null || result . rows == null || result . rows . isEmpty ( ) ) return null ; Iterator < CqlRow > iteraRow = result . rows . iterator ( ) ; CfDef cfDef = new CfDef ( ) ; cfDef . keyspace = keyspace ; cfDef . name = column_family ; boolean cql3Table = false ; if ( iteraRow . hasNext ( ) ) { CqlRow cqlRow = iteraRow . next ( ) ; cfDef . column_type = ByteBufferUtil . string ( cqlRow . columns . get ( 0 ) . value ) ; cfDef . comparator_type = ByteBufferUtil . string ( cqlRow . columns . get ( 1 ) . value ) ; ByteBuffer subComparator = cqlRow . columns . get ( 2 ) . value ; if ( subComparator != null ) cfDef . subcomparator_type = ByteBufferUtil . string ( subComparator ) ; cfDef . default_validation_class = ByteBufferUtil . string ( cqlRow . columns . get ( 3 ) . value ) ; cfDef . key_validation_class = ByteBufferUtil . string ( cqlRow . columns . get ( 4 ) . value ) ; String keyAliases = ByteBufferUtil . string ( cqlRow . columns . get ( 5 ) . value ) ; if ( FBUtilities . fromJsonList ( keyAliases ) . size ( ) > 0 ) cql3Table = true ; } cfDef . column_metadata = getColumnMetadata ( client ) ; CfInfo cfInfo = new CfInfo ( ) ; cfInfo . cfDef = cfDef ; if ( cql3Table && ! ( parseType ( cfDef . 
comparator_type ) instanceof AbstractCompositeType ) ) cfInfo . compactCqlTable = true ; if ( cql3Table ) cfInfo . cql3Table = true ; ; return cfInfo ; }
return the CfInfo for the column family
625
8
12,119
protected List < ColumnDef > getColumnMeta ( Cassandra . Client client , boolean cassandraStorage , boolean includeCompactValueColumn ) throws InvalidRequestException , UnavailableException , TimedOutException , SchemaDisagreementException , TException , CharacterCodingException , org . apache . cassandra . exceptions . InvalidRequestException , ConfigurationException , NotFoundException { String query = "SELECT column_name, " + " validator, " + " index_type, " + " type " + "FROM system.schema_columns " + "WHERE keyspace_name = '%s' " + " AND columnfamily_name = '%s'" ; CqlResult result = client . execute_cql3_query ( ByteBufferUtil . bytes ( String . format ( query , keyspace , column_family ) ) , Compression . NONE , ConsistencyLevel . ONE ) ; List < CqlRow > rows = result . rows ; List < ColumnDef > columnDefs = new ArrayList < ColumnDef > ( ) ; if ( rows == null || rows . isEmpty ( ) ) { // if CassandraStorage, just return the empty list if ( cassandraStorage ) return columnDefs ; // otherwise for CqlNativeStorage, check metadata for classic thrift tables CFMetaData cfm = getCFMetaData ( keyspace , column_family , client ) ; for ( ColumnDefinition def : cfm . regularAndStaticColumns ( ) ) { ColumnDef cDef = new ColumnDef ( ) ; String columnName = def . name . toString ( ) ; String type = def . type . toString ( ) ; logger . debug ( "name: {}, type: {} " , columnName , type ) ; cDef . name = ByteBufferUtil . bytes ( columnName ) ; cDef . validation_class = type ; columnDefs . add ( cDef ) ; } // we may not need to include the value column for compact tables as we // could have already processed it as schema_columnfamilies.value_alias if ( columnDefs . size ( ) == 0 && includeCompactValueColumn && cfm . compactValueColumn ( ) != null ) { ColumnDefinition def = cfm . compactValueColumn ( ) ; if ( "value" . equals ( def . name . toString ( ) ) ) { ColumnDef cDef = new ColumnDef ( ) ; cDef . name = def . name . bytes ; cDef . validation_class = def . type . 
toString ( ) ; columnDefs . add ( cDef ) ; } } return columnDefs ; } Iterator < CqlRow > iterator = rows . iterator ( ) ; while ( iterator . hasNext ( ) ) { CqlRow row = iterator . next ( ) ; ColumnDef cDef = new ColumnDef ( ) ; String type = ByteBufferUtil . string ( row . getColumns ( ) . get ( 3 ) . value ) ; if ( ! type . equals ( "regular" ) ) continue ; cDef . setName ( ByteBufferUtil . clone ( row . getColumns ( ) . get ( 0 ) . value ) ) ; cDef . validation_class = ByteBufferUtil . string ( row . getColumns ( ) . get ( 1 ) . value ) ; ByteBuffer indexType = row . getColumns ( ) . get ( 2 ) . value ; if ( indexType != null ) cDef . index_type = getIndexType ( ByteBufferUtil . string ( indexType ) ) ; columnDefs . add ( cDef ) ; } return columnDefs ; }
get column meta data
768
4
12,120
/**
 * Map an index type name (case-insensitive) to the IndexType enum,
 * or null when unrecognised.
 */
protected IndexType getIndexType(String type)
{
    String normalized = type.toLowerCase();
    if ("keys".equals(normalized))
        return IndexType.KEYS;
    if ("custom".equals(normalized))
        return IndexType.CUSTOM;
    if ("composites".equals(normalized))
        return IndexType.COMPOSITES;
    return null;
}
get index type from string
87
5
12,121
/**
 * Return the partition keys usable for partition filtering, i.e. the names
 * of all indexed columns; null when partition filtering is disabled.
 */
public String[] getPartitionKeys(String location, Job job) throws IOException
{
    if (!usePartitionFilter)
        return null;
    List<ColumnDef> indexes = getIndexes();
    String[] partitionKeys = new String[indexes.size()];
    for (int i = 0; i < indexes.size(); i++)
    {
        // Decode explicitly as UTF-8: new String(byte[]) uses the platform
        // default charset, which would corrupt non-ASCII column names.
        partitionKeys[i] = new String(indexes.get(i).getName(), java.nio.charset.StandardCharsets.UTF_8);
    }
    return partitionKeys;
}
return partition keys
106
3
12,122
/**
 * Get the list of columns that have an index defined.
 */
protected List<ColumnDef> getIndexes() throws IOException
{
    CfDef cfdef = getCfInfo(loadSignature).cfDef;
    List<ColumnDef> indexed = new ArrayList<ColumnDef>();
    for (ColumnDef columnDef : cfdef.column_metadata)
    {
        if (columnDef.index_type != null)
            indexed.add(columnDef);
    }
    return indexed;
}
get a list of columns with defined index
89
8
12,123
/**
 * Get the CFMetaData of a column family by scanning the keyspace
 * definition; null when no column family with that name (case-insensitive)
 * exists.
 */
protected CFMetaData getCFMetaData(String ks, String cf, Cassandra.Client client)
        throws NotFoundException,
               InvalidRequestException,
               TException,
               org.apache.cassandra.exceptions.InvalidRequestException,
               ConfigurationException
{
    KsDef ksDef = client.describe_keyspace(ks);
    for (CfDef cfDef : ksDef.cf_defs)
    {
        if (cfDef.name.equalsIgnoreCase(cf))
            return CFMetaData.fromThrift(cfDef);
    }
    return null;
}
get CFMetaData of a column family
118
8
12,124
/**
 * Commits the pending changes to the index, wrapping any IOException in a
 * RuntimeException after logging it.
 */
public void commit()
{
    Log.info("Committing");
    try
    {
        indexWriter.commit();
    }
    catch (IOException e)
    {
        Log.error(e, "Error while committing");
        throw new RuntimeException(e);
    }
}
Commits the pending changes .
56
6
12,125
/**
 * Commits all changes to the index, waits for pending merges to complete,
 * and closes all associated resources (reopener thread, searcher manager,
 * writer, and directory).
 */
public void close()
{
    Log.info("Closing index");
    try
    {
        Log.info("Closing");
        // Stop the background reopener before tearing down the searcher/writer.
        searcherReopener.interrupt();
        searcherManager.close();
        indexWriter.close();
        directory.close();
    }
    catch (IOException e)
    {
        Log.error(e, "Error while closing index");
        throw new RuntimeException(e);
    }
}
Commits all changes to the index waits for pending merges to complete and closes all associated resources .
92
20
12,126
/**
 * Optimizes the index by force-merging all segments into a single one.
 * This operation blocks until all merging completes.
 */
public void optimize()
{
    Log.debug("Optimizing index");
    try
    {
        // Merge down to one segment, blocking until done, then persist.
        indexWriter.forceMerge(1, true);
        indexWriter.commit();
    }
    catch (IOException e)
    {
        Log.error(e, "Error while optimizing index");
        throw new RuntimeException(e);
    }
}
Optimizes the index forcing merge segments leaving one single segment . This operation blocks until all merging completes .
71
21
12,127
private long beforeAppend ( DecoratedKey decoratedKey ) { assert decoratedKey != null : "Keys must not be null" ; // empty keys ARE allowed b/c of indexed column values if ( lastWrittenKey != null && lastWrittenKey . compareTo ( decoratedKey ) >= 0 ) throw new RuntimeException ( "Last written key " + lastWrittenKey + " >= current key " + decoratedKey + " writing into " + getFilename ( ) ) ; return ( lastWrittenKey == null ) ? 0 : dataFile . getFilePointer ( ) ; }
Perform sanity checks on the key about to be appended.
118
5
12,128
/**
 * After a failure, attempt to abort the index writer and data file before
 * deleting all temp components for the sstable. An FSWriteError during
 * deletion is logged and rethrown.
 */
public void abort()
{
    assert descriptor.type.isTemporary;
    if (iwriter == null && dataFile == null)
        return;

    if (iwriter != null)
        iwriter.abort();
    if (dataFile != null)
        dataFile.abort();

    Set<Component> components = SSTable.componentsFor(descriptor);
    if (components.isEmpty())
        return;
    try
    {
        SSTable.delete(descriptor, components);
    }
    catch (FSWriteError e)
    {
        logger.error(String.format("Failed deleting temp components for %s", descriptor), e);
        throw e;
    }
}
After failure attempt to close the index writer and data file before deleting all temp components for the sstable
135
20
12,129
/**
 * Reset this cursor for the provided tree to iterate over its entire range.
 */
public void reset(Object[] btree, boolean forwards)
{
    // Unbounded at both ends: from negative to positive infinity.
    _reset(btree, null, NEGATIVE_INFINITY, false, POSITIVE_INFINITY, false, forwards);
}
Reset this cursor for the provided tree to iterate over its entire range
46
15
12,130
/**
 * This is only called by Memtable.resolve, so only AtomicBTreeColumns needs
 * to implement it. Atomically merges the given column family into this one,
 * retrying via CAS and escalating to pessimistic locking when too much
 * allocation is being wasted on failed attempts.
 *
 * @return pair of (data size delta, column update time delta)
 */
public Pair<Long, Long> addAllWithSizeDelta(final ColumnFamily cm, MemtableAllocator allocator, OpOrder.Group writeOp, Updater indexer)
{
    ColumnUpdater updater = new ColumnUpdater(this, cm.metadata, allocator, writeOp, indexer);
    DeletionInfo inputDeletionInfoCopy = null;
    boolean monitorOwned = false;
    try
    {
        if (usePessimisticLocking())
        {
            Locks.monitorEnterUnsafe(this);
            monitorOwned = true;
        }
        while (true)
        {
            Holder current = ref;
            updater.ref = current;
            updater.reset();

            DeletionInfo deletionInfo;
            if (cm.deletionInfo().mayModify(current.deletionInfo))
            {
                // Copy the incoming deletion info on-heap once, lazily, so retries can reuse it.
                if (inputDeletionInfoCopy == null)
                    inputDeletionInfoCopy = cm.deletionInfo().copy(HeapAllocator.instance);

                deletionInfo = current.deletionInfo.copy().add(inputDeletionInfoCopy);
                updater.allocated(deletionInfo.unsharedHeapSize() - current.deletionInfo.unsharedHeapSize());
            }
            else
            {
                deletionInfo = current.deletionInfo;
            }

            Object[] tree = BTree.update(current.tree, metadata.comparator.columnComparator(Memtable.MEMORY_POOL instanceof NativePool), cm, cm.getColumnCount(), true, updater);

            if (tree != null && refUpdater.compareAndSet(this, current, new Holder(tree, deletionInfo)))
            {
                // Success: the new holder is published; report the deltas.
                indexer.updateRowLevelIndexes();
                updater.finish();
                return Pair.create(updater.dataSize, updater.colUpdateTimeDelta);
            }
            else if (!monitorOwned)
            {
                // CAS failed: consider switching to pessimistic locking if we
                // keep wasting allocation on contended retries.
                boolean shouldLock = usePessimisticLocking();
                if (!shouldLock)
                {
                    shouldLock = updateWastedAllocationTracker(updater.heapSize);
                }
                if (shouldLock)
                {
                    Locks.monitorEnterUnsafe(this);
                    monitorOwned = true;
                }
            }
        }
    }
    finally
    {
        if (monitorOwned)
            Locks.monitorExitUnsafe(this);
    }
}
This is only called by Memtable . resolve so only AtomicBTreeColumns needs to implement it .
489
21
12,131
private boolean updateWastedAllocationTracker ( long wastedBytes ) { // Early check for huge allocation that exceeds the limit if ( wastedBytes < EXCESS_WASTE_BYTES ) { // We round up to ensure work < granularity are still accounted for int wastedAllocation = ( ( int ) ( wastedBytes + ALLOCATION_GRANULARITY_BYTES - 1 ) ) / ALLOCATION_GRANULARITY_BYTES ; int oldTrackerValue ; while ( TRACKER_PESSIMISTIC_LOCKING != ( oldTrackerValue = wasteTracker ) ) { // Note this time value has an arbitrary offset, but is a constant rate 32 bit counter (that may wrap) int time = ( int ) ( System . nanoTime ( ) >>> CLOCK_SHIFT ) ; int delta = oldTrackerValue - time ; if ( oldTrackerValue == TRACKER_NEVER_WASTED || delta >= 0 || delta < - EXCESS_WASTE_OFFSET ) delta = - EXCESS_WASTE_OFFSET ; delta += wastedAllocation ; if ( delta >= 0 ) break ; if ( wasteTrackerUpdater . compareAndSet ( this , oldTrackerValue , avoidReservedValues ( time + delta ) ) ) return false ; } } // We have definitely reached our waste limit so set the state if it isn't already wasteTrackerUpdater . set ( this , TRACKER_PESSIMISTIC_LOCKING ) ; // And tell the caller to proceed with pessimistic locking return true ; }
Update the wasted allocation tracker state based on newly wasted allocation information
334
12
12,132
/**
 * Returns the element at the given index in a serialized list, or null when
 * the index is out of bounds. Throws MarshalException when the buffer is
 * truncated.
 */
public ByteBuffer getElement(ByteBuffer serializedList, int index)
{
    try
    {
        ByteBuffer input = serializedList.duplicate();
        int size = readCollectionSize(input, Server.VERSION_3);
        if (size <= index)
            return null;

        // Skip the first 'index' elements; each is length-prefixed.
        for (int skipped = 0; skipped < index; skipped++)
        {
            int length = input.getInt();
            input.position(input.position() + length);
        }
        return readValue(input, Server.VERSION_3);
    }
    catch (BufferUnderflowException e)
    {
        throw new MarshalException("Not enough bytes to read a list");
    }
}
Returns the element at the given index in a list .
137
11
12,133
/**
 * Releases the shared reference acquired for each sstable when it was
 * opened; asserts that no global references remain afterwards.
 */
private void releaseReferences()
{
    for (SSTableReader sstable : sstables)
    {
        sstable.selfRef().release();
        assert sstable.selfRef().globalCount() == 0;
    }
}
Releases the shared reference for all sstables; we acquire this reference when opening each sstable.
51
18
12,134
/**
 * Starts or resumes the time count.
 *
 * @return this, for chaining
 * @throws IllegalStateException if already running
 */
public TimeCounter start()
{
    if (state == State.RUNNING)
        throw new IllegalStateException("Already started. ");
    if (state == State.UNSTARTED)
        watch.start();
    else // STOPPED: continue the previous count
        watch.resume();
    state = State.RUNNING;
    return this;
}
Starts or resumes the time count .
66
8
12,135
/**
 * Stops or suspends the time count.
 *
 * @return this, for chaining
 * @throws IllegalStateException if never started or already stopped
 */
public TimeCounter stop()
{
    if (state == State.UNSTARTED)
        throw new IllegalStateException("Not started. ");
    if (state == State.STOPPED)
        throw new IllegalStateException("Already stopped. ");
    watch.suspend();
    state = State.STOPPED;
    return this;
}
Stops or suspends the time count .
72
9
12,136
/**
 * Deserialize triggers from their storage-level (schema table)
 * representation.
 */
public static List<TriggerDefinition> fromSchema(Row serializedTriggers)
{
    String query = String.format("SELECT * FROM %s.%s", Keyspace.SYSTEM_KS, SystemKeyspace.SCHEMA_TRIGGERS_CF);
    List<TriggerDefinition> triggers = new ArrayList<>();
    for (UntypedResultSet.Row row : QueryProcessor.resultify(query, serializedTriggers))
    {
        String name = row.getString(TRIGGER_NAME);
        // The trigger class is stored as the CLASS entry of the options map.
        String classOption = row.getMap(TRIGGER_OPTIONS, UTF8Type.instance, UTF8Type.instance).get(CLASS);
        triggers.add(new TriggerDefinition(name, classOption));
    }
    return triggers;
}
Deserialize triggers from storage - level representation .
172
10
12,137
/**
 * Add this trigger to the schema using the given mutation.
 */
public void toSchema(Mutation mutation, String cfName, long timestamp)
{
    ColumnFamily cf = mutation.addOrGet(SystemKeyspace.SCHEMA_TRIGGERS_CF);
    CFMetaData cfm = CFMetaData.SchemaTriggersCf;
    Composite prefix = cfm.comparator.make(cfName, name);
    // Record the trigger class under the CLASS key of the options map.
    new CFRowAdder(cf, prefix, timestamp).addMapEntry(TRIGGER_OPTIONS, CLASS, classOption);
}
Add specified trigger to the schema using given mutation .
120
10
12,138
/**
 * Drop this trigger from the schema using the given mutation, by writing a
 * range tombstone covering the trigger's row.
 */
public void deleteFromSchema(Mutation mutation, String cfName, long timestamp)
{
    ColumnFamily cf = mutation.addOrGet(SystemKeyspace.SCHEMA_TRIGGERS_CF);
    // local deletion time, in seconds
    int ldt = (int) (System.currentTimeMillis() / 1000);
    Composite prefix = CFMetaData.SchemaTriggersCf.comparator.make(cfName, name);
    cf.addAtom(new RangeTombstone(prefix, prefix.end(), timestamp, ldt));
}
Drop specified trigger from the schema using given mutation .
118
10
12,139
void clear ( ) { NodeBuilder current = this ; while ( current != null && current . upperBound != null ) { current . clearSelf ( ) ; current = current . child ; } current = parent ; while ( current != null && current . upperBound != null ) { current . clearSelf ( ) ; current = current . parent ; } }
ensure we aren t referencing any garbage
72
8
12,140
NodeBuilder update ( Object key ) { assert copyFrom != null ; int copyFromKeyEnd = getKeyEnd ( copyFrom ) ; int i = copyFromKeyPosition ; boolean found ; // exact key match? boolean owns = true ; // true iff this node (or a child) should contain the key if ( i == copyFromKeyEnd ) { found = false ; } else { // this optimisation is for the common scenario of updating an existing row with the same columns/keys // and simply avoids performing a binary search until we've checked the proceeding key; // possibly we should disable this check if we determine that it fails more than a handful of times // during any given builder use to get the best of both worlds int c = - comparator . compare ( key , copyFrom [ i ] ) ; if ( c >= 0 ) { found = c == 0 ; } else { i = find ( comparator , key , copyFrom , i + 1 , copyFromKeyEnd ) ; found = i >= 0 ; if ( ! found ) i = - i - 1 ; } } if ( found ) { Object prev = copyFrom [ i ] ; Object next = updateFunction . apply ( prev , key ) ; // we aren't actually replacing anything, so leave our state intact and continue if ( prev == next ) return null ; key = next ; } else if ( i == copyFromKeyEnd && compare ( comparator , key , upperBound ) >= 0 ) owns = false ; if ( isLeaf ( copyFrom ) ) { if ( owns ) { // copy keys from the original node up to prior to the found index copyKeys ( i ) ; if ( found ) { // if found, we've applied updateFunction already replaceNextKey ( key ) ; } else { // if not found, we need to apply updateFunction still key = updateFunction . apply ( key ) ; addNewKey ( key ) ; // handles splitting parent if necessary via ensureRoom } // done, so return null return null ; } else { // we don't want to copy anything if we're ascending and haven't copied anything previously, // as in this case we can return the original node. 
Leaving buildKeyPosition as 0 indicates // to buildFromRange that it should return the original instead of building a new node if ( buildKeyPosition > 0 ) copyKeys ( i ) ; } // if we don't own it, all we need to do is ensure we've copied everything in this node // (which we have done, since not owning means pos >= keyEnd), ascend, and let Modifier.update // retry against the parent node. The if/ascend after the else branch takes care of that. } else { // branch if ( found ) { copyKeys ( i ) ; replaceNextKey ( key ) ; copyChildren ( i + 1 ) ; return null ; } else if ( owns ) { copyKeys ( i ) ; copyChildren ( i ) ; // belongs to the range owned by this node, but not equal to any key in the node // so descend into the owning child Object newUpperBound = i < copyFromKeyEnd ? copyFrom [ i ] : upperBound ; Object [ ] descendInto = ( Object [ ] ) copyFrom [ copyFromKeyEnd + i ] ; ensureChild ( ) . reset ( descendInto , newUpperBound , updateFunction , comparator ) ; return child ; } else if ( buildKeyPosition > 0 || buildChildPosition > 0 ) { // ensure we've copied all keys and children, but only if we've already copied something. // otherwise we want to return the original node copyKeys ( copyFromKeyEnd ) ; copyChildren ( copyFromKeyEnd + 1 ) ; // since we know that there are exactly 1 more child nodes, than keys } } return ascend ( ) ; }
Inserts or replaces the provided key copying all not - yet - visited keys prior to it into our buffer .
796
22
12,141
NodeBuilder ascendToRoot ( ) { NodeBuilder current = this ; while ( ! current . isRoot ( ) ) current = current . ascend ( ) ; return current ; }
where we work only on the newest child node which may construct many spill - over parents as it goes
36
20
12,142
Object [ ] toNode ( ) { assert buildKeyPosition <= FAN_FACTOR && ( buildKeyPosition > 0 || copyFrom . length > 0 ) : buildKeyPosition ; return buildFromRange ( 0 , buildKeyPosition , isLeaf ( copyFrom ) , false ) ; }
builds a new root BTree node - must be called on root of operation
62
16
12,143
private NodeBuilder ascend ( ) { ensureParent ( ) ; boolean isLeaf = isLeaf ( copyFrom ) ; if ( buildKeyPosition > FAN_FACTOR ) { // split current node and move the midpoint into parent, with the two halves as children int mid = buildKeyPosition / 2 ; parent . addExtraChild ( buildFromRange ( 0 , mid , isLeaf , true ) , buildKeys [ mid ] ) ; parent . finishChild ( buildFromRange ( mid + 1 , buildKeyPosition - ( mid + 1 ) , isLeaf , false ) ) ; } else { parent . finishChild ( buildFromRange ( 0 , buildKeyPosition , isLeaf , false ) ) ; } return parent ; }
finish up this level and pass any constructed children up to our parent ensuring a parent exists
156
18
12,144
void addNewKey ( Object key ) { ensureRoom ( buildKeyPosition + 1 ) ; buildKeys [ buildKeyPosition ++ ] = updateFunction . apply ( key ) ; }
puts the provided key in the builder with no impact on treatment of data from copyf
37
18
12,145
private void addExtraChild ( Object [ ] child , Object upperBound ) { ensureRoom ( buildKeyPosition + 1 ) ; buildKeys [ buildKeyPosition ++ ] = upperBound ; buildChildren [ buildChildPosition ++ ] = child ; }
adds a new and unexpected child to the builder - called by children that overflow
50
16
12,146
private void ensureRoom ( int nextBuildKeyPosition ) { if ( nextBuildKeyPosition < MAX_KEYS ) return ; // flush even number of items so we don't waste leaf space repeatedly Object [ ] flushUp = buildFromRange ( 0 , FAN_FACTOR , isLeaf ( copyFrom ) , true ) ; ensureParent ( ) . addExtraChild ( flushUp , buildKeys [ FAN_FACTOR ] ) ; int size = FAN_FACTOR + 1 ; assert size <= buildKeyPosition : buildKeyPosition + "," + nextBuildKeyPosition ; System . arraycopy ( buildKeys , size , buildKeys , 0 , buildKeyPosition - size ) ; buildKeyPosition -= size ; maxBuildKeyPosition = buildKeys . length ; if ( buildChildPosition > 0 ) { System . arraycopy ( buildChildren , size , buildChildren , 0 , buildChildPosition - size ) ; buildChildPosition -= size ; } }
checks if we can add the requested keys + children to the builder and if not we spill - over into our parent
202
23
12,147
private Object [ ] buildFromRange ( int offset , int keyLength , boolean isLeaf , boolean isExtra ) { // if keyLength is 0, we didn't copy anything from the original, which means we didn't // modify any of the range owned by it, so can simply return it as is if ( keyLength == 0 ) return copyFrom ; Object [ ] a ; if ( isLeaf ) { a = new Object [ keyLength + ( keyLength & 1 ) ] ; System . arraycopy ( buildKeys , offset , a , 0 , keyLength ) ; } else { a = new Object [ 1 + ( keyLength * 2 ) ] ; System . arraycopy ( buildKeys , offset , a , 0 , keyLength ) ; System . arraycopy ( buildChildren , offset , a , keyLength , keyLength + 1 ) ; } if ( isExtra ) updateFunction . allocated ( ObjectSizes . sizeOfArray ( a ) ) ; else if ( a . length != copyFrom . length ) updateFunction . allocated ( ObjectSizes . sizeOfArray ( a ) - ( copyFrom . length == 0 ? 0 : ObjectSizes . sizeOfArray ( copyFrom ) ) ) ; return a ; }
builds and returns a node from the buffered objects in the given range
256
15
12,148
private NodeBuilder ensureParent ( ) { if ( parent == null ) { parent = new NodeBuilder ( ) ; parent . child = this ; } if ( parent . upperBound == null ) parent . reset ( EMPTY_BRANCH , upperBound , updateFunction , comparator ) ; return parent ; }
already be initialised and only aren t in the case where we are overflowing the original root node
64
20
12,149
private List < Pair < Long , Long > > getTransferSections ( CompressionMetadata . Chunk [ ] chunks ) { List < Pair < Long , Long > > transferSections = new ArrayList <> ( ) ; Pair < Long , Long > lastSection = null ; for ( CompressionMetadata . Chunk chunk : chunks ) { if ( lastSection != null ) { if ( chunk . offset == lastSection . right ) { // extend previous section to end of this chunk lastSection = Pair . create ( lastSection . left , chunk . offset + chunk . length + 4 ) ; // 4 bytes for CRC } else { transferSections . add ( lastSection ) ; lastSection = Pair . create ( chunk . offset , chunk . offset + chunk . length + 4 ) ; } } else { lastSection = Pair . create ( chunk . offset , chunk . offset + chunk . length + 4 ) ; } } if ( lastSection != null ) transferSections . add ( lastSection ) ; return transferSections ; }
chunks are assumed to be sorted by offset
221
9
12,150
@ Override public CellName copy ( CFMetaData cfm , AbstractAllocator allocator ) { return new SimpleDenseCellName ( allocator . clone ( element ) ) ; }
we might want to try to do better .
40
9
12,151
private long getNow ( ) { return Collections . max ( cfs . getSSTables ( ) , new Comparator < SSTableReader > ( ) { public int compare ( SSTableReader o1 , SSTableReader o2 ) { return Long . compare ( o1 . getMaxTimestamp ( ) , o2 . getMaxTimestamp ( ) ) ; } } ) . getMaxTimestamp ( ) ; }
Gets the timestamp that DateTieredCompactionStrategy considers to be the current time .
91
20
12,152
@ VisibleForTesting static Iterable < SSTableReader > filterOldSSTables ( List < SSTableReader > sstables , long maxSSTableAge , long now ) { if ( maxSSTableAge == 0 ) return sstables ; final long cutoff = now - maxSSTableAge ; return Iterables . filter ( sstables , new Predicate < SSTableReader > ( ) { @ Override public boolean apply ( SSTableReader sstable ) { return sstable . getMaxTimestamp ( ) >= cutoff ; } } ) ; }
Removes all sstables with max timestamp older than maxSSTableAge .
124
17
12,153
@ VisibleForTesting static < T > List < List < T > > getBuckets ( Collection < Pair < T , Long > > files , long timeUnit , int base , long now ) { // Sort files by age. Newest first. final List < Pair < T , Long > > sortedFiles = Lists . newArrayList ( files ) ; Collections . sort ( sortedFiles , Collections . reverseOrder ( new Comparator < Pair < T , Long > > ( ) { public int compare ( Pair < T , Long > p1 , Pair < T , Long > p2 ) { return p1 . right . compareTo ( p2 . right ) ; } } ) ) ; List < List < T > > buckets = Lists . newArrayList ( ) ; Target target = getInitialTarget ( now , timeUnit ) ; PeekingIterator < Pair < T , Long > > it = Iterators . peekingIterator ( sortedFiles . iterator ( ) ) ; outerLoop : while ( it . hasNext ( ) ) { while ( ! target . onTarget ( it . peek ( ) . right ) ) { // If the file is too new for the target, skip it. if ( target . compareToTimestamp ( it . peek ( ) . right ) < 0 ) { it . next ( ) ; if ( ! it . hasNext ( ) ) break outerLoop ; } else // If the file is too old for the target, switch targets. target = target . nextTarget ( base ) ; } List < T > bucket = Lists . newArrayList ( ) ; while ( target . onTarget ( it . peek ( ) . right ) ) { bucket . add ( it . next ( ) . left ) ; if ( ! it . hasNext ( ) ) break ; } buckets . add ( bucket ) ; } return buckets ; }
Group files with similar min timestamp into buckets . Files with recent min timestamps are grouped together into buckets designated to short timespans while files with older timestamps are grouped into buckets representing longer timespans .
384
44
12,154
@ Override public void write ( Object key , List < ByteBuffer > values ) throws IOException { prepareWriter ( ) ; try { ( ( CQLSSTableWriter ) writer ) . rawAddRow ( values ) ; if ( null != progress ) progress . progress ( ) ; if ( null != context ) HadoopCompat . progress ( context ) ; } catch ( InvalidRequestException e ) { throw new IOException ( "Error adding row with key: " + key , e ) ; } }
The column values must correspond to the order in which they appear in the insert stored procedure .
105
18
12,155
private static long discard ( ByteBuf buffer , long remainingToDiscard ) { int availableToDiscard = ( int ) Math . min ( remainingToDiscard , buffer . readableBytes ( ) ) ; buffer . skipBytes ( availableToDiscard ) ; return remainingToDiscard - availableToDiscard ; }
How much remains to be discarded
66
6
12,156
public static boolean delete ( Descriptor desc , Set < Component > components ) { // remove the DATA component first if it exists if ( components . contains ( Component . DATA ) ) FileUtils . deleteWithConfirm ( desc . filenameFor ( Component . DATA ) ) ; for ( Component component : components ) { if ( component . equals ( Component . DATA ) || component . equals ( Component . SUMMARY ) ) continue ; FileUtils . deleteWithConfirm ( desc . filenameFor ( component ) ) ; } FileUtils . delete ( desc . filenameFor ( Component . SUMMARY ) ) ; logger . debug ( "Deleted {}" , desc ) ; return true ; }
We use a ReferenceQueue to manage deleting files that have been compacted and for which no more SSTable references exist . But this is not guaranteed to run for each such file because of the semantics of the JVM gc . So we write a marker to compactedFilename when a file is compacted ; if such a marker exists on startup the file should be removed .
144
76
12,157
public static DecoratedKey getMinimalKey ( DecoratedKey key ) { return key . getKey ( ) . position ( ) > 0 || key . getKey ( ) . hasRemaining ( ) || ! key . getKey ( ) . hasArray ( ) ? new BufferDecoratedKey ( key . getToken ( ) , HeapAllocator . instance . clone ( key . getKey ( ) ) ) : key ; }
If the given
94
3
12,158
protected static Set < Component > readTOC ( Descriptor descriptor ) throws IOException { File tocFile = new File ( descriptor . filenameFor ( Component . TOC ) ) ; List < String > componentNames = Files . readLines ( tocFile , Charset . defaultCharset ( ) ) ; Set < Component > components = Sets . newHashSetWithExpectedSize ( componentNames . size ( ) ) ; for ( String componentName : componentNames ) { Component component = new Component ( Component . Type . fromRepresentation ( componentName ) , componentName ) ; if ( ! new File ( descriptor . filenameFor ( component ) ) . exists ( ) ) logger . error ( "Missing component: {}" , descriptor . filenameFor ( component ) ) ; else components . add ( component ) ; } return components ; }
Reads the list of components from the TOC component .
176
12
12,159
protected static void appendTOC ( Descriptor descriptor , Collection < Component > components ) { File tocFile = new File ( descriptor . filenameFor ( Component . TOC ) ) ; PrintWriter w = null ; try { w = new PrintWriter ( new FileWriter ( tocFile , true ) ) ; for ( Component component : components ) w . println ( component . name ) ; } catch ( IOException e ) { throw new FSWriteError ( e , tocFile ) ; } finally { FileUtils . closeQuietly ( w ) ; } }
Appends new component names to the TOC component .
119
11
12,160
public synchronized void addComponents ( Collection < Component > newComponents ) { Collection < Component > componentsToAdd = Collections2 . filter ( newComponents , Predicates . not ( Predicates . in ( components ) ) ) ; appendTOC ( descriptor , componentsToAdd ) ; components . addAll ( componentsToAdd ) ; }
Registers new custom components . Used by custom compaction strategies . Adding a component for the second time is a no - op . Don t remove this - this method is a part of the public API intended for use by custom compaction strategies .
70
49
12,161
@ Override public void serialize ( Map < MetadataType , MetadataComponent > components , DataOutputPlus out ) throws IOException { ValidationMetadata validation = ( ValidationMetadata ) components . get ( MetadataType . VALIDATION ) ; StatsMetadata stats = ( StatsMetadata ) components . get ( MetadataType . STATS ) ; CompactionMetadata compaction = ( CompactionMetadata ) components . get ( MetadataType . COMPACTION ) ; assert validation != null && stats != null && compaction != null && validation . partitioner != null ; EstimatedHistogram . serializer . serialize ( stats . estimatedRowSize , out ) ; EstimatedHistogram . serializer . serialize ( stats . estimatedColumnCount , out ) ; ReplayPosition . serializer . serialize ( stats . replayPosition , out ) ; out . writeLong ( stats . minTimestamp ) ; out . writeLong ( stats . maxTimestamp ) ; out . writeInt ( stats . maxLocalDeletionTime ) ; out . writeDouble ( validation . bloomFilterFPChance ) ; out . writeDouble ( stats . compressionRatio ) ; out . writeUTF ( validation . partitioner ) ; out . writeInt ( compaction . ancestors . size ( ) ) ; for ( Integer g : compaction . ancestors ) out . writeInt ( g ) ; StreamingHistogram . serializer . serialize ( stats . estimatedTombstoneDropTime , out ) ; out . writeInt ( stats . sstableLevel ) ; out . writeInt ( stats . minColumnNames . size ( ) ) ; for ( ByteBuffer columnName : stats . minColumnNames ) ByteBufferUtil . writeWithShortLength ( columnName , out ) ; out . writeInt ( stats . maxColumnNames . size ( ) ) ; for ( ByteBuffer columnName : stats . maxColumnNames ) ByteBufferUtil . writeWithShortLength ( columnName , out ) ; }
Legacy serialization is only used for SSTable level reset .
409
14
12,162
@ Override public Map < MetadataType , MetadataComponent > deserialize ( Descriptor descriptor , EnumSet < MetadataType > types ) throws IOException { Map < MetadataType , MetadataComponent > components = Maps . newHashMap ( ) ; File statsFile = new File ( descriptor . filenameFor ( Component . STATS ) ) ; if ( ! statsFile . exists ( ) && types . contains ( MetadataType . STATS ) ) { components . put ( MetadataType . STATS , MetadataCollector . defaultStatsMetadata ( ) ) ; } else { try ( DataInputStream in = new DataInputStream ( new BufferedInputStream ( new FileInputStream ( statsFile ) ) ) ) { EstimatedHistogram rowSizes = EstimatedHistogram . serializer . deserialize ( in ) ; EstimatedHistogram columnCounts = EstimatedHistogram . serializer . deserialize ( in ) ; ReplayPosition replayPosition = ReplayPosition . serializer . deserialize ( in ) ; long minTimestamp = in . readLong ( ) ; long maxTimestamp = in . readLong ( ) ; int maxLocalDeletionTime = in . readInt ( ) ; double bloomFilterFPChance = in . readDouble ( ) ; double compressionRatio = in . readDouble ( ) ; String partitioner = in . readUTF ( ) ; int nbAncestors = in . readInt ( ) ; Set < Integer > ancestors = new HashSet <> ( nbAncestors ) ; for ( int i = 0 ; i < nbAncestors ; i ++ ) ancestors . ( in . readInt ( ) ) ; StreamingHistogram tombstoneHistogram = StreamingHistogram . serializer . deserialize ( in ) ; int sstableLevel = 0 ; if ( in . available ( ) > 0 ) sstableLevel = in . readInt ( ) ; int colCount = in . readInt ( ) ; List < ByteBuffer > minColumnNames = new ArrayList <> ( colCount ) ; for ( int i = 0 ; i < colCount ; i ++ ) minColumnNames . ( ByteBufferUtil . readWithShortLength ( in ) ) ; colCount = in . readInt ( ) ; List < ByteBuffer > maxColumnNames = new ArrayList <> ( colCount ) ; for ( int i = 0 ; i < colCount ; i ++ ) maxColumnNames . ( ByteBufferUtil . readWithShortLength ( in ) ) ; if ( types . contains ( MetadataType . VALIDATION ) ) components . put ( MetadataType . 
VALIDATION , new ValidationMetadata ( partitioner , bloomFilterFPChance ) ) ; if ( types . contains ( MetadataType . STATS ) ) components . put ( MetadataType . STATS , new StatsMetadata ( rowSizes , columnCounts , replayPosition , minTimestamp , maxTimestamp , maxLocalDeletionTime , compressionRatio , tombstoneHistogram , sstableLevel , minColumnNames , maxColumnNames , true , ActiveRepairService . UNREPAIRED_SSTABLE ) ) ; if ( types . contains ( MetadataType . COMPACTION ) ) components . put ( MetadataType . COMPACTION , new CompactionMetadata ( ancestors , null ) ) ; } } return components ; }
Legacy serializer deserialize all components no matter what types are specified .
706
16
12,163
public void initiate ( ) throws IOException { logger . debug ( "[Stream #{}] Sending stream init for incoming stream" , session . planId ( ) ) ; Socket incomingSocket = session . createConnection ( ) ; incoming . start ( incomingSocket , StreamMessage . CURRENT_VERSION ) ; incoming . sendInitMessage ( incomingSocket , true ) ; logger . debug ( "[Stream #{}] Sending stream init for outgoing stream" , session . planId ( ) ) ; Socket outgoingSocket = session . createConnection ( ) ; outgoing . start ( outgoingSocket , StreamMessage . CURRENT_VERSION ) ; outgoing . sendInitMessage ( outgoingSocket , false ) ; }
Set up incoming message handler and initiate streaming .
140
9
12,164
public void initiateOnReceivingSide ( Socket socket , boolean isForOutgoing , int version ) throws IOException { if ( isForOutgoing ) outgoing . start ( socket , version ) ; else incoming . start ( socket , version ) ; }
Set up outgoing message handler on receiving side .
52
9
12,165
private void setNextSamplePosition ( long position ) { tryAgain : while ( true ) { position += minIndexInterval ; long test = indexIntervalMatches ++ ; for ( int start : startPoints ) if ( ( test - start ) % BASE_SAMPLING_LEVEL == 0 ) continue tryAgain ; nextSamplePosition = position ; return ; } }
calculate the next key we will store to our summary
77
12
12,166
public IndexSummary build ( IPartitioner partitioner , ReadableBoundary boundary ) { assert entries . length ( ) > 0 ; int count = ( int ) ( offsets . length ( ) / 4 ) ; long entriesLength = entries . length ( ) ; if ( boundary != null ) { count = boundary . summaryCount ; entriesLength = boundary . entriesLength ; } int sizeAtFullSampling = ( int ) Math . ceil ( keysWritten / ( double ) minIndexInterval ) ; assert count > 0 ; return new IndexSummary ( partitioner , offsets . currentBuffer ( ) . sharedCopy ( ) , count , entries . currentBuffer ( ) . sharedCopy ( ) , entriesLength , sizeAtFullSampling , minIndexInterval , samplingLevel ) ; }
multiple invocations of this build method
162
7
12,167
public static IndexSummary downsample ( IndexSummary existing , int newSamplingLevel , int minIndexInterval , IPartitioner partitioner ) { // To downsample the old index summary, we'll go through (potentially) several rounds of downsampling. // Conceptually, each round starts at position X and then removes every Nth item. The value of X follows // a particular pattern to evenly space out the items that we remove. The value of N decreases by one each // round. int currentSamplingLevel = existing . getSamplingLevel ( ) ; assert currentSamplingLevel > newSamplingLevel ; assert minIndexInterval == existing . getMinIndexInterval ( ) ; // calculate starting indexes for downsampling rounds int [ ] startPoints = Downsampling . getStartPoints ( currentSamplingLevel , newSamplingLevel ) ; // calculate new off-heap size int newKeyCount = existing . size ( ) ; long newEntriesLength = existing . getEntriesLength ( ) ; for ( int start : startPoints ) { for ( int j = start ; j < existing . size ( ) ; j += currentSamplingLevel ) { newKeyCount -- ; long length = existing . getEndInSummary ( j ) - existing . getPositionInSummary ( j ) ; newEntriesLength -= length ; } } Memory oldEntries = existing . getEntries ( ) ; Memory newOffsets = Memory . allocate ( newKeyCount * 4 ) ; Memory newEntries = Memory . allocate ( newEntriesLength ) ; // Copy old entries to our new Memory. int i = 0 ; int newEntriesOffset = 0 ; outer : for ( int oldSummaryIndex = 0 ; oldSummaryIndex < existing . size ( ) ; oldSummaryIndex ++ ) { // to determine if we can skip this entry, go through the starting points for our downsampling rounds // and see if the entry's index is covered by that round for ( int start : startPoints ) { if ( ( oldSummaryIndex - start ) % currentSamplingLevel == 0 ) continue outer ; } // write the position of the actual entry in the index summary (4 bytes) newOffsets . setInt ( i * 4 , newEntriesOffset ) ; i ++ ; long start = existing . 
getPositionInSummary ( oldSummaryIndex ) ; long length = existing . getEndInSummary ( oldSummaryIndex ) - start ; newEntries . put ( newEntriesOffset , oldEntries , start , length ) ; newEntriesOffset += length ; } assert newEntriesOffset == newEntriesLength ; return new IndexSummary ( partitioner , newOffsets , newKeyCount , newEntries , newEntriesLength , existing . getMaxNumberOfEntries ( ) , minIndexInterval , newSamplingLevel ) ; }
Downsamples an existing index summary to a new sampling level .
594
13
12,168
public void update ( double p , long m ) { Long mi = bin . get ( p ) ; if ( mi != null ) { // we found the same p so increment that counter bin . put ( p , mi + m ) ; } else { bin . put ( p , m ) ; // if bin size exceeds maximum bin size then trim down to max size while ( bin . size ( ) > maxBinSize ) { // find points p1, p2 which have smallest difference Iterator < Double > keys = bin . keySet ( ) . iterator ( ) ; double p1 = keys . next ( ) ; double p2 = keys . next ( ) ; double smallestDiff = p2 - p1 ; double q1 = p1 , q2 = p2 ; while ( keys . hasNext ( ) ) { p1 = p2 ; p2 = keys . next ( ) ; double diff = p2 - p1 ; if ( diff < smallestDiff ) { smallestDiff = diff ; q1 = p1 ; q2 = p2 ; } } // merge those two long k1 = bin . remove ( q1 ) ; long k2 = bin . remove ( q2 ) ; bin . put ( ( q1 * k1 + q2 * k2 ) / ( k1 + k2 ) , k1 + k2 ) ; } } }
Adds new point p with value m to this histogram .
287
12
12,169
public RateLimiter getRateLimiter ( ) { double currentThroughput = DatabaseDescriptor . getCompactionThroughputMbPerSec ( ) * 1024.0 * 1024.0 ; // if throughput is set to 0, throttling is disabled if ( currentThroughput == 0 || StorageService . instance . isBootstrapMode ( ) ) currentThroughput = Double . MAX_VALUE ; if ( compactionRateLimiter . getRate ( ) != currentThroughput ) compactionRateLimiter . setRate ( currentThroughput ) ; return compactionRateLimiter ; }
Gets compaction rate limiter . When compaction_throughput_mb_per_sec is 0 or node is bootstrapping this returns rate limiter with the rate of Double . MAX_VALUE bytes per second . Rate unit is bytes per sec .
122
53
12,170
private SSTableReader lookupSSTable ( final ColumnFamilyStore cfs , Descriptor descriptor ) { for ( SSTableReader sstable : cfs . getSSTables ( ) ) { if ( sstable . descriptor . equals ( descriptor ) ) return sstable ; } return null ; }
This is not efficient do not use in any critical path
64
11
12,171
public Future < Object > submitValidation ( final ColumnFamilyStore cfStore , final Validator validator ) { Callable < Object > callable = new Callable < Object > ( ) { public Object call ( ) throws IOException { try { doValidationCompaction ( cfStore , validator ) ; } catch ( Throwable e ) { // we need to inform the remote end of our failure, otherwise it will hang on repair forever validator . fail ( ) ; throw e ; } return this ; } } ; return validationExecutor . submit ( callable ) ; }
Does not mutate data so is not scheduled .
120
10
12,172
static boolean needsCleanup ( SSTableReader sstable , Collection < Range < Token > > ownedRanges ) { assert ! ownedRanges . isEmpty ( ) ; // cleanup checks for this // unwrap and sort the ranges by LHS token List < Range < Token > > sortedRanges = Range . normalize ( ownedRanges ) ; // see if there are any keys LTE the token for the start of the first range // (token range ownership is exclusive on the LHS.) Range < Token > firstRange = sortedRanges . get ( 0 ) ; if ( sstable . first . getToken ( ) . compareTo ( firstRange . left ) <= 0 ) return true ; // then, iterate over all owned ranges and see if the next key beyond the end of the owned // range falls before the start of the next range for ( int i = 0 ; i < sortedRanges . size ( ) ; i ++ ) { Range < Token > range = sortedRanges . get ( i ) ; if ( range . right . isMinimum ( ) ) { // we split a wrapping range and this is the second half. // there can't be any keys beyond this (and this is the last range) return false ; } DecoratedKey firstBeyondRange = sstable . firstKeyBeyond ( range . right . maxKeyBound ( ) ) ; if ( firstBeyondRange == null ) { // we ran off the end of the sstable looking for the next key; we don't need to check any more ranges return false ; } if ( i == ( sortedRanges . size ( ) - 1 ) ) { // we're at the last range and we found a key beyond the end of the range return true ; } Range < Token > nextRange = sortedRanges . get ( i + 1 ) ; if ( ! nextRange . contains ( firstBeyondRange . getToken ( ) ) ) { // we found a key in between the owned ranges return true ; } } return false ; }
Determines if a cleanup would actually remove any data in this SSTable based on a set of owned ranges .
416
24
12,173
public Future < ? > submitIndexBuild ( final SecondaryIndexBuilder builder ) { Runnable runnable = new Runnable ( ) { public void run ( ) { metrics . beginCompaction ( builder ) ; try { builder . build ( ) ; } finally { metrics . finishCompaction ( builder ) ; } } } ; if ( executor . isShutdown ( ) ) { logger . info ( "Compaction executor has shut down, not submitting index build" ) ; return null ; } return executor . submit ( runnable ) ; }
Is not scheduled because it is performing disjoint work from sstable compaction .
117
17
12,174
public void interruptCompactionFor ( Iterable < CFMetaData > columnFamilies , boolean interruptValidation ) { assert columnFamilies != null ; // interrupt in-progress compactions for ( Holder compactionHolder : CompactionMetrics . getCompactions ( ) ) { CompactionInfo info = compactionHolder . getCompactionInfo ( ) ; if ( ( info . getTaskType ( ) == OperationType . VALIDATION ) && ! interruptValidation ) continue ; if ( Iterables . contains ( columnFamilies , info . getCFMetaData ( ) ) ) compactionHolder . stop ( ) ; // signal compaction to stop } }
Try to stop all of the compactions for given ColumnFamilies .
142
15
12,175
private void fastAddAll ( ArrayBackedSortedColumns other ) { if ( other . isInsertReversed ( ) == isInsertReversed ( ) ) { cells = Arrays . copyOf ( other . cells , other . cells . length ) ; size = other . size ; sortedSize = other . sortedSize ; isSorted = other . isSorted ; } else { if ( cells . length < other . getColumnCount ( ) ) cells = new Cell [ Math . max ( MINIMAL_CAPACITY , other . getColumnCount ( ) ) ] ; Iterator < Cell > iterator = reversed ? other . reverseIterator ( ) : other . iterator ( ) ; while ( iterator . hasNext ( ) ) cells [ size ++ ] = iterator . next ( ) ; sortedSize = size ; isSorted = true ; } }
Fast path when this ABSC is empty .
180
9
12,176
private void internalRemove ( int index ) { int moving = size - index - 1 ; if ( moving > 0 ) System . arraycopy ( cells , index + 1 , cells , index , moving ) ; cells [ -- size ] = null ; }
Remove the cell at a given index shifting the rest of the array to the left if needed . Please note that we mostly remove from the end so the shifting should be rare .
51
35
12,177
private void reconcileWith ( int i , Cell cell ) { cells [ i ] = cell . reconcile ( cells [ i ] ) ; }
Reconcile with a cell at position i . Assume that i is a valid position .
28
20
12,178
private Region getRegion ( ) { while ( true ) { // Try to get the region Region region = currentRegion . get ( ) ; if ( region != null ) return region ; // No current region, so we want to allocate one. We race // against other allocators to CAS in a Region, and if we fail we stash the region for re-use region = RACE_ALLOCATED . poll ( ) ; if ( region == null ) region = new Region ( allocateOnHeapOnly ? ByteBuffer . allocate ( REGION_SIZE ) : ByteBuffer . allocateDirect ( REGION_SIZE ) ) ; if ( currentRegion . compareAndSet ( null , region ) ) { if ( ! allocateOnHeapOnly ) offHeapRegions . add ( region ) ; regionCount . incrementAndGet ( ) ; logger . trace ( "{} regions now allocated in {}" , regionCount , this ) ; return region ; } // someone else won race - that's fine, we'll try to grab theirs // in the next iteration of the loop. RACE_ALLOCATED . add ( region ) ; } }
Get the current region or if there is no current region allocate a new one
234
15
12,179
public static void skipIndex ( DataInput in ) throws IOException { /* read only the column index list */ int columnIndexSize = in . readInt ( ) ; /* skip the column index data */ if ( in instanceof FileDataInput ) { FileUtils . skipBytesFully ( in , columnIndexSize ) ; } else { // skip bytes byte [ ] skip = new byte [ columnIndexSize ] ; in . readFully ( skip ) ; } }
Skip the index
97
3
12,180
/**
 * Deserializes the column index into a list of IndexInfo entries.
 *
 * @return the deserialized entries, or an empty list when no index was written
 */
public static List<IndexInfo> deserializeIndex(FileDataInput in, CType type) throws IOException
{
    int serializedSize = in.readInt();
    if (serializedSize == 0)
        return Collections.<IndexInfo>emptyList();

    List<IndexInfo> entries = new ArrayList<IndexInfo>();
    ISerializer<IndexInfo> serializer = type.indexSerializer();

    // Keep reading entries until exactly 'serializedSize' bytes are consumed.
    FileMark start = in.mark();
    while (in.bytesPastMark(start) < serializedSize)
        entries.add(serializer.deserialize(in));
    assert in.bytesPastMark(start) == serializedSize;

    return entries;
}
Deserialize the index into a structure and return it
156
11
12,181
/**
 * Builds a new timer and adds it to the set of running timers for the given
 * operation type.
 *
 * @param opType      name the timer is grouped under
 * @param sampleCount number of samples the timer retains
 * @return the newly created (and registered) timer
 */
public Timer newTimer(String opType, int sampleCount)
{
    final Timer timer = new Timer(sampleCount);

    // Single lookup instead of the previous containsKey/put/get triple, which
    // also narrowed a check-then-act window on the shared map.
    List<Timer> group = timers.get(opType);
    if (group == null)
    {
        group = new ArrayList<Timer>();
        timers.put(opType, group);
    }
    group.add(timer);

    return timer;
}
build a new timer and add it to the set of running timers .
75
14
12,182
/**
 * Fills the deprecated RecordWriter interface for streaming; simply delegates
 * to the argument-less close().
 *
 * @param reporter unused
 */
@Deprecated
public void close(org.apache.hadoop.mapred.Reporter reporter) throws IOException
{
    close();
}
Fills the deprecated RecordWriter interface for streaming .
30
10
12,183
/**
 * Creates a bare SelectStatement for the given selection (no bound terms, no
 * restrictions). NOTE(review): presumably only used internally to process
 * already-queried data through processColumnFamily rather than to execute a
 * real query — confirm against callers.
 */
static SelectStatement forSelection(CFMetaData cfm, Selection selection)
{
    return new SelectStatement(cfm, 0, defaultParameters, selection, null);
}
Processes already-queried data through processColumnFamily .
36
8
12,184
/**
 * Returns true if a non-frozen (multi-cell) collection column is selected,
 * false otherwise.
 */
private boolean selectACollection()
{
    // Cheap pre-check: no collection columns in the comparator at all.
    if (!cfm.comparator.hasCollections())
        return false;

    boolean found = false;
    for (ColumnDefinition def : selection.getColumns())
    {
        if (def.type.isCollection() && def.type.isMultiCell())
        {
            found = true;
            break;
        }
    }
    return found;
}
Returns true if a non - frozen collection is selected false otherwise .
71
13
12,185
/**
 * Adds an end-of-component marker to the specified composite, closing it at
 * its end or its start depending on the requested bound.
 */
private static Composite addEOC(Composite composite, Bound eocBound)
{
    if (eocBound == Bound.END)
        return composite.end();
    return composite.start();
}
Adds an EOC to the specified Composite .
38
9
12,186
/**
 * Appends the given value to the builder, rejecting nulls.
 *
 * @throws InvalidRequestException if {@code value} is null
 */
private static void addValue(CBuilder builder, ColumnDefinition def, ByteBuffer value) throws InvalidRequestException
{
    if (value != null)
    {
        builder.add(value);
        return;
    }
    throw new InvalidRequestException(String.format("Invalid null value in condition for column %s", def.name));
}
Adds the specified value to the specified builder
66
8
12,187
/**
 * Turns the cells of one partition into rows of the result set, honoring the
 * selection's column order. Also used by ModificationStatement for CAS
 * operations.
 *
 * @param key     partition key of the column family being processed
 * @param cf      the queried cells for that partition
 * @param options query options (used for slice restrictions and value lookup)
 * @param now     query timestamp used when grouping cells into CQL3 rows
 * @param result  builder the produced rows are appended to
 */
void processColumnFamily(ByteBuffer key, ColumnFamily cf, QueryOptions options, long now, Selection.ResultSetBuilder result) throws InvalidRequestException
{
    CFMetaData cfm = cf.metadata();

    // Split a composite partition key into its components; a plain key is a
    // single-element array so def.position() indexing works either way.
    ByteBuffer[] keyComponents = null;
    if (cfm.getKeyValidator() instanceof CompositeType)
    {
        keyComponents = ((CompositeType) cfm.getKeyValidator()).split(key);
    }
    else
    {
        keyComponents = new ByteBuffer[]{ key };
    }

    Iterator<Cell> cells = cf.getSortedColumns().iterator();
    if (sliceRestriction != null)
        cells = applySliceRestriction(cells, options);

    // Group raw cells into CQL3 rows.
    CQL3Row.RowIterator iter = cfm.comparator.CQL3RowBuilder(cfm, now).group(cells);

    // If there is static columns but there is no non-static row, then provided the select was a full
    // partition selection (i.e. not a 2ndary index search and there was no condition on clustering columns)
    // then we want to include the static columns in the result set (and we're done).
    CQL3Row staticRow = iter.getStaticRow();
    if (staticRow != null && !iter.hasNext() && !usesSecondaryIndexing && hasNoClusteringColumnsRestriction())
    {
        result.newRow();
        for (ColumnDefinition def : selection.getColumns())
        {
            switch (def.kind)
            {
                case PARTITION_KEY:
                    result.add(keyComponents[def.position()]);
                    break;
                case STATIC:
                    addValue(result, def, staticRow, options);
                    break;
                default:
                    // Non-static, non-key columns have no value in a static-only row.
                    result.add((ByteBuffer) null);
            }
        }
        return;
    }

    while (iter.hasNext())
    {
        CQL3Row cql3Row = iter.next();

        // Respect requested order
        result.newRow();
        // Respect selection order
        for (ColumnDefinition def : selection.getColumns())
        {
            switch (def.kind)
            {
                case PARTITION_KEY:
                    result.add(keyComponents[def.position()]);
                    break;
                case CLUSTERING_COLUMN:
                    result.add(cql3Row.getClusteringColumn(def.position()));
                    break;
                case COMPACT_VALUE:
                    result.add(cql3Row.getColumn(null));
                    break;
                case REGULAR:
                    addValue(result, def, cql3Row, options);
                    break;
                case STATIC:
                    addValue(result, def, staticRow, options);
                    break;
            }
        }
    }
}
Used by ModificationStatement for CAS operations
582
8
12,188
/**
 * Checks if the specified collection column is restricted by more than one
 * CONTAINS and/or CONTAINS KEY relation.
 */
private boolean isRestrictedByMultipleContains(ColumnDefinition columnDef)
{
    // Only collection columns can carry CONTAINS restrictions.
    if (!columnDef.type.isCollection())
        return false;

    Restriction restriction = metadataRestrictions.get(columnDef.name);
    if (restriction instanceof Contains)
    {
        Contains contains = (Contains) restriction;
        int total = contains.numberOfValues() + contains.numberOfKeys();
        return total > 1;
    }
    return false;
}
Checks if the specified column is restricted by multiple contains or contains key .
92
15
12,189
/**
 * Checks to see if the timer is dead; if not, requests a report (to be
 * fulfilled later), and otherwise fulfills the request itself from the final
 * report. Synchronized against close(), which publishes finalReport.
 */
synchronized void requestReport(CountDownLatch signal)
{
    if (finalReport != null)
    {
        // Timer already closed: serve the final report, replace it with an
        // empty interval, and release the waiter immediately.
        report = finalReport;
        finalReport = new TimingInterval(0);
        signal.countDown();
    }
    else
        // Timer still live: park the request for the timer to fulfill later.
        reportRequest = signal;
}
checks to see if the timer is dead ; if not requests a report and otherwise fulfills the request itself
54
21
12,190
/**
 * Closes the timer; if a request is outstanding it furnishes the request,
 * otherwise it populates finalReport for a later requestReport() call.
 */
public synchronized void close()
{
    if (reportRequest == null)
        // Nobody waiting: stash the final report for the next requester.
        finalReport = buildReport();
    else
    {
        // A requester is parked: mark the timer closed with an empty final
        // interval, hand over the freshly built report, and release the waiter.
        finalReport = new TimingInterval(0);
        report = buildReport();
        reportRequest.countDown();
        reportRequest = null;
    }
}
closes the timer ; if a request is outstanding it furnishes the request otherwise it populates finalReport
58
21
12,191
/**
 * Streams the required sections of the sstable data file to the given channel,
 * LZF-compressing the bytes and, when a CRC component exists on disk,
 * validating each checksum chunk as it is read.
 *
 * @param channel destination channel
 * @throws IOException on any read, validation or write failure
 */
public void write(WritableByteChannel channel) throws IOException
{
    long totalSize = totalSize();
    RandomAccessReader file = sstable.openDataReader();
    ChecksumValidator validator = new File(sstable.descriptor.filenameFor(Component.CRC)).exists()
                                ? DataIntegrityMetadata.checksumValidator(sstable.descriptor)
                                : null;

    // With a validator the buffer must hold a whole checksum chunk; otherwise
    // a fixed default size is used.
    transferBuffer = validator == null ? new byte[DEFAULT_CHUNK_SIZE] : new byte[validator.chunkSize];

    // setting up data compression stream
    compressedOutput = new LZFOutputStream(Channels.newOutputStream(channel));
    long progress = 0L;

    try
    {
        // stream each of the required sections of the file
        for (Pair<Long, Long> section : sections)
        {
            // Align reads to the start of the containing checksum chunk;
            // readOffset is how far into that chunk the section actually begins.
            long start = validator == null ? section.left : validator.chunkStart(section.left);
            int readOffset = (int) (section.left - start);
            // seek to the beginning of the section
            file.seek(start);
            if (validator != null)
                validator.seek(start);

            // length of the section to read
            long length = section.right - start;

            // tracks write progress
            long bytesRead = 0;
            while (bytesRead < length)
            {
                long lastBytesRead = write(file, validator, readOffset, length, bytesRead);
                bytesRead += lastBytesRead;
                progress += (lastBytesRead - readOffset);
                session.progress(sstable.descriptor, ProgressInfo.Direction.OUT, progress, totalSize);
                // Only the first buffer of a section carries a chunk offset.
                readOffset = 0;
            }

            // make sure that current section is send
            compressedOutput.flush();
        }
    }
    finally
    {
        // no matter what happens close file
        FileUtils.closeQuietly(file);
        FileUtils.closeQuietly(validator);
    }
}
Stream file of specified sections to given channel .
396
9
12,192
/**
 * Sequentially reads bytes from the file and writes them to the compressed
 * output stream.
 *
 * @param reader           file to read from
 * @param validator        checksum validator for the data read, or null
 * @param start            offset into the buffer at which the bytes to send begin
 *                         (non-zero only for the first, chunk-aligned read of a section)
 * @param length           total length of the section being streamed
 * @param bytesTransferred bytes of this section already streamed
 * @return the number of bytes the transfer advanced by (toTransfer)
 */
protected long write(RandomAccessReader reader, ChecksumValidator validator, int start, long length, long bytesTransferred) throws IOException
{
    // How much of the section remains, capped by the buffer size.
    int toTransfer = (int) Math.min(transferBuffer.length, length - bytesTransferred);
    // How much the file can actually supply from the current position.
    int minReadable = (int) Math.min(transferBuffer.length, reader.length() - reader.getFilePointer());

    reader.readFully(transferBuffer, 0, minReadable);
    if (validator != null)
        validator.validate(transferBuffer, 0, minReadable);

    // Throttle and send only the bytes past 'start', skipping any leading
    // bytes that belong to the chunk but not to the requested section.
    limiter.acquire(toTransfer - start);
    compressedOutput.write(transferBuffer, start, (toTransfer - start));

    return toTransfer;
}
Sequentially read bytes from the file and write them to the output stream
159
14
12,193
/**
 * Memory a ByteBuffer array consumes on heap: the sum of the per-element
 * sizes plus the overhead of the array object itself.
 */
public static long sizeOnHeapOf(ByteBuffer[] array)
{
    long total = 0;
    for (ByteBuffer buffer : array)
    {
        if (buffer != null)
            total += sizeOnHeapOf(buffer);
    }
    return total + sizeOfArray(array);
}
Memory a ByteBuffer array consumes .
80
7
12,194
public static long sizeOnHeapOf ( ByteBuffer buffer ) { if ( buffer . isDirect ( ) ) return BUFFER_EMPTY_SIZE ; // if we're only referencing a sub-portion of the ByteBuffer, don't count the array overhead (assume it's slab // allocated, so amortized over all the allocations the overhead is negligible and better to undercount than over) if ( buffer . capacity ( ) > buffer . remaining ( ) ) return buffer . remaining ( ) ; return BUFFER_EMPTY_SIZE + sizeOfArray ( buffer . capacity ( ) , 1 ) ; }
Memory a byte buffer consumes
126
5
12,195
/**
 * Returns a ThreadPoolExecutor with a fixed maximum number of threads, but
 * whose threads are terminated when idle for too long. When all threads are
 * actively executing tasks, new tasks are queued (unbounded queue).
 *
 * @param threadPoolName name used to label the pool's threads
 * @param size           maximum (core) pool size
 * @param keepAliveTime  idle time before a thread is terminated
 * @param unit           unit of {@code keepAliveTime}
 */
public static DebuggableThreadPoolExecutor createWithMaximumPoolSize(String threadPoolName, int size, int keepAliveTime, TimeUnit unit)
{
    LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<Runnable>();
    NamedThreadFactory threadFactory = new NamedThreadFactory(threadPoolName);
    return new DebuggableThreadPoolExecutor(size, Integer.MAX_VALUE, keepAliveTime, unit, workQueue, threadFactory);
}
Returns a ThreadPoolExecutor with a fixed maximum number of threads but whose threads are terminated when idle for too long . When all threads are actively executing tasks new tasks are queued .
85
37
12,196
// execute does not call newTaskFor, so tracing state has to be handled here:
// when tracing, wrap bare commands in a TraceSessionWrapper before submission.
@Override
public void execute(Runnable command)
{
    if (isTracing() && !(command instanceof TraceSessionWrapper))
    {
        super.execute(new TraceSessionWrapper<Object>(Executors.callable(command, null)));
    }
    else
    {
        super.execute(command);
    }
}
execute does not call newTaskFor
60
7
12,197
/**
 * Execute SET statement: parses the column family / key / (super)column path
 * out of the AST, converts the value to bytes, applies an optional TTL, and
 * performs the Thrift insert.
 */
private void executeSet(Tree statement) throws TException, InvalidRequestException, UnavailableException, TimedOutException
{
    if (!CliMain.isConnected() || !hasKeySpace())
        return;

    long startTime = System.nanoTime();

    // ^(NODE_COLUMN_ACCESS <cf> <key> <column>)
    Tree columnFamilySpec = statement.getChild(0);
    Tree keyTree = columnFamilySpec.getChild(1); // could be a function or regular text

    String columnFamily = CliCompiler.getColumnFamily(columnFamilySpec, currentCfDefs());
    CfDef cfDef = getCfDef(columnFamily);
    int columnSpecCnt = CliCompiler.numColumnSpecifiers(columnFamilySpec);
    String value = CliUtils.unescapeSQLString(statement.getChild(1).getText());
    Tree valueTree = statement.getChild(1);

    byte[] superColumnName = null;
    ByteBuffer columnName;

    // keyspace.cf['key']
    if (columnSpecCnt == 0)
    {
        sessionState.err.println("No cell name specified, (type 'help;' or '?' for help on syntax).");
        return;
    }
    // keyspace.cf['key']['column'] = 'value'
    else if (columnSpecCnt == 1)
    {
        // get the column name
        if (cfDef.column_type.equals("Super"))
        {
            sessionState.out.println("Column family " + columnFamily + " may only contain SuperColumns");
            return;
        }
        columnName = getColumnName(columnFamily, columnFamilySpec.getChild(2));
    }
    // keyspace.cf['key']['super_column']['column'] = 'value'
    else
    {
        assert (columnSpecCnt == 2) : "serious parsing error (this is a bug).";
        superColumnName = getColumnName(columnFamily, columnFamilySpec.getChild(2)).array();
        columnName = getSubColumnName(columnFamily, columnFamilySpec.getChild(3));
    }

    // Function-call values are evaluated; plain text is converted per the
    // column's validation class.
    ByteBuffer columnValueInBytes;
    switch (valueTree.getType())
    {
        case CliParser.FUNCTION_CALL:
            columnValueInBytes = convertValueByFunction(valueTree, cfDef, columnName, true);
            break;
        default:
            columnValueInBytes = columnValueAsBytes(columnName, columnFamily, value);
    }

    ColumnParent parent = new ColumnParent(columnFamily);
    if (superColumnName != null)
        parent.setSuper_column(superColumnName);

    Column columnToInsert = new Column(columnName).setValue(columnValueInBytes).setTimestamp(FBUtilities.timestampMicros());

    // children count = 3 mean that we have ttl in arguments
    if (statement.getChildCount() == 3)
    {
        String ttl = statement.getChild(2).getText();

        try
        {
            columnToInsert.setTtl(Integer.parseInt(ttl));
        }
        catch (NumberFormatException e)
        {
            sessionState.err.println(String.format("TTL '%s' is invalid, should be a positive integer.", ttl));
            return;
        }
        catch (Exception e)
        {
            throw new RuntimeException(e);
        }
    }

    // do the insert
    thriftClient.insert(getKeyAsBytes(columnFamily, keyTree), parent, columnToInsert, consistencyLevel);
    sessionState.out.println("Value inserted.");
    elapsedTime(startTime);
}
Execute SET statement
792
4
12,198
/**
 * Execute INCR statement: parses the counter's column family / key /
 * (super)column path, reads an optional "by" amount, and issues the Thrift
 * counter add. NOTE(review): 'multiplier' appears to be +1 for INCR and -1
 * for DECR, judging by the final message — confirm against callers.
 */
private void executeIncr(Tree statement, long multiplier) throws TException, NotFoundException, InvalidRequestException, UnavailableException, TimedOutException
{
    if (!CliMain.isConnected() || !hasKeySpace())
        return;

    Tree columnFamilySpec = statement.getChild(0);
    String columnFamily = CliCompiler.getColumnFamily(columnFamilySpec, currentCfDefs());
    ByteBuffer key = getKeyAsBytes(columnFamily, columnFamilySpec.getChild(1));
    int columnSpecCnt = CliCompiler.numColumnSpecifiers(columnFamilySpec);

    byte[] superColumnName = null;
    ByteBuffer columnName;

    // keyspace.cf['key']['column'] -- incr standard
    if (columnSpecCnt == 1)
    {
        columnName = getColumnName(columnFamily, columnFamilySpec.getChild(2));
    }
    // keyspace.cf['key']['column']['column'] -- incr super
    else if (columnSpecCnt == 2)
    {
        superColumnName = getColumnName(columnFamily, columnFamilySpec.getChild(2)).array();
        columnName = getSubColumnName(columnFamily, columnFamilySpec.getChild(3));
    }
    // The parser groks an arbitrary number of these so it is possible to get here.
    else
    {
        sessionState.out.println("Invalid row, super column, or column specification.");
        return;
    }

    ColumnParent parent = new ColumnParent(columnFamily);
    if (superColumnName != null)
        parent.setSuper_column(superColumnName);

    long value = 1L;

    // children count = 2 means an explicit "by" amount was supplied in the arguments
    if (statement.getChildCount() == 2)
    {
        String byValue = statement.getChild(1).getText();

        try
        {
            value = Long.parseLong(byValue);
        }
        catch (NumberFormatException e)
        {
            sessionState.err.println(String.format("'%s' is an invalid value, should be an integer.", byValue));
            return;
        }
        catch (Exception e)
        {
            throw new RuntimeException(e);
        }
    }

    CounterColumn columnToInsert = new CounterColumn(columnName, multiplier * value);

    // do the insert
    thriftClient.add(key, parent, columnToInsert, consistencyLevel);
    sessionState.out.printf("Value %s%n", multiplier < 0 ? "decremented." : "incremented.");
}
Execute INCR statement
547
5
12,199
private void executeAddKeySpace ( Tree statement ) { if ( ! CliMain . isConnected ( ) ) return ; // first value is the keyspace name, after that it is all key=value String keyspaceName = CliUtils . unescapeSQLString ( statement . getChild ( 0 ) . getText ( ) ) ; KsDef ksDef = new KsDef ( keyspaceName , DEFAULT_PLACEMENT_STRATEGY , new LinkedList < CfDef > ( ) ) ; try { String mySchemaVersion = thriftClient . system_add_keyspace ( updateKsDefAttributes ( statement , ksDef ) ) ; sessionState . out . println ( mySchemaVersion ) ; keyspacesMap . put ( keyspaceName , thriftClient . describe_keyspace ( keyspaceName ) ) ; } catch ( InvalidRequestException e ) { throw new RuntimeException ( e ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } }
Add a keyspace
216
4