idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
8,300
public void execute ( Runnable task ) { tasksInProcess . incrementAndGet ( ) ; try { super . execute ( task ) ; } catch ( RuntimeException e ) { tasksInProcess . decrementAndGet ( ) ; throw e ; } catch ( Error e ) { tasksInProcess . decrementAndGet ( ) ; throw e ; } }
Before calling super s version of this method the amount of tasks which are currently in process is first incremented .
8,301
public static int getEffectiveIndexIntervalAfterIndex ( int index , int samplingLevel , int minIndexInterval ) { assert index >= 0 ; index %= samplingLevel ; List < Integer > originalIndexes = getOriginalIndexes ( samplingLevel ) ; int nextEntryOriginalIndex = ( index == originalIndexes . size ( ) - 1 ) ? BASE_SAMPLING_LEVEL : originalIndexes . get ( index + 1 ) ; return ( nextEntryOriginalIndex - originalIndexes . get ( index ) ) * minIndexInterval ; }
Calculates the effective index interval after the entry at index in an IndexSummary . In other words this returns the number of partitions in the primary on - disk index before the next partition that has an entry in the index summary . If samplingLevel == BASE_SAMPLING_LEVEL this will be equal to the index interval .
8,302
Allocation allocate ( Mutation mutation , int size ) { final OpOrder . Group opGroup = appendOrder . start ( ) ; try { int position = allocate ( size ) ; if ( position < 0 ) { opGroup . close ( ) ; return null ; } markDirty ( mutation , position ) ; return new Allocation ( this , opGroup , position , ( ByteBuffer ) buffer . duplicate ( ) . position ( position ) . limit ( position + size ) ) ; } catch ( Throwable t ) { opGroup . close ( ) ; throw t ; } }
Allocate space in this buffer for the provided mutation and return the allocated Allocation object . Returns null if there is not enough space in this segment and a new segment is needed .
8,303
private int allocate ( int size ) { while ( true ) { int prev = allocatePosition . get ( ) ; int next = prev + size ; if ( next >= buffer . capacity ( ) ) return - 1 ; if ( allocatePosition . compareAndSet ( prev , next ) ) return prev ; } }
allocate bytes in the segment or return - 1 if not enough space
8,304
void discardUnusedTail ( ) { try ( OpOrder . Group group = appendOrder . start ( ) ) { while ( true ) { int prev = allocatePosition . get ( ) ; int next = buffer . capacity ( ) + 1 ; if ( prev == next ) return ; if ( allocatePosition . compareAndSet ( prev , next ) ) { discardedTailFrom = prev ; return ; } } } }
ensures no more of this segment is writeable by allocating any unused section at the end and marking it discarded
8,305
synchronized void sync ( ) { try { if ( allocatePosition . get ( ) <= lastSyncedOffset + SYNC_MARKER_SIZE ) return ; int nextMarker ; nextMarker = allocate ( SYNC_MARKER_SIZE ) ; boolean close = false ; if ( nextMarker < 0 ) { discardUnusedTail ( ) ; close = true ; waitForModifications ( ) ; if ( discardedTailFrom < buffer . capacity ( ) - SYNC_MARKER_SIZE ) { nextMarker = discardedTailFrom ; } else { nextMarker = buffer . capacity ( ) ; } } else { waitForModifications ( ) ; } assert nextMarker > lastSyncedOffset ; int offset = lastSyncedOffset ; final PureJavaCrc32 crc = new PureJavaCrc32 ( ) ; crc . updateInt ( ( int ) ( id & 0xFFFFFFFFL ) ) ; crc . updateInt ( ( int ) ( id >>> 32 ) ) ; crc . updateInt ( offset ) ; buffer . putInt ( offset , nextMarker ) ; buffer . putInt ( offset + 4 , crc . getCrc ( ) ) ; if ( nextMarker < buffer . capacity ( ) ) { buffer . putInt ( nextMarker , 0 ) ; buffer . putInt ( nextMarker + 4 , 0 ) ; } buffer . force ( ) ; if ( close ) nextMarker = buffer . capacity ( ) ; lastSyncedOffset = nextMarker ; syncComplete . signalAll ( ) ; CLibrary . trySkipCache ( fd , offset , nextMarker ) ; if ( close ) internalClose ( ) ; } catch ( Exception e ) { throw new FSWriteError ( e , getPath ( ) ) ; } }
Forces a disk flush for this segment file .
8,306
CommitLogSegment recycle ( ) { try { sync ( ) ; } catch ( FSWriteError e ) { logger . error ( "I/O error flushing {} {}" , this , e . getMessage ( ) ) ; throw e ; } close ( ) ; return new CommitLogSegment ( getPath ( ) ) ; }
Recycle processes an unneeded segment file for reuse .
8,307
public synchronized void markClean ( UUID cfId , ReplayPosition context ) { if ( ! cfDirty . containsKey ( cfId ) ) return ; if ( context . segment == id ) markClean ( cfId , context . position ) ; else if ( context . segment > id ) markClean ( cfId , Integer . MAX_VALUE ) ; }
Marks the ColumnFamily specified by cfId as clean for this log segment . If the given context argument is contained in this file it will only mark the CF as clean if no newer writes have taken place .
8,308
public String dirtyString ( ) { StringBuilder sb = new StringBuilder ( ) ; for ( UUID cfId : getDirtyCFIDs ( ) ) { CFMetaData m = Schema . instance . getCFMetaData ( cfId ) ; sb . append ( m == null ? "<deleted>" : m . cfName ) . append ( " (" ) . append ( cfId ) . append ( "), " ) ; } return sb . toString ( ) ; }
For debugging not fast
8,309
public StreamPlan transferFiles ( InetAddress to , Collection < StreamSession . SSTableStreamingSections > sstableDetails ) { coordinator . transferFiles ( to , sstableDetails ) ; return this ; }
Add transfer task to send given SSTable files .
8,310
public Set < PermissionDetails > list ( AuthenticatedUser performer , Set < Permission > permissions , IResource resource , String of ) throws RequestValidationException , RequestExecutionException { if ( ! performer . isSuper ( ) && ! performer . getName ( ) . equals ( of ) ) throw new UnauthorizedException ( String . format ( "You are not authorized to view %s's permissions" , of == null ? "everyone" : of ) ) ; Set < PermissionDetails > details = new HashSet < PermissionDetails > ( ) ; for ( UntypedResultSet . Row row : process ( buildListQuery ( resource , of ) ) ) { if ( row . has ( PERMISSIONS ) ) { for ( String p : row . getSet ( PERMISSIONS , UTF8Type . instance ) ) { Permission permission = Permission . valueOf ( p ) ; if ( permissions . contains ( permission ) ) details . add ( new PermissionDetails ( row . getString ( USERNAME ) , DataResource . fromName ( row . getString ( RESOURCE ) ) , permission ) ) ; } } } return details ; }
allowed to see their own permissions .
8,311
public void revokeAll ( String droppedUser ) { try { process ( String . format ( "DELETE FROM %s.%s WHERE username = '%s'" , Auth . AUTH_KS , PERMISSIONS_CF , escape ( droppedUser ) ) ) ; } catch ( RequestExecutionException e ) { logger . warn ( "CassandraAuthorizer failed to revoke all permissions of {}: {}" , droppedUser , e ) ; } }
Called prior to deleting the user with DROP USER query . Internal hook so no permission checks are needed here .
8,312
public void submitSynchronous ( Runnable task ) { lock . writeLock ( ) . lock ( ) ; try { awaitInner ( ) ; task . run ( ) ; } catch ( InterruptedException e ) { Log . error ( e , "Task queue isolated submission interrupted" ) ; throw new RuntimeException ( e ) ; } catch ( Exception e ) { Log . error ( e , "Task queue isolated submission failed" ) ; throw new RuntimeException ( e ) ; } finally { lock . writeLock ( ) . unlock ( ) ; } }
Submits a non value - returning task for synchronous execution . It waits for all synchronous tasks to be completed .
8,313
public static int getfd ( FileDescriptor descriptor ) { Field field = FBUtilities . getProtectedField ( descriptor . getClass ( ) , "fd" ) ; if ( field == null ) return - 1 ; try { return field . getInt ( descriptor ) ; } catch ( Exception e ) { JVMStabilityInspector . inspectThrowable ( e ) ; logger . warn ( "unable to read fd field from FileDescriptor" ) ; } return - 1 ; }
Get system file descriptor from FileDescriptor object .
8,314
public boolean accepts ( OpOrder . Group opGroup , ReplayPosition replayPosition ) { OpOrder . Barrier barrier = this . writeBarrier ; if ( barrier == null ) return true ; if ( ! barrier . isAfter ( opGroup ) ) return false ; if ( replayPosition == null ) return true ; while ( true ) { ReplayPosition currentLast = lastReplayPosition . get ( ) ; if ( currentLast instanceof LastReplayPosition ) return currentLast . compareTo ( replayPosition ) >= 0 ; if ( currentLast != null && currentLast . compareTo ( replayPosition ) >= 0 ) return true ; if ( lastReplayPosition . compareAndSet ( currentLast , replayPosition ) ) return true ; } }
decide if this memtable should take the write or if it should go to the next memtable
8,315
long put ( DecoratedKey key , ColumnFamily cf , SecondaryIndexManager . Updater indexer , OpOrder . Group opGroup ) { AtomicBTreeColumns previous = rows . get ( key ) ; if ( previous == null ) { AtomicBTreeColumns empty = cf . cloneMeShallow ( AtomicBTreeColumns . factory , false ) ; final DecoratedKey cloneKey = allocator . clone ( key , opGroup ) ; previous = rows . putIfAbsent ( cloneKey , empty ) ; if ( previous == null ) { previous = empty ; int overhead = ( int ) ( cfs . partitioner . getHeapSizeOf ( key . getToken ( ) ) + ROW_OVERHEAD_HEAP_SIZE ) ; allocator . onHeap ( ) . allocate ( overhead , opGroup ) ; } else { allocator . reclaimer ( ) . reclaimImmediately ( cloneKey ) ; } } final Pair < Long , Long > pair = previous . addAllWithSizeDelta ( cf , allocator , opGroup , indexer ) ; liveDataSize . addAndGet ( pair . left ) ; currentOperations . addAndGet ( cf . getColumnCount ( ) + ( cf . isMarkedForDelete ( ) ? 1 : 0 ) + cf . deletionInfo ( ) . rangeCount ( ) ) ; return pair . right ; }
Should only be called by ColumnFamilyStore . apply via Keyspace . apply which supplies the appropriate OpOrdering .
8,316
public static void loadSchemas ( boolean updateVersion ) { ColumnFamilyStore schemaCFS = SystemKeyspace . schemaCFS ( SystemKeyspace . SCHEMA_KEYSPACES_CF ) ; if ( schemaCFS . estimateKeys ( ) == 0 ) { logger . info ( "Couldn't detect any schema definitions in local storage." ) ; if ( hasExistingNoSystemTables ( ) ) logger . info ( "Found keyspace data in data directories. Consider using cqlsh to define your schema." ) ; else logger . info ( "To create keyspaces and column families, see 'help create' in cqlsh." ) ; } else { Schema . instance . load ( DefsTables . loadFromKeyspace ( ) ) ; } if ( updateVersion ) Schema . instance . updateVersion ( ) ; }
Load schema definitions .
8,317
public static void createAllDirectories ( ) { try { if ( conf . data_file_directories . length == 0 ) throw new ConfigurationException ( "At least one DataFileDirectory must be specified" ) ; for ( String dataFileDirectory : conf . data_file_directories ) { FileUtils . createDirectory ( dataFileDirectory ) ; } if ( conf . commitlog_directory == null ) throw new ConfigurationException ( "commitlog_directory must be specified" ) ; FileUtils . createDirectory ( conf . commitlog_directory ) ; if ( conf . saved_caches_directory == null ) throw new ConfigurationException ( "saved_caches_directory must be specified" ) ; FileUtils . createDirectory ( conf . saved_caches_directory ) ; } catch ( ConfigurationException e ) { logger . error ( "Fatal error: {}" , e . getMessage ( ) ) ; System . err . println ( "Bad configuration; unable to start server" ) ; System . exit ( 1 ) ; } catch ( FSWriteError e ) { logger . error ( "Fatal error: {}" , e . getMessage ( ) ) ; System . err . println ( e . getCause ( ) . getMessage ( ) + "; unable to start server" ) ; System . exit ( 1 ) ; } }
Creates all storage - related directories .
8,318
public static long getTimeout ( MessagingService . Verb verb ) { switch ( verb ) { case READ : return getReadRpcTimeout ( ) ; case RANGE_SLICE : return getRangeRpcTimeout ( ) ; case TRUNCATE : return getTruncateRpcTimeout ( ) ; case READ_REPAIR : case MUTATION : case PAXOS_COMMIT : case PAXOS_PREPARE : case PAXOS_PROPOSE : return getWriteRpcTimeout ( ) ; case COUNTER_MUTATION : return getCounterWriteRpcTimeout ( ) ; default : return getRpcTimeout ( ) ; } }
not part of the Verb enum so we can change timeouts easily via JMX
8,319
public static SSLSocket getSocket ( EncryptionOptions options , InetAddress address , int port , InetAddress localAddress , int localPort ) throws IOException { SSLContext ctx = createSSLContext ( options , true ) ; SSLSocket socket = ( SSLSocket ) ctx . getSocketFactory ( ) . createSocket ( address , port , localAddress , localPort ) ; String [ ] suits = filterCipherSuites ( socket . getSupportedCipherSuites ( ) , options . cipher_suites ) ; socket . setEnabledCipherSuites ( suits ) ; socket . setEnabledProtocols ( ACCEPTED_PROTOCOLS ) ; return socket ; }
Create a socket and connect
8,320
public void collateOnDiskAtom ( ColumnFamily returnCF , Iterator < ? extends OnDiskAtom > toCollate , int gcBefore ) { filter . collectReducedColumns ( returnCF , gatherTombstones ( returnCF , toCollate ) , gcBefore , timestamp ) ; }
When there is only a single source of atoms we can skip the collate step
8,321
public static QueryFilter getIdentityFilter ( DecoratedKey key , String cfName , long timestamp ) { return new QueryFilter ( key , cfName , new IdentityQueryFilter ( ) , timestamp ) ; }
return a QueryFilter object that includes every column in the row . This is dangerous on large rows ; avoid except for test code .
8,322
public Memtable getMemtableFor ( OpOrder . Group opGroup , ReplayPosition replayPosition ) { for ( Memtable memtable : view . get ( ) . liveMemtables ) { if ( memtable . accepts ( opGroup , replayPosition ) ) return memtable ; } throw new AssertionError ( view . get ( ) . liveMemtables . toString ( ) ) ; }
get the Memtable that the ordered writeOp should be directed to
8,323
public void replaceWithNewInstances ( Collection < SSTableReader > toReplace , Collection < SSTableReader > replaceWith ) { replaceReaders ( toReplace , replaceWith , true ) ; }
Replaces existing sstables with new instances makes sure compaction strategies have the correct instance
8,324
public void replaceEarlyOpenedFiles ( Collection < SSTableReader > toReplace , Collection < SSTableReader > replaceWith ) { for ( SSTableReader s : toReplace ) assert s . openReason . equals ( SSTableReader . OpenReason . EARLY ) ; replaceReaders ( toReplace , replaceWith , false ) ; }
Adds the early opened files to the data tracker but does not tell compaction strategies about it
8,325
public void unreferenceSSTables ( ) { Set < SSTableReader > notCompacting ; View currentView , newView ; do { currentView = view . get ( ) ; notCompacting = currentView . nonCompactingSStables ( ) ; newView = currentView . replace ( notCompacting , Collections . < SSTableReader > emptySet ( ) ) ; } while ( ! view . compareAndSet ( currentView , newView ) ) ; if ( notCompacting . isEmpty ( ) ) { return ; } notifySSTablesChanged ( notCompacting , Collections . < SSTableReader > emptySet ( ) , OperationType . UNKNOWN ) ; removeOldSSTablesSize ( notCompacting ) ; releaseReferences ( notCompacting , true ) ; }
removes all sstables that are not busy compacting .
8,326
void removeUnreadableSSTables ( File directory ) { View currentView , newView ; Set < SSTableReader > remaining = new HashSet < > ( ) ; do { currentView = view . get ( ) ; for ( SSTableReader r : currentView . nonCompactingSStables ( ) ) if ( ! r . descriptor . directory . equals ( directory ) ) remaining . add ( r ) ; if ( remaining . size ( ) == currentView . nonCompactingSStables ( ) . size ( ) ) return ; newView = currentView . replace ( currentView . sstables , remaining ) ; } while ( ! view . compareAndSet ( currentView , newView ) ) ; for ( SSTableReader sstable : currentView . sstables ) if ( ! remaining . contains ( sstable ) ) sstable . selfRef ( ) . release ( ) ; notifySSTablesChanged ( remaining , Collections . < SSTableReader > emptySet ( ) , OperationType . UNKNOWN ) ; }
Removes every SSTable in the directory from the DataTracker s view .
8,327
public static SettingsCommandUser build ( String [ ] params ) { GroupedOptions options = GroupedOptions . select ( params , new Options ( new Uncertainty ( ) ) , new Options ( new Duration ( ) ) , new Options ( new Count ( ) ) ) ; if ( options == null ) { printHelp ( ) ; System . out . println ( "Invalid USER options provided, see output for valid options" ) ; System . exit ( 1 ) ; } return new SettingsCommandUser ( ( Options ) options ) ; }
CLI utility methods
8,328
public void addComparator ( Comparator < T > comparator , boolean reverse ) { checkLocked ( ) ; comparatorChain . add ( comparator ) ; if ( reverse == true ) { orderingBits . set ( comparatorChain . size ( ) - 1 ) ; } }
Add a Comparator to the end of the chain using the given sortFields order
8,329
public void setComparator ( int index , Comparator < T > comparator ) throws IndexOutOfBoundsException { setComparator ( index , comparator , false ) ; }
Replace the Comparator at the given index maintaining the existing sortFields order .
8,330
public void setComparator ( int index , Comparator < T > comparator , boolean reverse ) { checkLocked ( ) ; comparatorChain . set ( index , comparator ) ; if ( reverse == true ) { orderingBits . set ( index ) ; } else { orderingBits . clear ( index ) ; } }
Replace the Comparator at the given index in the ComparatorChain using the given sortFields order
8,331
public static KSMetaData fromSchema ( Row row , Iterable < CFMetaData > cfms , UTMetaData userTypes ) { UntypedResultSet . Row result = QueryProcessor . resultify ( "SELECT * FROM system.schema_keyspaces" , row ) . one ( ) ; try { return new KSMetaData ( result . getString ( "keyspace_name" ) , AbstractReplicationStrategy . getClass ( result . getString ( "strategy_class" ) ) , fromJsonMap ( result . getString ( "strategy_options" ) ) , result . getBoolean ( "durable_writes" ) , cfms , userTypes ) ; } catch ( ConfigurationException e ) { throw new RuntimeException ( e ) ; } }
Deserialize only Keyspace attributes without nested ColumnFamilies
8,332
public static KSMetaData fromSchema ( Row serializedKs , Row serializedCFs , Row serializedUserTypes ) { Map < String , CFMetaData > cfs = deserializeColumnFamilies ( serializedCFs ) ; UTMetaData userTypes = new UTMetaData ( UTMetaData . fromSchema ( serializedUserTypes ) ) ; return fromSchema ( serializedKs , cfs . values ( ) , userTypes ) ; }
Deserialize Keyspace with nested ColumnFamilies
8,333
public static Map < String , CFMetaData > deserializeColumnFamilies ( Row row ) { if ( row . cf == null ) return Collections . emptyMap ( ) ; Map < String , CFMetaData > cfms = new HashMap < > ( ) ; UntypedResultSet results = QueryProcessor . resultify ( "SELECT * FROM system.schema_columnfamilies" , row ) ; for ( UntypedResultSet . Row result : results ) { CFMetaData cfm = CFMetaData . fromSchema ( result ) ; cfms . put ( cfm . cfName , cfm ) ; } return cfms ; }
Deserialize ColumnFamilies from low - level schema representation all of them belong to the same keyspace
8,334
static TimingInterval merge ( Iterable < TimingInterval > intervals , int maxSamples , long start ) { ThreadLocalRandom rnd = ThreadLocalRandom . current ( ) ; long operationCount = 0 , partitionCount = 0 , rowCount = 0 , errorCount = 0 ; long maxLatency = 0 , totalLatency = 0 ; List < SampleOfLongs > latencies = new ArrayList < > ( ) ; long end = 0 ; long pauseStart = 0 , pauseEnd = Long . MAX_VALUE ; for ( TimingInterval interval : intervals ) { if ( interval != null ) { end = Math . max ( end , interval . end ) ; operationCount += interval . operationCount ; maxLatency = Math . max ( interval . maxLatency , maxLatency ) ; totalLatency += interval . totalLatency ; partitionCount += interval . partitionCount ; rowCount += interval . rowCount ; errorCount += interval . errorCount ; latencies . addAll ( Arrays . asList ( interval . sample ) ) ; if ( interval . pauseLength > 0 ) { pauseStart = Math . max ( pauseStart , interval . pauseStart ) ; pauseEnd = Math . min ( pauseEnd , interval . pauseStart + interval . pauseLength ) ; } } } if ( pauseEnd < pauseStart || pauseStart <= 0 ) { pauseEnd = pauseStart = 0 ; } return new TimingInterval ( start , end , maxLatency , pauseStart , pauseEnd - pauseStart , partitionCount , rowCount , totalLatency , operationCount , errorCount , SampleOfLongs . merge ( rnd , latencies , maxSamples ) ) ; }
merge multiple timer intervals together
8,335
public boolean supersedes ( RangeTombstone rt , Comparator < Composite > comparator ) { if ( rt . data . markedForDeleteAt > data . markedForDeleteAt ) return false ; return comparator . compare ( min , rt . min ) <= 0 && comparator . compare ( max , rt . max ) >= 0 ; }
This tombstone supersedes another one if it is more recent and cover a bigger range than rt .
8,336
public static String string ( ByteBuffer buffer , int position , int length ) throws CharacterCodingException { return string ( buffer , position , length , StandardCharsets . UTF_8 ) ; }
Decode a String representation . This method assumes that the encoding charset is UTF_8 .
8,337
public static int compareSubArrays ( ByteBuffer bytes1 , int offset1 , ByteBuffer bytes2 , int offset2 , int length ) { if ( bytes1 == null ) return bytes2 == null ? 0 : - 1 ; if ( bytes2 == null ) return 1 ; assert bytes1 . limit ( ) >= offset1 + length : "The first byte array isn't long enough for the specified offset and length." ; assert bytes2 . limit ( ) >= offset2 + length : "The second byte array isn't long enough for the specified offset and length." ; for ( int i = 0 ; i < length ; i ++ ) { byte byte1 = bytes1 . get ( offset1 + i ) ; byte byte2 = bytes2 . get ( offset2 + i ) ; if ( byte1 == byte2 ) continue ; return ( byte1 & 0xFF ) < ( byte2 & 0xFF ) ? - 1 : 1 ; } return 0 ; }
Compare two ByteBuffer at specified offsets for length . Compares the non equal bytes as unsigned .
8,338
public static ByteBuffer minimalBufferFor ( ByteBuffer buf ) { return buf . capacity ( ) > buf . remaining ( ) || ! buf . hasArray ( ) ? ByteBuffer . wrap ( getArray ( buf ) ) : buf ; }
trims size of bytebuffer to exactly number of bytes in it to do not hold too much memory
8,339
public static int getShortLength ( ByteBuffer bb , int position ) { int length = ( bb . get ( position ) & 0xFF ) << 8 ; return length | ( bb . get ( position + 1 ) & 0xFF ) ; }
Doesn t change bb position
8,340
public ArrayList < InetAddress > getNaturalEndpoints ( RingPosition searchPosition ) { ArrayList < InetAddress > l = new ArrayList < InetAddress > ( 1 ) ; l . add ( FBUtilities . getBroadcastAddress ( ) ) ; return l ; }
We need to override this even if we override calculateNaturalEndpoints because the default implementation depends on token calculations but LocalStrategy may be used before tokens are set up .
8,341
public static synchronized void removeTruncationRecord ( UUID cfId ) { String req = "DELETE truncated_at[?] from system.%s WHERE key = '%s'" ; executeInternal ( String . format ( req , LOCAL_CF , LOCAL_KEY ) , cfId ) ; truncationRecords = null ; forceBlockingFlush ( LOCAL_CF ) ; }
This method is used to remove information about truncation time for specified column family
8,342
public static synchronized void updateTokens ( InetAddress ep , Collection < Token > tokens ) { if ( ep . equals ( FBUtilities . getBroadcastAddress ( ) ) ) { removeEndpoint ( ep ) ; return ; } String req = "INSERT INTO system.%s (peer, tokens) VALUES (?, ?)" ; executeInternal ( String . format ( req , PEERS_CF ) , ep , tokensAsSet ( tokens ) ) ; }
Record tokens being used by another node
8,343
public static synchronized void removeEndpoint ( InetAddress ep ) { String req = "DELETE FROM system.%s WHERE peer = ?" ; executeInternal ( String . format ( req , PEERS_CF ) , ep ) ; }
Remove stored tokens being used by another node
8,344
public static synchronized void updateTokens ( Collection < Token > tokens ) { assert ! tokens . isEmpty ( ) : "removeEndpoint should be used instead" ; String req = "INSERT INTO system.%s (key, tokens) VALUES ('%s', ?)" ; executeInternal ( String . format ( req , LOCAL_CF , LOCAL_KEY ) , tokensAsSet ( tokens ) ) ; forceBlockingFlush ( LOCAL_CF ) ; }
This method is used to update the System Keyspace with the new tokens for this node
8,345
public static synchronized Collection < Token > updateLocalTokens ( Collection < Token > addTokens , Collection < Token > rmTokens ) { Collection < Token > tokens = getSavedTokens ( ) ; tokens . removeAll ( rmTokens ) ; tokens . addAll ( addTokens ) ; updateTokens ( tokens ) ; return tokens ; }
Convenience method to update the list of tokens in the local system keyspace .
8,346
public static SetMultimap < InetAddress , Token > loadTokens ( ) { SetMultimap < InetAddress , Token > tokenMap = HashMultimap . create ( ) ; for ( UntypedResultSet . Row row : executeInternal ( "SELECT peer, tokens FROM system." + PEERS_CF ) ) { InetAddress peer = row . getInetAddress ( "peer" ) ; if ( row . has ( "tokens" ) ) tokenMap . putAll ( peer , deserializeTokens ( row . getSet ( "tokens" , UTF8Type . instance ) ) ) ; } return tokenMap ; }
Return a map of stored tokens to IP addresses
8,347
public static Map < InetAddress , UUID > loadHostIds ( ) { Map < InetAddress , UUID > hostIdMap = new HashMap < InetAddress , UUID > ( ) ; for ( UntypedResultSet . Row row : executeInternal ( "SELECT peer, host_id FROM system." + PEERS_CF ) ) { InetAddress peer = row . getInetAddress ( "peer" ) ; if ( row . has ( "host_id" ) ) { hostIdMap . put ( peer , row . getUUID ( "host_id" ) ) ; } } return hostIdMap ; }
Return a map of store host_ids to IP addresses
8,348
public static InetAddress getPreferredIP ( InetAddress ep ) { String req = "SELECT preferred_ip FROM system.%s WHERE peer=?" ; UntypedResultSet result = executeInternal ( String . format ( req , PEERS_CF ) , ep ) ; if ( ! result . isEmpty ( ) && result . one ( ) . has ( "preferred_ip" ) ) return result . one ( ) . getInetAddress ( "preferred_ip" ) ; return ep ; }
Get preferred IP for given endpoint if it is known . Otherwise this returns given endpoint itself .
8,349
public static Map < InetAddress , Map < String , String > > loadDcRackInfo ( ) { Map < InetAddress , Map < String , String > > result = new HashMap < InetAddress , Map < String , String > > ( ) ; for ( UntypedResultSet . Row row : executeInternal ( "SELECT peer, data_center, rack from system." + PEERS_CF ) ) { InetAddress peer = row . getInetAddress ( "peer" ) ; if ( row . has ( "data_center" ) && row . has ( "rack" ) ) { Map < String , String > dcRack = new HashMap < String , String > ( ) ; dcRack . put ( "data_center" , row . getString ( "data_center" ) ) ; dcRack . put ( "rack" , row . getString ( "rack" ) ) ; result . put ( peer , dcRack ) ; } } return result ; }
Return a map of IP addresses containing a map of dc and rack info
8,350
public static UUID setLocalHostId ( UUID hostId ) { String req = "INSERT INTO system.%s (key, host_id) VALUES ('%s', ?)" ; executeInternal ( String . format ( req , LOCAL_CF , LOCAL_KEY ) , hostId ) ; return hostId ; }
Sets the local host ID explicitly . Should only be called outside of SystemTable when replacing a node .
8,351
public static void clearSSTableReadMeter ( String keyspace , String table , int generation ) { String cql = "DELETE FROM system.%s WHERE keyspace_name=? AND columnfamily_name=? and generation=?" ; executeInternal ( String . format ( cql , SSTABLE_ACTIVITY_CF ) , keyspace , table , generation ) ; }
Clears persisted read rates from system . sstable_activity for SSTables that have been deleted .
8,352
public static void updateSizeEstimates ( String keyspace , String table , Map < Range < Token > , Pair < Long , Long > > estimates ) { long timestamp = FBUtilities . timestampMicros ( ) ; CFMetaData estimatesTable = CFMetaData . SizeEstimatesCf ; Mutation mutation = new Mutation ( Keyspace . SYSTEM_KS , UTF8Type . instance . decompose ( keyspace ) ) ; mutation . deleteRange ( SIZE_ESTIMATES_CF , estimatesTable . comparator . make ( table ) . start ( ) , estimatesTable . comparator . make ( table ) . end ( ) , timestamp - 1 ) ; ColumnFamily cells = mutation . addOrGet ( estimatesTable ) ; for ( Map . Entry < Range < Token > , Pair < Long , Long > > entry : estimates . entrySet ( ) ) { Range < Token > range = entry . getKey ( ) ; Pair < Long , Long > values = entry . getValue ( ) ; Composite prefix = estimatesTable . comparator . make ( table , range . left . toString ( ) , range . right . toString ( ) ) ; CFRowAdder adder = new CFRowAdder ( cells , prefix , timestamp ) ; adder . add ( "partitions_count" , values . left ) . add ( "mean_partition_size" , values . right ) ; } mutation . apply ( ) ; }
Writes the current partition count and size estimates into SIZE_ESTIMATES_CF
8,353
public void maybeAddLatency ( IAsyncCallback cb , InetAddress address , long latency ) { if ( cb . isLatencyForSnitch ( ) ) addLatency ( address , latency ) ; }
Track latency information for the dynamic snitch
8,354
public void listen ( InetAddress localEp ) throws ConfigurationException { callbacks . reset ( ) ; for ( ServerSocket ss : getServerSockets ( localEp ) ) { SocketThread th = new SocketThread ( ss , "ACCEPT-" + localEp ) ; th . start ( ) ; socketThreads . add ( th ) ; } listenGate . signalAll ( ) ; }
Listen on the specified port .
8,355
public void registerVerbHandlers ( Verb verb , IVerbHandler verbHandler ) { assert ! verbHandlers . containsKey ( verb ) ; verbHandlers . put ( verb , verbHandler ) ; }
Register a verb and the corresponding verb handler with the Messaging Service .
8,356
public int sendRR ( MessageOut message , InetAddress to , IAsyncCallback cb , long timeout , boolean failureCallback ) { int id = addCallback ( cb , message , to , timeout , failureCallback ) ; sendOneWay ( failureCallback ? message . withParameter ( FAILURE_CALLBACK_PARAM , ONE_BYTE ) : message , id , to ) ; return id ; }
Send a non - mutation message to a given endpoint . This method specifies a callback which is invoked with the actual response .
8,357
public void sendOneWay ( MessageOut message , int id , InetAddress to ) { if ( logger . isTraceEnabled ( ) ) logger . trace ( FBUtilities . getBroadcastAddress ( ) + " sending " + message . verb + " to " + id + "@" + to ) ; if ( to . equals ( FBUtilities . getBroadcastAddress ( ) ) ) logger . trace ( "Message-to-self {} going over MessagingService" , message ) ; MessageOut processedMessage = SinkManager . processOutboundMessage ( message , id , to ) ; if ( processedMessage == null ) { return ; } OutboundTcpConnection connection = getConnection ( to , processedMessage ) ; connection . enqueue ( processedMessage , id ) ; }
Send a message to a given endpoint . This method adheres to the fire and forget style messaging .
8,358
private static ByteBuffer min ( ByteBuffer b1 , ByteBuffer b2 , AbstractType < ? > comparator ) { if ( b1 == null ) return b2 ; if ( b2 == null ) return b1 ; if ( comparator . compare ( b1 , b2 ) >= 0 ) return b2 ; return b1 ; }
return the min column
8,359
private static ByteBuffer max ( ByteBuffer b1 , ByteBuffer b2 , AbstractType < ? > comparator ) { if ( b1 == null ) return b2 ; if ( b2 == null ) return b1 ; if ( comparator . compare ( b1 , b2 ) >= 0 ) return b1 ; return b2 ; }
return the max column
8,360
public static List < ByteBuffer > mergeMin ( List < ByteBuffer > minColumnNames , List < ByteBuffer > candidates , CellNameType comparator ) { if ( minColumnNames . isEmpty ( ) ) return minimalBuffersFor ( candidates ) ; if ( candidates . isEmpty ( ) ) return minColumnNames ; List < ByteBuffer > biggest = minColumnNames . size ( ) > candidates . size ( ) ? minColumnNames : candidates ; List < ByteBuffer > smallest = minColumnNames . size ( ) > candidates . size ( ) ? candidates : minColumnNames ; List < ByteBuffer > retList = smallest . size ( ) == biggest . size ( ) ? new ArrayList < > ( smallest ) : maybeGrow ( smallest , biggest . size ( ) ) ; for ( int i = 0 ; i < biggest . size ( ) ; i ++ ) retList . set ( i , minimalBufferFor ( min ( retList . get ( i ) , biggest . get ( i ) , comparator . subtype ( i ) ) ) ) ; return retList ; }
Merge 2 lists of min cell name components .
8,361
public static List < ByteBuffer > mergeMax ( List < ByteBuffer > maxColumnNames , List < ByteBuffer > candidates , CellNameType comparator ) { if ( maxColumnNames . isEmpty ( ) ) return minimalBuffersFor ( candidates ) ; if ( candidates . isEmpty ( ) ) return maxColumnNames ; List < ByteBuffer > biggest = maxColumnNames . size ( ) > candidates . size ( ) ? maxColumnNames : candidates ; List < ByteBuffer > smallest = maxColumnNames . size ( ) > candidates . size ( ) ? candidates : maxColumnNames ; List < ByteBuffer > retList = smallest . size ( ) == biggest . size ( ) ? new ArrayList < > ( smallest ) : maybeGrow ( smallest , biggest . size ( ) ) ; for ( int i = 0 ; i < biggest . size ( ) ; i ++ ) retList . set ( i , minimalBufferFor ( max ( retList . get ( i ) , biggest . get ( i ) , comparator . subtype ( i ) ) ) ) ; return retList ; }
Merge 2 lists of max cell name components .
8,362
public static CounterCell createLocal ( CellName name , long value , long timestamp , long timestampOfLastDelete ) { return new BufferCounterCell ( name , contextManager . createLocal ( value ) , timestamp , timestampOfLastDelete ) ; }
For use by tests of compatibility with pre - 2 . 1 counter only .
8,363
private void addColumnsToCF ( List < ? > row , ColumnFamily cfamily ) { CFMetaData cfm = cfamily . metadata ( ) ; assert cfm != null ; for ( Object c : row ) { JsonColumn col = new JsonColumn < List > ( ( List ) c , cfm ) ; if ( col . isRangeTombstone ( ) ) { Composite start = cfm . comparator . fromByteBuffer ( col . getName ( ) ) ; Composite end = cfm . comparator . fromByteBuffer ( col . getValue ( ) ) ; cfamily . addAtom ( new RangeTombstone ( start , end , col . timestamp , col . localExpirationTime ) ) ; continue ; } assert cfm . isCQL3Table ( ) || col . getName ( ) . hasRemaining ( ) : "Cell name should not be empty" ; CellName cname = col . getName ( ) . hasRemaining ( ) ? cfm . comparator . cellFromByteBuffer ( col . getName ( ) ) : cfm . comparator . rowMarker ( Composites . EMPTY ) ; if ( col . isExpiring ( ) ) { cfamily . addColumn ( new BufferExpiringCell ( cname , col . getValue ( ) , col . timestamp , col . ttl , col . localExpirationTime ) ) ; } else if ( col . isCounter ( ) ) { cfamily . addColumn ( new BufferCounterCell ( cname , col . getValue ( ) , col . timestamp , col . timestampOfLastDelete ) ) ; } else if ( col . isDeleted ( ) ) { cfamily . addTombstone ( cname , col . getValue ( ) , col . timestamp ) ; } else if ( col . isRangeTombstone ( ) ) { CellName end = cfm . comparator . cellFromByteBuffer ( col . getValue ( ) ) ; cfamily . addAtom ( new RangeTombstone ( cname , end , col . timestamp , col . localExpirationTime ) ) ; } else if ( cname . isEmpty ( ) ) { cfamily . addColumn ( cfm . comparator . rowMarker ( Composites . EMPTY ) , col . getValue ( ) , col . timestamp ) ; } else { cfamily . addColumn ( cname , col . getValue ( ) , col . timestamp ) ; } } }
Add columns to a column family .
8,364
public int importJson ( String jsonFile , String keyspace , String cf , String ssTablePath ) throws IOException { ColumnFamily columnFamily = ArrayBackedSortedColumns . factory . create ( keyspace , cf ) ; IPartitioner partitioner = DatabaseDescriptor . getPartitioner ( ) ; int importedKeys = ( isSorted ) ? importSorted ( jsonFile , columnFamily , ssTablePath , partitioner ) : importUnsorted ( jsonFile , columnFamily , ssTablePath , partitioner ) ; if ( importedKeys != - 1 ) System . out . printf ( "%d keys imported successfully.%n" , importedKeys ) ; return importedKeys ; }
Convert a JSON formatted file to an SSTable .
8,365
private AbstractType < ? > getKeyValidator ( ColumnFamily columnFamily ) { if ( "true" . equals ( System . getProperty ( "skip.key.validator" , "false" ) ) ) { return BytesType . instance ; } return columnFamily . metadata ( ) . getKeyValidator ( ) ; }
Get key validator for column family
8,366
public static void main ( String [ ] args ) throws ParseException , ConfigurationException { CommandLineParser parser = new PosixParser ( ) ; try { cmd = parser . parse ( options , args ) ; } catch ( org . apache . commons . cli . ParseException e ) { System . err . println ( e . getMessage ( ) ) ; printProgramUsage ( ) ; System . exit ( 1 ) ; } if ( cmd . getArgs ( ) . length != 2 ) { printProgramUsage ( ) ; System . exit ( 1 ) ; } String json = cmd . getArgs ( ) [ 0 ] ; String ssTable = cmd . getArgs ( ) [ 1 ] ; String keyspace = cmd . getOptionValue ( KEYSPACE_OPTION ) ; String cfamily = cmd . getOptionValue ( COLUMN_FAMILY_OPTION ) ; Integer keyCountToImport = null ; boolean isSorted = false ; if ( cmd . hasOption ( KEY_COUNT_OPTION ) ) { keyCountToImport = Integer . valueOf ( cmd . getOptionValue ( KEY_COUNT_OPTION ) ) ; } if ( cmd . hasOption ( IS_SORTED_OPTION ) ) { isSorted = true ; } DatabaseDescriptor . loadSchemas ( false ) ; if ( Schema . instance . getNonSystemKeyspaces ( ) . size ( ) < 1 ) { String msg = "no non-system keyspaces are defined" ; System . err . println ( msg ) ; throw new ConfigurationException ( msg ) ; } try { new SSTableImport ( keyCountToImport , isSorted ) . importJson ( json , keyspace , cfamily , ssTable ) ; } catch ( Exception e ) { JVMStabilityInspector . inspectThrowable ( e ) ; e . printStackTrace ( ) ; System . err . println ( "ERROR: " + e . getMessage ( ) ) ; System . exit ( - 1 ) ; } System . exit ( 0 ) ; }
Converts JSON to an SSTable file . JSON input can either be a file specified using an optional command line argument or supplied on standard in .
8,367
public void updateHostId ( UUID hostId , InetAddress endpoint ) { assert hostId != null ; assert endpoint != null ; lock . writeLock ( ) . lock ( ) ; try { InetAddress storedEp = endpointToHostIdMap . inverse ( ) . get ( hostId ) ; if ( storedEp != null ) { if ( ! storedEp . equals ( endpoint ) && ( FailureDetector . instance . isAlive ( storedEp ) ) ) { throw new RuntimeException ( String . format ( "Host ID collision between active endpoint %s and %s (id=%s)" , storedEp , endpoint , hostId ) ) ; } } UUID storedId = endpointToHostIdMap . get ( endpoint ) ; if ( ( storedId != null ) && ( ! storedId . equals ( hostId ) ) ) logger . warn ( "Changing {}'s host ID from {} to {}" , endpoint , storedId , hostId ) ; endpointToHostIdMap . forcePut ( endpoint , hostId ) ; } finally { lock . writeLock ( ) . unlock ( ) ; } }
Store an end - point to host ID mapping . Each ID must be unique and cannot be changed after the fact .
8,368
public UUID getHostId ( InetAddress endpoint ) { lock . readLock ( ) . lock ( ) ; try { return endpointToHostIdMap . get ( endpoint ) ; } finally { lock . readLock ( ) . unlock ( ) ; } }
Return the unique host ID for an end - point .
8,369
public InetAddress getEndpointForHostId ( UUID hostId ) { lock . readLock ( ) . lock ( ) ; try { return endpointToHostIdMap . inverse ( ) . get ( hostId ) ; } finally { lock . readLock ( ) . unlock ( ) ; } }
Return the end - point for a unique host ID
8,370
public void addMovingEndpoint ( Token token , InetAddress endpoint ) { assert endpoint != null ; lock . writeLock ( ) . lock ( ) ; try { movingEndpoints . add ( Pair . create ( token , endpoint ) ) ; } finally { lock . writeLock ( ) . unlock ( ) ; } }
Add a new moving endpoint
8,371
public TokenMetadata cloneOnlyTokenMap ( ) { lock . readLock ( ) . lock ( ) ; try { return new TokenMetadata ( SortedBiMultiValMap . < Token , InetAddress > create ( tokenToEndpointMap , null , inetaddressCmp ) , HashBiMap . create ( endpointToHostIdMap ) , new Topology ( topology ) ) ; } finally { lock . readLock ( ) . unlock ( ) ; } }
Create a copy of TokenMetadata with only tokenToEndpointMap . That is pending ranges bootstrap tokens and leaving endpoints are not included in the copy .
8,372
public TokenMetadata cachedOnlyTokenMap ( ) { TokenMetadata tm = cachedTokenMap . get ( ) ; if ( tm != null ) return tm ; synchronized ( this ) { if ( ( tm = cachedTokenMap . get ( ) ) != null ) return tm ; tm = cloneOnlyTokenMap ( ) ; cachedTokenMap . set ( tm ) ; return tm ; } }
Return a cached TokenMetadata with only tokenToEndpointMap i . e . the same as cloneOnlyTokenMap but uses a cached copy that is invalided when the ring changes so in the common case no extra locking is required .
8,373
public TokenMetadata cloneAfterAllLeft ( ) { lock . readLock ( ) . lock ( ) ; try { TokenMetadata allLeftMetadata = cloneOnlyTokenMap ( ) ; for ( InetAddress endpoint : leavingEndpoints ) allLeftMetadata . removeEndpoint ( endpoint ) ; return allLeftMetadata ; } finally { lock . readLock ( ) . unlock ( ) ; } }
Create a copy of TokenMetadata with tokenToEndpointMap reflecting situation after all current leave operations have finished .
8,374
public TokenMetadata cloneAfterAllSettled ( ) { lock . readLock ( ) . lock ( ) ; try { TokenMetadata metadata = cloneOnlyTokenMap ( ) ; for ( InetAddress endpoint : leavingEndpoints ) metadata . removeEndpoint ( endpoint ) ; for ( Pair < Token , InetAddress > pair : movingEndpoints ) metadata . updateNormalToken ( pair . left , pair . right ) ; return metadata ; } finally { lock . readLock ( ) . unlock ( ) ; } }
Create a copy of TokenMetadata with tokenToEndpointMap reflecting situation after all current leave and move operations have finished .
8,375
private void tickIfNecessary ( ) { final long oldTick = lastTick . get ( ) ; final long newTick = clock . tick ( ) ; final long age = newTick - oldTick ; if ( age > TICK_INTERVAL ) { final long newIntervalStartTick = newTick - age % TICK_INTERVAL ; if ( lastTick . compareAndSet ( oldTick , newIntervalStartTick ) ) { final long requiredTicks = age / TICK_INTERVAL ; for ( long i = 0 ; i < requiredTicks ; i ++ ) { m15Rate . tick ( ) ; m120Rate . tick ( ) ; } } } }
Updates the moving averages as needed .
8,376
public synchronized void beginSampling ( int capacity ) { if ( ! enabled ) { summary = new StreamSummary < T > ( capacity ) ; hll = new HyperLogLogPlus ( 14 ) ; enabled = true ; } }
Start to record samples
8,377
public synchronized SamplerResult < T > finishSampling ( int count ) { List < Counter < T > > results = Collections . EMPTY_LIST ; long cardinality = 0 ; if ( enabled ) { enabled = false ; results = summary . topK ( count ) ; cardinality = hll . cardinality ( ) ; } return new SamplerResult < T > ( results , cardinality ) ; }
Call to stop collecting samples and gather the results
8,378
public void addSample ( final T item , final long hash , final int value ) { if ( enabled ) { final Object lock = this ; samplerExecutor . execute ( new Runnable ( ) { public void run ( ) { synchronized ( lock ) { if ( enabled ) { try { summary . offer ( item , value ) ; hll . offerHashed ( hash ) ; } catch ( Exception e ) { logger . debug ( "Failure to offer sample" , e ) ; } } } } } ) ; } }
Adds a sample to statistics collection . This method is non - blocking and will use the Sampler thread pool to record results if the sampler is enabled . If not sampling this is a NOOP
8,379
public Tuple getNext ( ) throws IOException { try { if ( ! reader . nextKeyValue ( ) ) return null ; CfInfo cfInfo = getCfInfo ( loadSignature ) ; CfDef cfDef = cfInfo . cfDef ; Row row = reader . getCurrentValue ( ) ; Tuple tuple = TupleFactory . getInstance ( ) . newTuple ( cfDef . column_metadata . size ( ) ) ; Iterator < ColumnDef > itera = cfDef . column_metadata . iterator ( ) ; int i = 0 ; while ( itera . hasNext ( ) ) { ColumnDef cdef = itera . next ( ) ; ByteBuffer columnValue = row . getBytesUnsafe ( ByteBufferUtil . string ( cdef . name . duplicate ( ) ) ) ; if ( columnValue != null ) { Cell cell = new BufferCell ( CellNames . simpleDense ( cdef . name ) , columnValue ) ; AbstractType < ? > validator = getValidatorMap ( cfDef ) . get ( cdef . name ) ; setTupleValue ( tuple , i , cqlColumnToObj ( cell , cfDef ) , validator ) ; } else tuple . set ( i , null ) ; i ++ ; } return tuple ; } catch ( InterruptedException e ) { throw new IOException ( e . getMessage ( ) ) ; } }
get next row
8,380
private Object cqlColumnToObj ( Cell col , CfDef cfDef ) throws IOException { Map < ByteBuffer , AbstractType > validators = getValidatorMap ( cfDef ) ; ByteBuffer cellName = col . name ( ) . toByteBuffer ( ) ; if ( validators . get ( cellName ) == null ) return cassandraToObj ( getDefaultMarshallers ( cfDef ) . get ( MarshallerType . DEFAULT_VALIDATOR ) , col . value ( ) ) ; else return cassandraToObj ( validators . get ( cellName ) , col . value ( ) ) ; }
convert a cql column to an object
8,381
protected List < ColumnDef > getColumnMetadata ( Cassandra . Client client ) throws InvalidRequestException , UnavailableException , TimedOutException , SchemaDisagreementException , TException , CharacterCodingException , org . apache . cassandra . exceptions . InvalidRequestException , ConfigurationException , NotFoundException { List < ColumnDef > keyColumns = null ; try { keyColumns = getKeysMeta ( client ) ; } catch ( Exception e ) { logger . error ( "Error in retrieving key columns" , e ) ; } List < ColumnDef > columns = getColumnMeta ( client , false , ! hasCompactValueAlias ) ; if ( keyColumns != null && columns != null ) keyColumns . addAll ( columns ) ; return keyColumns ; }
include key columns
8,382
private Map < String , ByteBuffer > tupleToKeyMap ( Tuple t ) throws IOException { Map < String , ByteBuffer > keys = new HashMap < String , ByteBuffer > ( ) ; for ( int i = 0 ; i < t . size ( ) ; i ++ ) { if ( t . getType ( i ) == DataType . TUPLE ) { Tuple inner = ( Tuple ) t . get ( i ) ; if ( inner . size ( ) == 2 ) { Object name = inner . get ( 0 ) ; if ( name != null ) { keys . put ( name . toString ( ) , objToBB ( inner . get ( 1 ) ) ) ; } else throw new IOException ( "Key name was empty" ) ; } else throw new IOException ( "Keys were not in name and value pairs" ) ; } else { throw new IOException ( "keys was not a tuple" ) ; } } return keys ; }
convert key tuple to key map
8,383
private void cqlQueryFromTuple ( Map < String , ByteBuffer > key , Tuple t , int offset ) throws IOException { for ( int i = offset ; i < t . size ( ) ; i ++ ) { if ( t . getType ( i ) == DataType . TUPLE ) { Tuple inner = ( Tuple ) t . get ( i ) ; if ( inner . size ( ) > 0 ) { List < ByteBuffer > bindedVariables = bindedVariablesFromTuple ( inner ) ; if ( bindedVariables . size ( ) > 0 ) sendCqlQuery ( key , bindedVariables ) ; else throw new IOException ( "Missing binded variables" ) ; } } else { throw new IOException ( "Output type was not a tuple" ) ; } } }
send CQL query request using data from tuple
8,384
private List < ByteBuffer > bindedVariablesFromTuple ( Tuple t ) throws IOException { List < ByteBuffer > variables = new ArrayList < ByteBuffer > ( ) ; for ( int i = 0 ; i < t . size ( ) ; i ++ ) variables . add ( objToBB ( t . get ( i ) ) ) ; return variables ; }
compose a list of binded variables
8,385
private void sendCqlQuery ( Map < String , ByteBuffer > key , List < ByteBuffer > bindedVariables ) throws IOException { try { writer . write ( key , bindedVariables ) ; } catch ( InterruptedException e ) { throw new IOException ( e ) ; } }
writer write the data by executing CQL query
8,386
private String getWhereClauseForPartitionFilter ( ) { UDFContext context = UDFContext . getUDFContext ( ) ; Properties property = context . getUDFProperties ( AbstractCassandraStorage . class ) ; return property . getProperty ( PARTITION_FILTER_SIGNATURE ) ; }
retrieve where clause for partition filter
8,387
private BigInteger bigForBytes ( byte [ ] bytes , int sigbytes ) { byte [ ] b ; if ( sigbytes != bytes . length ) { b = new byte [ sigbytes ] ; System . arraycopy ( bytes , 0 , b , 0 , bytes . length ) ; } else b = bytes ; return new BigInteger ( 1 , b ) ; }
Convert a byte array containing the most significant of sigbytes bytes representing a big - endian magnitude into a BigInteger .
8,388
public static void shutdownNow ( ) { for ( Stage stage : Stage . values ( ) ) { StageManager . stages . get ( stage ) . shutdownNow ( ) ; } }
This method shuts down all registered stages .
8,389
public static Pair < BigInteger , Boolean > midpoint ( BigInteger left , BigInteger right , int sigbits ) { BigInteger midpoint ; boolean remainder ; if ( left . compareTo ( right ) < 0 ) { BigInteger sum = left . add ( right ) ; remainder = sum . testBit ( 0 ) ; midpoint = sum . shiftRight ( 1 ) ; } else { BigInteger max = TWO . pow ( sigbits ) ; BigInteger distance = max . add ( right ) . subtract ( left ) ; remainder = distance . testBit ( 0 ) ; midpoint = distance . shiftRight ( 1 ) . add ( left ) . mod ( max ) ; } return Pair . create ( midpoint , remainder ) ; }
Given two bit arrays represented as BigIntegers containing the given number of significant bits calculate a midpoint .
8,390
public static < T > T construct ( String classname , String readable ) throws ConfigurationException { Class < T > cls = FBUtilities . classForName ( classname , readable ) ; try { return cls . newInstance ( ) ; } catch ( IllegalAccessException e ) { throw new ConfigurationException ( String . format ( "Default constructor for %s class '%s' is inaccessible." , readable , classname ) ) ; } catch ( InstantiationException e ) { throw new ConfigurationException ( String . format ( "Cannot use abstract class '%s' as %s." , classname , readable ) ) ; } catch ( Exception e ) { if ( e . getCause ( ) instanceof ConfigurationException ) throw ( ConfigurationException ) e . getCause ( ) ; throw new ConfigurationException ( String . format ( "Error instantiating %s class '%s'." , readable , classname ) , e ) ; } }
Constructs an instance of the given class which must have a no - arg or default constructor .
8,391
public static void exec ( ProcessBuilder pb ) throws IOException { Process p = pb . start ( ) ; try { int errCode = p . waitFor ( ) ; if ( errCode != 0 ) { BufferedReader in = new BufferedReader ( new InputStreamReader ( p . getInputStream ( ) ) ) ; BufferedReader err = new BufferedReader ( new InputStreamReader ( p . getErrorStream ( ) ) ) ; StringBuilder sb = new StringBuilder ( ) ; String str ; while ( ( str = in . readLine ( ) ) != null ) sb . append ( str ) . append ( System . getProperty ( "line.separator" ) ) ; while ( ( str = err . readLine ( ) ) != null ) sb . append ( str ) . append ( System . getProperty ( "line.separator" ) ) ; throw new IOException ( "Exception while executing the command: " + StringUtils . join ( pb . command ( ) , " " ) + ", command error Code: " + errCode + ", command output: " + sb . toString ( ) ) ; } } catch ( InterruptedException e ) { throw new AssertionError ( e ) ; } }
Starts and waits for the given
8,392
public static void removeUnreadableSSTables ( File directory ) { for ( Keyspace keyspace : Keyspace . all ( ) ) { for ( ColumnFamilyStore baseCfs : keyspace . getColumnFamilyStores ( ) ) { for ( ColumnFamilyStore cfs : baseCfs . concatWithIndexes ( ) ) cfs . maybeRemoveUnreadableSSTables ( directory ) ; } } }
Removes every SSTable in the directory from the appropriate DataTracker s view .
8,393
public void snapshot ( String snapshotName , String columnFamilyName ) throws IOException { assert snapshotName != null ; boolean tookSnapShot = false ; for ( ColumnFamilyStore cfStore : columnFamilyStores . values ( ) ) { if ( columnFamilyName == null || cfStore . name . equals ( columnFamilyName ) ) { tookSnapShot = true ; cfStore . snapshot ( snapshotName ) ; } } if ( ( columnFamilyName != null ) && ! tookSnapShot ) throw new IOException ( "Failed taking snapshot. Column family " + columnFamilyName + " does not exist." ) ; }
Take a snapshot of the specific column family or the entire set of column families if columnFamily is null with a given timestamp
8,394
public boolean snapshotExists ( String snapshotName ) { assert snapshotName != null ; for ( ColumnFamilyStore cfStore : columnFamilyStores . values ( ) ) { if ( cfStore . snapshotExists ( snapshotName ) ) return true ; } return false ; }
Check whether snapshots already exists for a given name .
8,395
public static void clearSnapshot ( String snapshotName , String keyspace ) { List < File > snapshotDirs = Directories . getKSChildDirectories ( keyspace ) ; Directories . clearSnapshot ( snapshotName , snapshotDirs ) ; }
Clear all the snapshots for a given keyspace .
8,396
public void dropCf ( UUID cfId ) { assert columnFamilyStores . containsKey ( cfId ) ; ColumnFamilyStore cfs = columnFamilyStores . remove ( cfId ) ; if ( cfs == null ) return ; cfs . keyspace . writeOrder . awaitNewBarrier ( ) ; cfs . readOrdering . awaitNewBarrier ( ) ; unloadCf ( cfs ) ; }
best invoked on the compaction mananger .
8,397
public void initCf ( UUID cfId , String cfName , boolean loadSSTables ) { ColumnFamilyStore cfs = columnFamilyStores . get ( cfId ) ; if ( cfs == null ) { ColumnFamilyStore oldCfs = columnFamilyStores . putIfAbsent ( cfId , ColumnFamilyStore . createColumnFamilyStore ( this , cfName , loadSSTables ) ) ; if ( oldCfs != null ) throw new IllegalStateException ( "added multiple mappings for cf id " + cfId ) ; } else { assert cfs . name . equals ( cfName ) ; cfs . metadata . reload ( ) ; cfs . reload ( ) ; } }
adds a cf to internal structures ends up creating disk files ) .
8,398
public void apply ( Mutation mutation , boolean writeCommitLog , boolean updateIndexes ) { try ( OpOrder . Group opGroup = writeOrder . start ( ) ) { ReplayPosition replayPosition = null ; if ( writeCommitLog ) { Tracing . trace ( "Appending to commitlog" ) ; replayPosition = CommitLog . instance . add ( mutation ) ; } DecoratedKey key = StorageService . getPartitioner ( ) . decorateKey ( mutation . key ( ) ) ; for ( ColumnFamily cf : mutation . getColumnFamilies ( ) ) { ColumnFamilyStore cfs = columnFamilyStores . get ( cf . id ( ) ) ; if ( cfs == null ) { logger . error ( "Attempting to mutate non-existant column family {}" , cf . id ( ) ) ; continue ; } Tracing . trace ( "Adding to {} memtable" , cf . metadata ( ) . cfName ) ; SecondaryIndexManager . Updater updater = updateIndexes ? cfs . indexManager . updaterFor ( key , cf , opGroup ) : SecondaryIndexManager . nullUpdater ; cfs . apply ( key , cf , updater , opGroup , replayPosition ) ; } } }
This method appends a row to the global CommitLog then updates memtables and indexes .
8,399
public static Function . Factory factory ( final Function fun ) { return new Function . Factory ( ) { public Function create ( String ksName , String cfName ) { return fun ; } } ; }
Creates a trivial factory that always return the provided function .