idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
147,700
/**
 * Logs a debug-level summary of a newly received request, including how long
 * the request took between the client-stamped origin time and its arrival here.
 *
 * @param operationType    operation type (e.g. GET/PUT) to include in the log line
 * @param receivedTimeInMs wall-clock time (ms) at which the request was received
 */
protected void debugLog(String operationType, Long receivedTimeInMs) {
    // Skip the fairly expensive message construction entirely when debug
    // logging is disabled (logger.debug would discard it anyway).
    if(!logger.isDebugEnabled()) {
        return;
    }
    long durationInMs = receivedTimeInMs - (this.parsedRequestOriginTimeInMs);
    int numVectorClockEntries = (this.parsedVectorClock == null ? 0
                                                                : this.parsedVectorClock.getVersionMap().size());
    logger.debug("Received a new request. Operation type: " + operationType + " , Key(s): "
                 + keysHexString(this.parsedKeys) + " , Store: " + this.storeName
                 + " , Origin time (in ms): " + (this.parsedRequestOriginTimeInMs)
                 + " , Request received at time(in ms): " + receivedTimeInMs
                 + " , Num vector clock entries: " + numVectorClockEntries
                 + " , Duration from RESTClient to CoordinatorRestRequestValidator(in ms): "
                 + durationInMs);
}
Prints a debug log message that details the time taken for the Http request to be parsed by the coordinator
198
22
147,701
/**
 * Fetches {@code source} into {@code dest} under the given disk quota, with
 * no auth/config overrides (all passed as null / -1 to the real implementation).
 *
 * @deprecated used for unit tests only; call {@code fetchFromSource} directly
 *             in production code.
 */
@Deprecated
@Override
public File fetch(String source, String dest, long diskQuotaSizeInKB) throws Exception {
    return fetchFromSource(source, dest, null, null, -1, diskQuotaSizeInKB, null);
}
Used for unit tests only.
54
6
147,702
/**
 * Main method for testing fetching. Downloads the given HDFS url (optionally
 * with kerberos credentials) to a destination directory and prints the
 * observed transfer rate.
 *
 * USAGE: java HdfsFetcher url [keytab-location kerberos-username hadoop-config-path [destDir [diskQuotaSizeInKB]]]
 */
public static void main(String[] args) throws Exception {
    if(args.length < 1)
        Utils.croak("USAGE: java " + HdfsFetcher.class.getName()
                    + " url [keytab-location kerberos-username hadoop-config-path [destDir]]");
    String url = args[0];
    // -1 node id and empty home dir: a throwaway config just for this tool
    VoldemortConfig config = new VoldemortConfig(-1, "");
    HdfsFetcher fetcher = new HdfsFetcher(config);
    String destDir = null;
    Long diskQuotaSizeInKB;
    if(args.length >= 4) {
        fetcher.voldemortConfig.setReadOnlyKeytabPath(args[1]);
        fetcher.voldemortConfig.setReadOnlyKerberosUser(args[2]);
        fetcher.voldemortConfig.setHadoopConfigPath(args[3]);
    }
    if(args.length >= 5)
        destDir = args[4];
    if(args.length >= 6)
        diskQuotaSizeInKB = Long.parseLong(args[5]);
    else
        diskQuotaSizeInKB = null;
    // for testing we want to be able to download a single file
    allowFetchingOfSingleFile = true;
    FileSystem fs = HadoopUtils.getHadoopFileSystem(fetcher.voldemortConfig, url);
    Path p = new Path(url);
    // Size of the first listed file, used only to compute the rate below
    FileStatus status = fs.listStatus(p)[0];
    long size = status.getLen();
    long start = System.currentTimeMillis();
    if(destDir == null)
        destDir = System.getProperty("java.io.tmpdir") + File.separator + start;
    File location = fetcher.fetch(url, destDir, null, null, -1, null, diskQuotaSizeInKB);
    double rate = size * Time.MS_PER_SECOND / (double) (System.currentTimeMillis() - start);
    NumberFormat nf = NumberFormat.getInstance();
    nf.setMaximumFractionDigits(2);
    System.out.println("Fetch to " + location + " completed: "
                       + nf.format(rate / (1024.0 * 1024.0)) + " MB/sec.");
    fs.close();
}
Main method for testing fetching
518
6
147,703
/**
 * Returns the total count of partition-stores moved in this task, i.e. the
 * sum of the partition-id list sizes across all stores.
 */
public synchronized int getPartitionStoreMoves() {
    int total = 0;
    for(List<Integer> partitionIds: storeToPartitionIds.values()) {
        total += partitionIds.size();
    }
    return total;
}
Total count of partition-stores moved in this task.
48
11
147,704
/**
 * Returns the total count of partitions across all stores.
 */
public synchronized int getPartitionStoreCount() {
    int count = 0;
    // Iterate values() directly instead of keySet() + get(store), which
    // performs a redundant map lookup per store.
    for(List<Integer> partitionIds: storeToPartitionIds.values()) {
        count += partitionIds.size();
    }
    return count;
}
Returns the total count of partitions across all stores .
57
10
147,705
/**
 * Pretty prints a list of rebalancing tasks, one
 * "donor -> stealer : [{store : partitionIds}...]" line per task.
 *
 * @param infos tasks to render
 * @return the formatted, newline-separated summary
 */
public static String taskListToString(List<RebalanceTaskInfo> infos) {
    // StringBuilder instead of StringBuffer: the buffer is purely local, so
    // StringBuffer's synchronization is wasted.
    StringBuilder sb = new StringBuilder();
    for(RebalanceTaskInfo info: infos) {
        sb.append("\t").append(info.getDonorId()).append(" -> ").append(info.getStealerId()).append(" : [");
        for(String storeName: info.getPartitionStores()) {
            sb.append("{").append(storeName).append(" : ").append(info.getPartitionIds(storeName)).append("}");
        }
        sb.append("]").append(Utils.NEWLINE);
    }
    return sb.toString();
}
Pretty prints a task list of rebalancing tasks .
180
11
147,706
/**
 * Creates the voldemort key and value from the input Avro record by
 * extracting the configured key and value fields, serializing both to bytes,
 * and handing them to the wrapped mapper which routes them to the responsible
 * voldemort nodes.
 *
 * On OutOfMemoryError the offending records are logged (as far as they were
 * materialized — keyBytes/valBytes being null tells us how far we got) and
 * the error is rethrown wrapped in a VoldemortException.
 */
@Override
public void map(GenericData.Record record,
                AvroCollector<Pair<ByteBuffer, ByteBuffer>> collector,
                Reporter reporter) throws IOException {
    byte[] keyBytes = null;
    byte[] valBytes = null;
    Object keyRecord = null;
    Object valRecord = null;
    try {
        keyRecord = record.get(keyField);
        valRecord = record.get(valField);
        keyBytes = keySerializer.toBytes(keyRecord);
        valBytes = valueSerializer.toBytes(valRecord);
        this.collectorWrapper.setCollector(collector);
        this.mapper.map(keyBytes, valBytes, this.collectorWrapper);
        recordCounter++;
    } catch(OutOfMemoryError oom) {
        logger.error(oomErrorMessage(reporter));
        if(keyBytes == null) {
            // OOM hit before the key was even serialized
            logger.error("keyRecord caused OOM!");
        } else {
            logger.error("keyRecord: " + keyRecord);
            logger.error("valRecord: " + (valBytes == null ? "caused OOM" : valRecord));
        }
        throw new VoldemortException(oomErrorMessage(reporter), oom);
    }
}
Create the voldemort key and value from the input Avro record by extracting the key and value and map it out for each of the responsible voldemort nodes
263
31
147,707
/**
 * Maps the given region of the given file descriptor into memory via the
 * native mmap call.
 *
 * @param len    length in bytes of the region to map
 * @param prot   protection flags
 * @param flags  mapping flags
 * @param fildes file descriptor to map
 * @param off    offset into the file
 * @return a Pointer to the newly mapped memory
 * @throws IOException if the native call fails (returns -1, i.e. MAP_FAILED)
 */
public static Pointer mmap(long len, int prot, int flags, int fildes, long off) throws IOException {
    // we don't really have a need to change the recommended pointer.
    Pointer addr = new Pointer(0);
    Pointer result = Delegate.mmap(addr,
                                   new NativeLong(len),
                                   prot,
                                   flags,
                                   fildes,
                                   new NativeLong(off));
    // -1 is the native MAP_FAILED sentinel
    if(Pointer.nativeValue(result) == -1) {
        if(logger.isDebugEnabled())
            logger.debug(errno.strerror());
        throw new IOException("mmap failed: " + errno.strerror());
    }
    return result;
}
Maps the given region of the given file descriptor into memory. Returns a Pointer to the newly mapped memory; throws an IOException on error.
154
28
147,708
/**
 * Locks the given memory region into RAM via the native mlock call.
 * Failures are only logged at debug level; nothing is thrown.
 */
public static void mlock(Pointer addr, long len) {
    int returnCode = Delegate.mlock(addr, new NativeLong(len));
    if(returnCode == 0) {
        if(logger.isDebugEnabled())
            logger.debug("Mlock successfull");
    } else if(logger.isDebugEnabled()) {
        logger.debug("Mlock failed probably because of insufficient privileges, errno:"
                     + errno.strerror() + ", return value:" + returnCode);
    }
}
Lock the given region . Does not report failures .
114
10
147,709
/**
 * Unlocks the given memory region via the native munlock call.
 * Failures are only logged at debug level; nothing is thrown.
 */
public static void munlock(Pointer addr, long len) {
    int returnCode = Delegate.munlock(addr, new NativeLong(len));
    if(returnCode == 0) {
        if(logger.isDebugEnabled())
            logger.debug("munlocking region");
    } else if(logger.isDebugEnabled()) {
        logger.debug("munlocking failed with errno:" + errno.strerror());
    }
}
Unlock the given region . Does not report failures .
94
11
147,710
/**
 * Returns the type created by selecting only the given subset of properties
 * from this type. The underlying type must be a Map for this to work.
 *
 * @param properties the property names to project
 * @return a new JsonTypeDefinition containing only the (sorted) projected properties
 * @throws IllegalArgumentException if this type is not a Map
 */
public JsonTypeDefinition projectionType(String... properties) {
    if(this.getType() instanceof Map<?, ?>) {
        Map<?, ?> type = (Map<?, ?>) getType();
        // Sort a copy: sorting the caller-supplied varargs array in place
        // would be a surprising side effect if the caller passed an array.
        String[] sortedProps = properties.clone();
        Arrays.sort(sortedProps);
        Map<String, Object> newType = new LinkedHashMap<String, Object>();
        for(String prop: sortedProps)
            // NOTE(review): properties absent from the source type map to a
            // null value here (pre-existing behavior) — confirm intended.
            newType.put(prop, type.get(prop));
        return new JsonTypeDefinition(newType);
    } else {
        throw new IllegalArgumentException("Cannot take the projection of a type that is not a Map.");
    }
}
Get the type created by selecting only a subset of properties from this type . The type must be a map for this to work
144
25
147,711
/**
 * Persists the set of versions currently buffered for the current key into
 * storage using the multiVersionPut api, then resets the buffer for the next
 * key. Versions the engine rejects as obsolete are only logged (debug level).
 */
private void writeBufferedValsToStorage() {
    List<Versioned<byte[]>> obsoleteVals = storageEngine.multiVersionPut(currBufferedKey,
                                                                         currBufferedVals);
    // log Obsolete versions in debug mode
    if(logger.isDebugEnabled() && obsoleteVals.size() > 0) {
        logger.debug("updateEntries (Streaming multi-version-put) rejected these versions as obsolete : "
                     + StoreUtils.getVersions(obsoleteVals) + " for key " + currBufferedKey);
    }
    currBufferedVals = new ArrayList<Versioned<byte[]>>(VALS_BUFFER_EXPECTED_SIZE);
}
Persists the current set of versions buffered for the current key into storage using the multiVersionPut api
160
21
147,712
/**
 * Attempts to acquire the rebalancing permit for the given node id.
 *
 * @param nodeId donor node to acquire the permit for
 * @return true if acquired; false if the permit was already held
 */
public synchronized boolean acquireRebalancingPermit(int nodeId) {
    boolean acquired = rebalancePermits.add(nodeId);
    logger.info("Acquiring rebalancing permit for node id " + nodeId + ", returned: " + acquired);
    return acquired;
}
Acquire a permit for a particular node id so as to allow rebalancing
62
16
147,713
/**
 * Releases the rebalancing permit for the given node id.
 *
 * @throws VoldemortException (wrapping IllegalStateException) if no permit
 *         was held for that node
 */
public synchronized void releaseRebalancingPermit(int nodeId) {
    boolean released = rebalancePermits.remove(nodeId);
    logger.info("Releasing rebalancing permit for node id " + nodeId + ", returned: " + released);
    if(!released) {
        throw new VoldemortException(new IllegalStateException("Invalid state, must hold a "
                                                               + "permit to release"));
    }
}
Release the rebalancing permit for a particular node id
91
11
147,714
/**
 * Goes through all the read-only stores known to the metadata store and swaps
 * each one to its current directory.
 *
 * @param swappedStoreNames    accumulator (or filter, see below) of store names
 * @param useSwappedStoreNames when true, only stores already present in
 *                             swappedStoreNames are swapped; when false, every
 *                             RO store is swapped and its name is appended to
 *                             swappedStoreNames
 */
private void swapROStores(List<String> swappedStoreNames, boolean useSwappedStoreNames) {
    try {
        for(StoreDefinition storeDef: metadataStore.getStoreDefList()) {
            // Only pick up the RO stores
            if(storeDef.getType().compareTo(ReadOnlyStorageConfiguration.TYPE_NAME) == 0) {
                if(useSwappedStoreNames && !swappedStoreNames.contains(storeDef.getName())) {
                    continue;
                }
                ReadOnlyStorageEngine engine = (ReadOnlyStorageEngine) storeRepository.getStorageEngine(storeDef.getName());
                if(engine == null) {
                    throw new VoldemortException("Could not find storage engine for "
                                                 + storeDef.getName() + " to swap ");
                }
                logger.info("Swapping RO store " + storeDef.getName());
                // Time to swap this store - Could have used admin client,
                // but why incur the overhead?
                engine.swapFiles(engine.getCurrentDirPath());
                // Add to list of stores already swapped
                if(!useSwappedStoreNames)
                    swappedStoreNames.add(storeDef.getName());
            }
        }
    } catch(Exception e) {
        // Any failure aborts the whole swap and is surfaced to the caller
        logger.error("Error while swapping RO store");
        throw new VoldemortException(e);
    }
}
Goes through all the RO Stores in the plan and swaps it
288
13
147,715
private void changeClusterAndStores ( String clusterKey , final Cluster cluster , String storesKey , final List < StoreDefinition > storeDefs ) { metadataStore . writeLock . lock ( ) ; try { VectorClock updatedVectorClock = ( ( VectorClock ) metadataStore . get ( clusterKey , null ) . get ( 0 ) . getVersion ( ) ) . incremented ( metadataStore . getNodeId ( ) , System . currentTimeMillis ( ) ) ; metadataStore . put ( clusterKey , Versioned . value ( ( Object ) cluster , updatedVectorClock ) ) ; // now put new stores updatedVectorClock = ( ( VectorClock ) metadataStore . get ( storesKey , null ) . get ( 0 ) . getVersion ( ) ) . incremented ( metadataStore . getNodeId ( ) , System . currentTimeMillis ( ) ) ; metadataStore . put ( storesKey , Versioned . value ( ( Object ) storeDefs , updatedVectorClock ) ) ; } catch ( Exception e ) { logger . info ( "Error while changing cluster to " + cluster + "for key " + clusterKey ) ; throw new VoldemortException ( e ) ; } finally { metadataStore . writeLock . unlock ( ) ; } }
Updates the cluster and store metadata atomically
260
9
147,716
/**
 * Starts the actual async rebalance operation; run when this node is the
 * stealer node. Validates that the passed plan matches the one recorded in
 * server state, acquires the donor's rebalancing permit, and submits the
 * stealer-based async operation.
 *
 * @param stealInfo the rebalance plan as seen by the caller
 * @return the async request id under which the operation was submitted
 * @throws VoldemortException          if the plan is missing or differs from server state
 * @throws AlreadyRebalancingException if a rebalance from this donor is already running
 */
public int rebalanceNode(final RebalanceTaskInfo stealInfo) {
    final RebalanceTaskInfo info = metadataStore.getRebalancerState().find(stealInfo.getDonorId());
    // Do we have the plan in the state?
    if(info == null) {
        throw new VoldemortException("Could not find plan " + stealInfo
                                     + " in the server state on " + metadataStore.getNodeId());
    } else if(!info.equals(stealInfo)) {
        // If we do have the plan, is it the same
        throw new VoldemortException("The plan in server state " + info
                                     + " is not the same as the process passed " + stealInfo);
    } else if(!acquireRebalancingPermit(stealInfo.getDonorId())) {
        // Both are same, now try to acquire a lock for the donor node
        throw new AlreadyRebalancingException("Node " + metadataStore.getNodeId()
                                              + " is already rebalancing from donor "
                                              + info.getDonorId() + " with info " + info);
    }
    // Acquired lock successfully, start rebalancing...
    int requestId = asyncService.getUniqueRequestId();
    // Why do we pass 'info' instead of 'stealInfo'? So that we can change
    // the state as the stores finish rebalance
    asyncService.submitOperation(requestId,
                                 new StealerBasedRebalanceAsyncOperation(this,
                                                                         voldemortConfig,
                                                                         metadataStore,
                                                                         requestId,
                                                                         info));
    return requestId;
}
This function is responsible for starting the actual async rebalance operation . This is run if this node is the stealer node
336
25
147,717
/**
 * Flips the output buffer and switches the selection key's interest set to
 * OP_WRITE, letting the Selector know we're ready to write. The read buffer
 * is cleared first unless the request handler shares read/write buffers.
 */
protected void prepForWrite(SelectionKey selectionKey) {
    if(logger.isTraceEnabled())
        traceInputBufferState("About to clear read buffer");
    if(!requestHandlerFactory.shareReadWriteBuffer())
        inputStream.clear();
    if(logger.isTraceEnabled())
        traceInputBufferState("Cleared read buffer");
    outputStream.getBuffer().flip();
    selectionKey.interestOps(SelectionKey.OP_WRITE);
}
Flips the output buffer and lets the Selector know we re ready to write .
113
17
147,718
/**
 * Negotiates the wire protocol from the first three bytes of the input
 * buffer and installs the matching request handler.
 *
 * @return true if the request should continue being processed (not enough
 *         bytes yet, or a legacy client that sent no proposal); false when a
 *         protocol was negotiated and the "ok" response has been queued.
 */
private boolean initRequestHandler(SelectionKey selectionKey) {
    ByteBuffer inputBuffer = inputStream.getBuffer();
    int remaining = inputBuffer.remaining();
    // Don't have enough bytes to determine the protocol yet...
    if(remaining < 3)
        return true;
    // Peek (absolute gets) at the three-byte protocol proposal
    byte[] protoBytes = { inputBuffer.get(0), inputBuffer.get(1), inputBuffer.get(2) };
    try {
        String proto = ByteUtils.getString(protoBytes, "UTF-8");
        inputBuffer.clear();
        RequestFormatType requestFormatType = RequestFormatType.fromCode(proto);
        requestHandler = requestHandlerFactory.getRequestHandler(requestFormatType);
        if(logger.isInfoEnabled())
            logger.info("Protocol negotiated for " + socketChannel.socket() + ": "
                        + requestFormatType.getDisplayName());
        // The protocol negotiation is the first request, so respond by
        // sticking the bytes in the output buffer, signaling the Selector,
        // and returning false to denote no further processing is needed.
        outputStream.getBuffer().put(ByteUtils.getBytes("ok", "UTF-8"));
        prepForWrite(selectionKey);
        return false;
    } catch(IllegalArgumentException e) {
        // okay we got some nonsense. For backwards compatibility,
        // assume this is an old client who does not know how to negotiate
        RequestFormatType requestFormatType = RequestFormatType.VOLDEMORT_V0;
        requestHandler = requestHandlerFactory.getRequestHandler(requestFormatType);
        if(logger.isInfoEnabled())
            logger.info("No protocol proposal given for " + socketChannel.socket()
                        + ", assuming " + requestFormatType.getDisplayName());
        return true;
    }
}
Returns true if the request should continue .
390
8
147,719
/**
 * Called before cluster management operations: records each node's current
 * quota-enforcement setting, then disables quota enforcement on all of them.
 */
public void rememberAndDisableQuota() {
    for(Integer nodeId: nodeIds) {
        String rawValue = adminClient.metadataMgmtOps.getRemoteMetadata(nodeId,
                                                                        MetadataStore.QUOTA_ENFORCEMENT_ENABLED_KEY)
                                                     .getValue();
        mapNodeToQuotaEnforcingEnabled.put(nodeId, Boolean.parseBoolean(rawValue));
    }
    adminClient.metadataMgmtOps.updateRemoteMetadata(nodeIds,
                                                     MetadataStore.QUOTA_ENFORCEMENT_ENABLED_KEY,
                                                     Boolean.toString(false));
}
Before cluster management operations i . e . remember and disable quota enforcement settings
140
14
147,720
/**
 * Called after cluster management operations: restores each node's remembered
 * quota-enforcement setting, then rebalances quota for every store.
 */
public void resetQuotaAndRecoverEnforcement() {
    for(Integer nodeId: nodeIds) {
        boolean wasEnforcing = mapNodeToQuotaEnforcingEnabled.get(nodeId);
        adminClient.metadataMgmtOps.updateRemoteMetadata(Arrays.asList(nodeId),
                                                         MetadataStore.QUOTA_ENFORCEMENT_ENABLED_KEY,
                                                         Boolean.toString(wasEnforcing));
    }
    for(String storeName: storeNames) {
        adminClient.quotaMgmtOps.rebalanceQuota(storeName);
    }
}
After cluster management operations i . e . reset quota and recover quota enforcement settings
129
15
147,721
/**
 * Increments the version info associated with the given node and records the
 * supplied timestamp on this clock.
 *
 * @param node node id whose entry to increment (must fit in a short)
 * @param time timestamp to record on this clock
 * @throws IllegalArgumentException if the node id is out of range
 * @throws IllegalStateException    if the clock would reach its maximum size
 */
public void incrementVersion(int node, long time) {
    if(node < 0 || node > Short.MAX_VALUE)
        throw new IllegalArgumentException(node + " is outside the acceptable range of node ids.");
    boolean isNewEntry = !versionMap.containsKey((short) node);
    // Check capacity BEFORE mutating: the original checked after put(), which
    // left the clock partially updated (timestamp set, entry added) when it
    // threw. The throw condition itself is unchanged: size after the put
    // would be >= MAX_NUMBER_OF_VERSIONS.
    if(versionMap.size() + (isNewEntry ? 1 : 0) >= MAX_NUMBER_OF_VERSIONS) {
        throw new IllegalStateException("Vector clock is full!");
    }
    this.timestamp = time;
    Long version = versionMap.get((short) node);
    if(version == null) {
        version = 1L;
    } else {
        version = version + 1L;
    }
    versionMap.put((short) node, version);
}
Increment the version info associated with the given node
140
10
147,722
/**
 * Returns a new vector clock based on this clock but with the entry for
 * {@code nodeId} incremented at the given time; this clock is not modified.
 */
public VectorClock incremented(int nodeId, long time) {
    final VectorClock copy = this.clone();
    copy.incrementVersion(nodeId, time);
    return copy;
}
Get new vector clock based on this clock but incremented on index nodeId
42
15
147,723
/**
 * Builds a map from node id to the number of primary partition ids that node
 * hosts.
 */
private Map<Integer, Integer> getNodeIdToPrimaryCount(Cluster cluster) {
    Map<Integer, Integer> primaryCounts = Maps.newHashMap();
    for(Node node: cluster.getNodes()) {
        primaryCounts.put(node.getId(), node.getPartitionIds().size());
    }
    return primaryCounts;
}
Go through all nodes and determine how many partition Ids each node hosts .
91
15
147,724
/**
 * Builds a map from node id to the number of zone-primary partition ids that
 * node hosts, per the given routing plan.
 */
private Map<Integer, Integer> getNodeIdToZonePrimaryCount(Cluster cluster,
                                                          StoreRoutingPlan storeRoutingPlan) {
    Map<Integer, Integer> zonePrimaryCounts = Maps.newHashMap();
    for(Integer nodeId: cluster.getNodeIds()) {
        int count = storeRoutingPlan.getZonePrimaryPartitionIds(nodeId).size();
        zonePrimaryCounts.put(nodeId, count);
    }
    return zonePrimaryCounts;
}
Goes through all partition IDs and determines which node is first in the replicating node list for every zone. This determines the number of zone primaries each node hosts.
109
32
147,725
/**
 * Builds a map from node id to the number of zone n-ary partition ids that
 * node hosts, per the given routing plan.
 */
private Map<Integer, Integer> getNodeIdToNaryCount(Cluster cluster,
                                                   StoreRoutingPlan storeRoutingPlan) {
    Map<Integer, Integer> naryCounts = Maps.newHashMap();
    for(int nodeId: cluster.getNodeIds()) {
        int count = storeRoutingPlan.getZoneNAryPartitionIds(nodeId).size();
        naryCounts.put(nodeId, count);
    }
    return naryCounts;
}
Goes through all node IDs and determines how many zone n-ary partition IDs each node hosts.
110
9
147,726
/**
 * Dumps, per node, the hosted partition ids grouped by zone n-ary type
 * (0 = zone primary, 1 = zone secondary, ...).
 *
 * @return a multi-line human-readable summary
 */
private String dumpZoneNAryDetails(StoreRoutingPlan storeRoutingPlan) {
    StringBuilder sb = new StringBuilder();
    sb.append("\tDetailed Dump (Zone N-Aries):").append(Utils.NEWLINE);
    for(Node node: storeRoutingPlan.getCluster().getNodes()) {
        int zoneId = node.getZoneId();
        int nodeId = node.getId();
        sb.append("\tNode ID: " + nodeId + " in zone " + zoneId).append(Utils.NEWLINE);
        List<Integer> naries = storeRoutingPlan.getZoneNAryPartitionIds(nodeId);
        // Bucket each hosted partition id by its zone n-ary type on this node
        Map<Integer, List<Integer>> zoneNaryTypeToPartitionIds = new HashMap<Integer, List<Integer>>();
        for(int nary: naries) {
            int zoneReplicaType = storeRoutingPlan.getZoneNaryForNodesPartition(zoneId, nodeId, nary);
            if(!zoneNaryTypeToPartitionIds.containsKey(zoneReplicaType)) {
                zoneNaryTypeToPartitionIds.put(zoneReplicaType, new ArrayList<Integer>());
            }
            zoneNaryTypeToPartitionIds.get(zoneReplicaType).add(nary);
        }
        // TreeSet: print the n-ary types in ascending order
        for(int replicaType: new TreeSet<Integer>(zoneNaryTypeToPartitionIds.keySet())) {
            sb.append("\t\t" + replicaType + " : ");
            sb.append(zoneNaryTypeToPartitionIds.get(replicaType).toString());
            sb.append(Utils.NEWLINE);
        }
    }
    return sb.toString();
}
Dumps the partition IDs per node in terms of zone n - ary type .
412
17
147,727
/**
 * Summarizes balance for the given node-id-to-partition-count map: prints the
 * per-node counts and per-zone stats, and sums each zone's utility value into
 * a single number to be minimized.
 *
 * @param nodeIdToPartitionCount partition count per node id
 * @param title                  heading for the printed summary
 * @return pair of (utility to be minimized, human-readable summary)
 */
private Pair<Double, String> summarizeBalance(final Map<Integer, Integer> nodeIdToPartitionCount,
                                              String title) {
    StringBuilder builder = new StringBuilder();
    builder.append("\n" + title + "\n");
    Map<Integer, ZoneBalanceStats> zoneToBalanceStats = new HashMap<Integer, ZoneBalanceStats>();
    for(Integer zoneId: cluster.getZoneIds()) {
        zoneToBalanceStats.put(zoneId, new ZoneBalanceStats());
    }
    for(Node node: cluster.getNodes()) {
        int curCount = nodeIdToPartitionCount.get(node.getId());
        builder.append("\tNode ID: " + node.getId() + " : " + curCount + " ("
                       + node.getHost() + ")\n");
        zoneToBalanceStats.get(node.getZoneId()).addPartitions(curCount);
    }
    // double utilityToBeMinimized = Double.MIN_VALUE;
    double utilityToBeMinimized = 0;
    for(Integer zoneId: cluster.getZoneIds()) {
        builder.append("Zone " + zoneId + "\n");
        builder.append(zoneToBalanceStats.get(zoneId).dumpStats());
        // Utility is additive across zones
        utilityToBeMinimized += zoneToBalanceStats.get(zoneId).getUtility();
        /*-
         * Another utility function to consider
        if(zoneToBalanceStats.get(zoneId).getMaxMinRatio() > utilityToBeMinimized) {
            utilityToBeMinimized = zoneToBalanceStats.get(zoneId).getUtility();
        }
         */
    }
    return Pair.create(utilityToBeMinimized, builder.toString());
}
Summarizes balance for the given nodeId to PartitionCount .
397
14
147,728
/**
 * Blocking function which completes the migration of one store: kicks off the
 * async partition migration from the donor and waits for it to finish.
 *
 * @param storeName       store being migrated
 * @param adminClient     admin client used to drive the migration
 * @param stealInfo       the rebalance task (donor, partitions, initial cluster)
 * @param isReadOnlyStore whether the store is read-only (currently unused in
 *                        this body — kept for the caller's signature)
 */
private void rebalanceStore(String storeName,
                            final AdminClient adminClient,
                            RebalanceTaskInfo stealInfo,
                            boolean isReadOnlyStore) {
    // Move partitions
    if(stealInfo.getPartitionIds(storeName) != null
       && stealInfo.getPartitionIds(storeName).size() > 0) {
        logger.info(getHeader(stealInfo) + "Starting partitions migration for store "
                    + storeName + " from donor node " + stealInfo.getDonorId());
        int asyncId = adminClient.storeMntOps.migratePartitions(stealInfo.getDonorId(),
                                                                metadataStore.getNodeId(),
                                                                storeName,
                                                                stealInfo.getPartitionIds(storeName),
                                                                null,
                                                                stealInfo.getInitialCluster());
        rebalanceStatusList.add(asyncId);
        if(logger.isDebugEnabled()) {
            logger.debug(getHeader(stealInfo) + "Waiting for completion for " + storeName
                         + " with async id " + asyncId);
        }
        adminClient.rpcOps.waitForCompletion(metadataStore.getNodeId(),
                                             asyncId,
                                             voldemortConfig.getRebalancingTimeoutSec(),
                                             TimeUnit.SECONDS,
                                             getStatus());
        // Cast to Object so List.remove removes the element, not the index
        rebalanceStatusList.remove((Object) asyncId);
        logger.info(getHeader(stealInfo) + "Completed partition migration for store "
                    + storeName + " from donor node " + stealInfo.getDonorId());
    }
    logger.info(getHeader(stealInfo) + "Finished all migration for store " + storeName);
}
Blocking function which completes the migration of one store
360
10
147,729
/**
 * Records a sync-op duration (ns): per-node stats first, then the aggregate.
 */
public void recordSyncOpTimeNs(SocketDestination dest, long opTimeNs) {
    if(dest == null) {
        this.syncOpTimeRequestCounter.addRequest(opTimeNs);
        return;
    }
    getOrCreateNodeStats(dest).recordSyncOpTimeNs(null, opTimeNs);
    // Recurse with dest == null to also update the aggregate counter
    recordSyncOpTimeNs(null, opTimeNs);
}
Record operation for sync ops time
82
6
147,730
/**
 * Records an async-op duration (ns): per-node stats first, then the aggregate.
 */
public void recordAsyncOpTimeNs(SocketDestination dest, long opTimeNs) {
    if(dest == null) {
        this.asynOpTimeRequestCounter.addRequest(opTimeNs);
        return;
    }
    getOrCreateNodeStats(dest).recordAsyncOpTimeNs(null, opTimeNs);
    // Recurse with dest == null to also update the aggregate counter
    recordAsyncOpTimeNs(null, opTimeNs);
}
Record operation for async ops time
83
6
147,731
/**
 * Records a connection-establishment time (us): per-node stats first, then
 * the aggregate counter (which stores nanoseconds, hence the conversion).
 */
public void recordConnectionEstablishmentTimeUs(SocketDestination dest, long connEstTimeUs) {
    if(dest == null) {
        this.connectionEstablishmentRequestCounter.addRequest(connEstTimeUs * Time.NS_PER_US);
        return;
    }
    getOrCreateNodeStats(dest).recordConnectionEstablishmentTimeUs(null, connEstTimeUs);
    // Recurse with dest == null to also update the aggregate counter
    recordConnectionEstablishmentTimeUs(null, connEstTimeUs);
}
Record the connection establishment time
97
5
147,732
/**
 * Records a checkout wait time (us): per-node stats first, then the aggregate
 * counter (which stores nanoseconds, hence the conversion).
 */
public void recordCheckoutTimeUs(SocketDestination dest, long checkoutTimeUs) {
    if(dest == null) {
        this.checkoutTimeRequestCounter.addRequest(checkoutTimeUs * Time.NS_PER_US);
        return;
    }
    getOrCreateNodeStats(dest).recordCheckoutTimeUs(null, checkoutTimeUs);
    // Recurse with dest == null to also update the aggregate counter
    recordCheckoutTimeUs(null, checkoutTimeUs);
}
Record the checkout wait time in us
89
7
147,733
/**
 * Records a checkout queue length: per-node stats first, then the aggregate
 * histogram (whose monitoring interval is also checked on the aggregate path).
 */
public void recordCheckoutQueueLength(SocketDestination dest, int queueLength) {
    if(dest == null) {
        this.checkoutQueueLengthHistogram.insert(queueLength);
        checkMonitoringInterval();
        return;
    }
    getOrCreateNodeStats(dest).recordCheckoutQueueLength(null, queueLength);
    // Recurse with dest == null to also update the aggregate histogram
    recordCheckoutQueueLength(null, queueLength);
}
Record the checkout queue length
85
5
147,734
/**
 * Records a resource-request wait time (us): per-node stats first, then the
 * aggregate counter (which stores nanoseconds, hence the conversion).
 */
public void recordResourceRequestTimeUs(SocketDestination dest, long resourceRequestTimeUs) {
    if(dest == null) {
        this.resourceRequestTimeRequestCounter.addRequest(resourceRequestTimeUs * Time.NS_PER_US);
        return;
    }
    getOrCreateNodeStats(dest).recordResourceRequestTimeUs(null, resourceRequestTimeUs);
    // Recurse with dest == null to also update the aggregate counter
    recordResourceRequestTimeUs(null, resourceRequestTimeUs);
}
Record the resource request wait time in us
94
8
147,735
/**
 * Records a resource-request queue length: per-node stats first, then the
 * aggregate histogram (with a monitoring-interval check on the aggregate path).
 */
public void recordResourceRequestQueueLength(SocketDestination dest, int queueLength) {
    if(dest == null) {
        this.resourceRequestQueueLengthHistogram.insert(queueLength);
        checkMonitoringInterval();
        return;
    }
    getOrCreateNodeStats(dest).recordResourceRequestQueueLength(null, queueLength);
    // Recurse with dest == null to also update the aggregate histogram
    recordResourceRequestQueueLength(null, queueLength);
}
Record the resource request queue length
86
6
147,736
/**
 * Unregisters the per-destination stats MBean for every destination in the
 * stats map. Individual unregister failures are swallowed so cleanup always
 * proceeds to the next destination.
 */
public void close() {
    Iterator<SocketDestination> it = getStatsMap().keySet().iterator();
    while(it.hasNext()) {
        try {
            SocketDestination destination = it.next();
            // NOTE(review): replace(' ', ' ') is a no-op — the original intent
            // was presumably to replace spaces with some other character in
            // the object name; confirm against the registration site.
            JmxUtils.unregisterMbean(JmxUtils.createObjectName(JmxUtils.getPackageName(ClientRequestExecutor.class),
                                                               "stats_"
                                                                       + destination.toString()
                                                                                    .replace(' ', ' ')
                                                                       + identifierString));
        } catch(Exception e) {
            // best-effort cleanup: deliberately ignore unregister failures
        }
    }
}
Unregister all MBeans
125
6
147,737
/**
 * This method handles submitting and then waiting for the request from the
 * server. It uses the ClientRequest API to actually write the request and
 * then read back the response. This implementation will block for a response
 * from the server.
 *
 * @param delegate      the request to execute
 * @param operationName name used in error messages and debug logging
 * @return the server's result
 * @throws UnreachableStoreException on interruption or transport failure
 */
private <T> T request(ClientRequest<T> delegate, String operationName) {
    long startTimeMs = -1;
    long startTimeNs = -1;
    if(logger.isDebugEnabled()) {
        startTimeMs = System.currentTimeMillis();
    }
    // Check out an executor (a pooled connection) for this destination
    ClientRequestExecutor clientRequestExecutor = pool.checkout(destination);
    String debugMsgStr = "";
    startTimeNs = System.nanoTime();
    BlockingClientRequest<T> blockingClientRequest = null;
    try {
        blockingClientRequest = new BlockingClientRequest<T>(delegate, timeoutMs);
        // The elapsed checkout time is deducted from the request's budget
        clientRequestExecutor.addClientRequest(blockingClientRequest,
                                               timeoutMs,
                                               System.nanoTime() - startTimeNs);
        boolean awaitResult = blockingClientRequest.await();
        if(awaitResult == false) {
            blockingClientRequest.timeOut();
        }
        if(logger.isDebugEnabled())
            debugMsgStr += "success";
        return blockingClientRequest.getResult();
    } catch(InterruptedException e) {
        if(logger.isDebugEnabled())
            debugMsgStr += "unreachable: " + e.getMessage();
        throw new UnreachableStoreException("Failure in " + operationName + " on "
                                            + destination + ": " + e.getMessage(), e);
    } catch(UnreachableStoreException e) {
        // The connection is suspect: close it rather than returning it to the pool
        clientRequestExecutor.close();
        if(logger.isDebugEnabled())
            debugMsgStr += "failure: " + e.getMessage();
        throw new UnreachableStoreException("Failure in " + operationName + " on "
                                            + destination + ": " + e.getMessage(),
                                            e.getCause());
    } finally {
        if(blockingClientRequest != null && !blockingClientRequest.isComplete()) {
            // close the executor if we timed out
            clientRequestExecutor.close();
        }
        // Record operation time
        long opTimeNs = Utils.elapsedTimeNs(startTimeNs, System.nanoTime());
        if(stats != null) {
            stats.recordSyncOpTimeNs(destination, opTimeNs);
        }
        if(logger.isDebugEnabled()) {
            logger.debug("Sync request end, type: " + operationName + " requestRef: "
                         + System.identityHashCode(delegate) + " totalTimeNs: " + opTimeNs
                         + " start time: " + startTimeMs + " end time: "
                         + System.currentTimeMillis() + " client:"
                         + clientRequestExecutor.getSocketChannel().socket().getLocalAddress()
                         + ":"
                         + clientRequestExecutor.getSocketChannel().socket().getLocalPort()
                         + " server: "
                         + clientRequestExecutor.getSocketChannel()
                                                .socket()
                                                .getRemoteSocketAddress() + " outcome: "
                         + debugMsgStr);
        }
        // Return the (possibly closed) executor to the pool
        pool.checkin(destination, clientRequestExecutor);
    }
}
This method handles submitting and then waiting for the request from the server . It uses the ClientRequest API to actually write the request and then read back the response . This implementation will block for a response from the server .
647
43
147,738
/**
 * Submits the request asynchronously via the pool; the callback is invoked
 * when the response arrives. This implementation will not block for a
 * response from the server.
 */
private <T> void requestAsync(ClientRequest<T> delegate,
                              NonblockingStoreCallback callback,
                              long timeoutMs,
                              String operationName) {
    pool.submitAsync(this.destination, delegate, callback, timeoutMs, operationName);
}
This method handles submitting and then waiting for the request from the server . It uses the ClientRequest API to actually write the request and then read back the response . This implementation will not block for a response from the server .
51
44
147,739
/**
 * MBean getter for FETCH_KEYS: average network time in milliseconds.
 */
@JmxGetter(name = "avgFetchKeysNetworkTimeMs", description = "average time spent on network, for fetch keys")
public double getAvgFetchKeysNetworkTimeMs() {
    double avgNs = networkTimeCounterMap.get(Operation.FETCH_KEYS).getAvgEventValue();
    return avgNs / Time.NS_PER_MS;
}
Mbeans for FETCH_KEYS
80
9
147,740
/**
 * MBean getter for FETCH_ENTRIES: average network time in milliseconds.
 */
@JmxGetter(name = "avgFetchEntriesNetworkTimeMs", description = "average time spent on network, for streaming operations")
public double getAvgFetchEntriesNetworkTimeMs() {
    double avgNs = networkTimeCounterMap.get(Operation.FETCH_ENTRIES).getAvgEventValue();
    return avgNs / Time.NS_PER_MS;
}
Mbeans for FETCH_ENTRIES
83
10
147,741
/**
 * MBean getter for UPDATE_ENTRIES: average network time in milliseconds.
 */
@JmxGetter(name = "avgUpdateEntriesNetworkTimeMs", description = "average time spent on network, for streaming operations")
public double getAvgUpdateEntriesNetworkTimeMs() {
    double avgNs = networkTimeCounterMap.get(Operation.UPDATE_ENTRIES).getAvgEventValue();
    return avgNs / Time.NS_PER_MS;
}
Mbeans for UPDATE_ENTRIES
79
8
147,742
/**
 * MBean getter for SLOP_UPDATE: average network time in milliseconds.
 */
@JmxGetter(name = "avgSlopUpdateNetworkTimeMs", description = "average time spent on network, for streaming operations")
public double getAvgSlopUpdateNetworkTimeMs() {
    double avgNs = networkTimeCounterMap.get(Operation.SLOP_UPDATE).getAvgEventValue();
    return avgNs / Time.NS_PER_MS;
}
Mbeans for SLOP_UPDATE
78
7
147,743
/**
 * Extracts the java class name from a schema-info of the form
 * {@code java=foo.Bar}.
 *
 * BUG FIX: the previous version split both the language-pair list and the
 * key/value pair on the space character (' '), so a well-formed schema-info
 * like "java=foo.Bar" always produced a single-element pair and threw. The
 * list is comma-separated and the pair is '='-separated, as the error message
 * itself documents. Rewritten on top of String.split (empty trailing tokens
 * dropped, matching Commons-Lang split semantics for this input).
 *
 * @param schemaInfo the schema-info string, e.g. "java=foo.Bar"
 * @return the fully qualified java class name
 * @throws IllegalArgumentException if the schema-info is empty, lists more
 *         than one language, or is not a single "java=ClassName" pair
 */
public static String getJavaClassFromSchemaInfo(String schemaInfo) {
    final String ONLY_JAVA_CLIENTS_SUPPORTED = "Only Java clients are supported currently, so the format of the schema-info should be: <schema-info>java=foo.Bar</schema-info> where foo.Bar is the fully qualified name of the message.";
    if(schemaInfo == null || schemaInfo.isEmpty())
        throw new IllegalArgumentException("This serializer requires a non-empty schema-info.");
    // The schema-info may in principle list several language=class pairs,
    // comma-separated; we only accept exactly one.
    String[] languagePairs = schemaInfo.split(",");
    if(languagePairs.length > 1)
        throw new IllegalArgumentException(ONLY_JAVA_CLIENTS_SUPPORTED);
    // Each pair is language '=' class-name
    String[] javaPair = languagePairs[0].split("=");
    if(javaPair.length != 2 || !javaPair[0].trim().equals("java"))
        throw new IllegalArgumentException(ONLY_JAVA_CLIENTS_SUPPORTED);
    return javaPair[1].trim();
}
Extracts the java class name from the schema info
257
11
147,744
/**
 * Filters the given store definitions: keeps the read-only stores when
 * {@code isReadOnly} is true, and the non-read-only stores otherwise.
 */
public static List<StoreDefinition> filterStores(List<StoreDefinition> storeDefs,
                                                 final boolean isReadOnly) {
    List<StoreDefinition> result = Lists.newArrayList();
    for(StoreDefinition storeDef: storeDefs) {
        boolean storeIsReadOnly = storeDef.getType()
                                          .equals(ReadOnlyStorageConfiguration.TYPE_NAME);
        if(storeIsReadOnly == isReadOnly) {
            result.add(storeDef);
        }
    }
    return result;
}
Given a list of store definitions filters the list depending on the boolean
101
13
147,745
/**
 * Returns the names of the given store definitions, in order.
 */
public static List<String> getStoreNames(List<StoreDefinition> storeDefList) {
    List<String> names = new ArrayList<String>();
    for(StoreDefinition def: storeDefList) {
        names.add(def.getName());
    }
    return names;
}
Given a list of store definitions return a list of store names
66
12
147,746
/**
 * Collects the distinct names of the given store definitions.
 *
 * @param storeDefList store definitions to read names from
 * @return a new set of store names (duplicates collapsed)
 */
public static Set<String> getStoreNamesSet(List<StoreDefinition> storeDefList) {
    HashSet<String> names = new HashSet<String>();
    for(StoreDefinition storeDef: storeDefList)
        names.add(storeDef.getName());
    return names;
}
Given a list of store definitions return a set of store names
68
12
147,747
/**
 * Collapses a list of store definitions into a map of representative
 * definition -> count. Two stores are treated as "the same" when they share
 * the replication factor and routing strategy type; for zone routing the
 * per-zone replication factors must also match exactly. The first store of
 * each equivalence class becomes the map key; later matches only bump its
 * count.
 */
public static HashMap < StoreDefinition , Integer > getUniqueStoreDefinitionsWithCounts ( List < StoreDefinition > storeDefs ) { HashMap < StoreDefinition , Integer > uniqueStoreDefs = Maps . newHashMap ( ) ; for ( StoreDefinition storeDef : storeDefs ) { if ( uniqueStoreDefs . isEmpty ( ) ) { uniqueStoreDefs . put ( storeDef , 1 ) ; } else { StoreDefinition sameStore = null ; // Go over all the other stores to find if this is unique for ( StoreDefinition uniqueStoreDef : uniqueStoreDefs . keySet ( ) ) { if ( uniqueStoreDef . getReplicationFactor ( ) == storeDef . getReplicationFactor ( ) && uniqueStoreDef . getRoutingStrategyType ( ) . compareTo ( storeDef . getRoutingStrategyType ( ) ) == 0 ) { // Further check for the zone routing case if ( uniqueStoreDef . getRoutingStrategyType ( ) . compareTo ( RoutingStrategyType . ZONE_STRATEGY ) == 0 ) { boolean zonesSame = true ; for ( int zoneId : uniqueStoreDef . getZoneReplicationFactor ( ) . keySet ( ) ) { if ( storeDef . getZoneReplicationFactor ( ) . get ( zoneId ) == null || storeDef . getZoneReplicationFactor ( ) . get ( zoneId ) != uniqueStoreDef . getZoneReplicationFactor ( ) . get ( zoneId ) ) { zonesSame = false ; break ; } } if ( zonesSame ) { sameStore = uniqueStoreDef ; } } else { sameStore = uniqueStoreDef ; } if ( sameStore != null ) { // Bump up the count int currentCount = uniqueStoreDefs . get ( sameStore ) ; uniqueStoreDefs . put ( sameStore , currentCount + 1 ) ; break ; } } } if ( sameStore == null ) { // New store uniqueStoreDefs . put ( storeDef , 1 ) ; } } } return uniqueStoreDefs ; }
Given a list of store definitions find out and return a map of similar store definitions + count of them
433
20
147,748
/**
 * Determines whether the given serializer name denotes one of the supported
 * Avro serializer types (generic, versioned-generic, reflective or specific).
 *
 * @param serializerName serializer type name to check
 * @return true iff the name matches one of the four Avro serializer constants
 */
public static boolean isAvroSchema(String serializerName) {
    // Idiom: return the boolean expression directly instead of
    // if(cond) return true; else return false;
    return serializerName.equals(AVRO_GENERIC_VERSIONED_TYPE_NAME)
           || serializerName.equals(AVRO_GENERIC_TYPE_NAME)
           || serializerName.equals(AVRO_REFLECTIVE_TYPE_NAME)
           || serializerName.equals(AVRO_SPECIFIC_TYPE_NAME);
}
Determines whether or not a given serializer is Avro-based
107
16
147,749
private static void validateIfAvroSchema ( SerializerDefinition serializerDef ) { if ( serializerDef . getName ( ) . equals ( AVRO_GENERIC_VERSIONED_TYPE_NAME ) || serializerDef . getName ( ) . equals ( AVRO_GENERIC_TYPE_NAME ) ) { SchemaEvolutionValidator . validateAllAvroSchemas ( serializerDef ) ; // check backwards compatibility if needed if ( serializerDef . getName ( ) . equals ( AVRO_GENERIC_VERSIONED_TYPE_NAME ) ) { SchemaEvolutionValidator . checkSchemaCompatibility ( serializerDef ) ; } } }
If provided with an Avro schema, validates it and, where required, checks backwards compatibility.
148
19
147,750
/**
 * Inserts a value into the histogram bucket it falls in. Values at or above
 * the upper bound land in the last bucket; negative values are logged and
 * ignored. Also updates the running sum and size. Thread-safe via method
 * synchronization; may reset the histogram first (resetIfNeeded).
 */
public synchronized void insert ( long data ) { resetIfNeeded ( ) ; long index = 0 ; if ( data >= this . upperBound ) { index = nBuckets - 1 ; } else if ( data < 0 ) { logger . error ( data + " can't be bucketed because it is negative!" ) ; return ; } else { index = data / step ; } if ( index < 0 || index >= nBuckets ) { // This should be dead code. Defending against code changes in // future. logger . error ( data + " can't be bucketed because index is not in range [0,nBuckets)." ) ; return ; } buckets [ ( int ) index ] ++ ; sum += data ; size ++ ; }
Insert a value into the right bucket of the histogram . If the value is larger than any bound insert into the last bucket . If the value is less than zero then ignore it .
158
37
147,751
/**
 * Ensures every node in the current cluster metadata has a corresponding
 * node store registered in both the store repository and the routed store's
 * inner-store map, creating stores for nodes added on the fly (e.g. during
 * rebalancing).
 */
private void checkAndAddNodeStore ( ) { for ( Node node : metadata . getCluster ( ) . getNodes ( ) ) { if ( ! routedStore . getInnerStores ( ) . containsKey ( node . getId ( ) ) ) { if ( ! storeRepository . hasNodeStore ( getName ( ) , node . getId ( ) ) ) { storeRepository . addNodeStore ( node . getId ( ) , createNodeStore ( node ) ) ; } routedStore . getInnerStores ( ) . put ( node . getId ( ) , storeRepository . getNodeStore ( getName ( ) , node . getId ( ) ) ) ; } } }
Check that all nodes in the new cluster have a corresponding entry in storeRepository and innerStores . add a NodeStore if not present is needed as with rebalancing we can add new nodes on the fly .
150
44
147,752
/**
 * Sets how long callers block waiting for a resource when none is available.
 *
 * @param timeout the timeout value; must be non-negative
 * @param unit the unit of {@code timeout}
 * @return this config, for chaining
 * @throws IllegalArgumentException if {@code timeout} is negative
 */
public ResourcePoolConfig setTimeout(long timeout, TimeUnit unit) {
    if(timeout >= 0) {
        this.timeoutNs = TimeUnit.NANOSECONDS.convert(timeout, unit);
        return this;
    }
    throw new IllegalArgumentException("The timeout must be a non-negative number.");
}
Sets the timeout for which we block when a resource is not available
67
12
147,753
/**
 * Serializes a list of versioned values into one byte array. Each entry is
 * written as: value length, value bytes, vector-clock length, clock bytes.
 * The inverse of disassembleValues.
 */
private byte[] assembleValues(List<Versioned<byte[]>> values) throws IOException {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buffer);
    for(Versioned<byte[]> versioned: values) {
        byte[] payload = versioned.getValue();
        VectorClock clock = (VectorClock) versioned.getVersion();
        out.writeInt(payload.length);
        out.write(payload);
        out.writeInt(clock.sizeInBytes());
        out.write(clock.toBytes());
    }
    return buffer.toByteArray();
}
Serializes the versioned values into a single byte array
150
5
147,754
/**
 * Splits a byte array produced by assembleValues back into its list of
 * versioned values. Each record is: value length, value bytes, vector-clock
 * length, clock bytes.
 *
 * @param values the packed byte array, or null for an empty result
 * @return the decoded versioned values (empty list for null input)
 * @throws IOException if the stream is truncated or otherwise unreadable
 */
private List<Versioned<byte[]>> disassembleValues(byte[] values) throws IOException {
    if(values == null)
        return new ArrayList<Versioned<byte[]>>(0);
    List<Versioned<byte[]>> returnList = new ArrayList<Versioned<byte[]>>();
    DataInputStream dataStream = new DataInputStream(new ByteArrayInputStream(values));
    while(dataStream.available() > 0) {
        byte[] object = new byte[dataStream.readInt()];
        // BUG FIX: InputStream.read(byte[]) may fill only part of the buffer
        // and its return value was ignored; readFully() guarantees the whole
        // buffer is populated or throws EOFException on truncated input.
        dataStream.readFully(object);
        byte[] clockBytes = new byte[dataStream.readInt()];
        dataStream.readFully(clockBytes);
        returnList.add(new Versioned<byte[]>(object, new VectorClock(clockBytes)));
    }
    return returnList;
}
Splits up value into multiple versioned values
207
9
147,755
/**
 * Logs a short INFO-level status line for the current fetch: partition id,
 * number of entries fetched so far, and the store name.
 */
protected void statusInfoMessage(final String tag) {
    if(!logger.isInfoEnabled())
        return;
    logger.info(tag + " : [partition: " + currentPartition + ", partitionFetched: "
                + currentPartitionFetched + "] for store " + storageEngine.getName());
}
Simple info message for status
72
5
147,756
/**
 * Estimates the size in bytes of a slop entry, for throttling purposes:
 * key length + vector-clock size, plus the value length for PUTs.
 * Unknown operations are logged and contribute nothing extra.
 */
private int slopSize(Versioned<Slop> slopVersioned) {
    Slop slop = slopVersioned.getValue();
    VectorClock clock = (VectorClock) slopVersioned.getVersion();
    int nBytes = slop.getKey().length() + clock.sizeInBytes();
    switch(slop.getOperation()) {
        case PUT:
            nBytes += slop.getValue().length;
            break;
        case DELETE:
            // Deletes carry no value payload.
            break;
        default:
            logger.error("Unknown slop operation: " + slop.getOperation());
    }
    return nBytes;
}
Returns the approximate size of slop to help in throttling
149
12
147,757
/**
 * Returns a lazily-initialized REST client for the given store: the wrapped
 * Callable (and hence the underlying raw store) is only resolved when the
 * client is first used, so direct calls to this method are cheap.
 */
@ Override public < K , V > StoreClient < K , V > getStoreClient ( final String storeName , final InconsistencyResolver < Versioned < V > > resolver ) { // wrap it in LazyStoreClient here so any direct calls to this method // returns a lazy client return new LazyStoreClient < K , V > ( new Callable < StoreClient < K , V > > ( ) { @ Override public StoreClient < K , V > call ( ) throws Exception { Store < K , V , Object > clientStore = getRawStore ( storeName , resolver ) ; return new RESTClient < K , V > ( storeName , clientStore ) ; } } , true ) ; }
Creates a REST client used to perform Voldemort operations against the Coordinator
154
13
147,758
/**
 * Absolute value that never goes negative: unlike Math.abs, maps
 * Integer.MIN_VALUE (which has no positive counterpart) to Integer.MAX_VALUE.
 */
private static int abs(int a) {
    if(a == Integer.MIN_VALUE)
        return Integer.MAX_VALUE;
    return (a < 0) ? -a : a;
}
A modified version of abs that always returns a non - negative value . Math . abs returns Integer . MIN_VALUE if a == Integer . MIN_VALUE and this method returns Integer . MAX_VALUE in that case .
41
44
147,759
/**
 * Computes the master partition for a key: non-negative hash modulo the
 * partition count (clamped to at least 1 to avoid division by zero).
 */
@Override
public Integer getMasterPartition(byte[] key) {
    int numPartitions = Math.max(1, this.partitionToNode.length);
    return abs(hash.hash(key)) % numPartitions;
}
Obtain the master partition for a given key
45
9
147,760
protected boolean isSlopDead ( Cluster cluster , Set < String > storeNames , Slop slop ) { // destination node , no longer exists if ( ! cluster . getNodeIds ( ) . contains ( slop . getNodeId ( ) ) ) { return true ; } // destination store, no longer exists if ( ! storeNames . contains ( slop . getStoreName ( ) ) ) { return true ; } // else. slop is alive return false ; }
A slop is dead if the destination node or the store does not exist anymore on the cluster .
100
20
147,761
protected void handleDeadSlop ( SlopStorageEngine slopStorageEngine , Pair < ByteArray , Versioned < Slop > > keyAndVal ) { Versioned < Slop > versioned = keyAndVal . getSecond ( ) ; // If configured to delete the dead slop if ( voldemortConfig . getAutoPurgeDeadSlops ( ) ) { slopStorageEngine . delete ( keyAndVal . getFirst ( ) , versioned . getVersion ( ) ) ; if ( getLogger ( ) . isDebugEnabled ( ) ) { getLogger ( ) . debug ( "Auto purging dead slop :" + versioned . getValue ( ) ) ; } } else { // Keep ignoring the dead slops if ( getLogger ( ) . isDebugEnabled ( ) ) { getLogger ( ) . debug ( "Ignoring dead slop :" + versioned . getValue ( ) ) ; } } }
Handle slop for nodes that are no longer part of the cluster . It may not always be the case . For example shrinking a zone or deleting a store .
199
32
147,762
/**
 * Closes the ClientRequestExecutor's connection, bumps the destroyed-count,
 * records a CONNECTION_DESTROYED_EVENT when stats are enabled, and logs the
 * destruction at DEBUG.
 */
@Override
public void destroy(SocketDestination dest, ClientRequestExecutor clientRequestExecutor)
        throws Exception {
    clientRequestExecutor.close();
    int totalDestroyed = destroyed.incrementAndGet();
    if(stats != null)
        stats.incrementCount(dest, ClientSocketStats.Tracked.CONNECTION_DESTROYED_EVENT);
    if(logger.isDebugEnabled())
        logger.debug("Destroyed socket " + totalDestroyed + " connection to "
                     + dest.getHost() + ":" + dest.getPort());
}
Close the ClientRequestExecutor .
129
7
147,763
/**
 * Parses a single fat-client config, encoded as an Avro/JSON map of string
 * to string, into a Properties object. On any parse failure the exception
 * is printed and a (possibly empty) Properties is returned.
 */
@SuppressWarnings("unchecked")
public static Properties readSingleClientConfigAvro(String configAvro) {
    Properties props = new Properties();
    try {
        JsonDecoder decoder = new JsonDecoder(CLIENT_CONFIG_AVRO_SCHEMA, configAvro);
        GenericDatumReader<Object> datumReader =
                new GenericDatumReader<Object>(CLIENT_CONFIG_AVRO_SCHEMA);
        Map<Utf8, Utf8> flowMap = (Map<Utf8, Utf8>) datumReader.read(null, decoder);
        for(Map.Entry<Utf8, Utf8> entry: flowMap.entrySet())
            props.put(entry.getKey().toString(), entry.getValue().toString());
    } catch(Exception e) {
        e.printStackTrace();
    }
    return props;
}
Parses a string that contains single fat client config string in avro format
201
16
147,764
/**
 * Parses an Avro/JSON string holding the fat-client configs of multiple
 * stores into a map of store name -> Properties. Empty store names abort the
 * parse; any exception is printed and a possibly-partial map is returned.
 */
@ SuppressWarnings ( "unchecked" ) public static Map < String , Properties > readMultipleClientConfigAvro ( String configAvro ) { Map < String , Properties > mapStoreToProps = Maps . newHashMap ( ) ; try { JsonDecoder decoder = new JsonDecoder ( CLIENT_CONFIGS_AVRO_SCHEMA , configAvro ) ; GenericDatumReader < Object > datumReader = new GenericDatumReader < Object > ( CLIENT_CONFIGS_AVRO_SCHEMA ) ; Map < Utf8 , Map < Utf8 , Utf8 > > storeConfigs = ( Map < Utf8 , Map < Utf8 , Utf8 > > ) datumReader . read ( null , decoder ) ; // Store config props to return back for ( Utf8 storeName : storeConfigs . keySet ( ) ) { Properties props = new Properties ( ) ; Map < Utf8 , Utf8 > singleConfig = storeConfigs . get ( storeName ) ; for ( Utf8 key : singleConfig . keySet ( ) ) { props . put ( key . toString ( ) , singleConfig . get ( key ) . toString ( ) ) ; } if ( storeName == null || storeName . length ( ) == 0 ) { throw new Exception ( "Invalid store name found!" ) ; } mapStoreToProps . put ( storeName . toString ( ) , props ) ; } } catch ( Exception e ) { e . printStackTrace ( ) ; } return mapStoreToProps ; }
Parses a string that contains multiple fat client configs in avro format
345
16
147,765
public static String writeSingleClientConfigAvro ( Properties props ) { // TODO: Use a dedicated json lib. We shouldn't be manually manipulating json... String avroConfig = "" ; Boolean firstProp = true ; for ( String key : props . stringPropertyNames ( ) ) { if ( firstProp ) { firstProp = false ; } else { avroConfig = avroConfig + ",\n" ; } avroConfig = avroConfig + "\t\t\"" + key + "\": \"" + props . getProperty ( key ) + "\"" ; } if ( avroConfig . isEmpty ( ) ) { return "{}" ; } else { return "{\n" + avroConfig + "\n\t}" ; } }
Assembles an avro format string of single store config from store properties
159
15
147,766
public static String writeMultipleClientConfigAvro ( Map < String , Properties > mapStoreToProps ) { // TODO: Use a dedicated json lib. We shouldn't be manually manipulating json... String avroConfig = "" ; Boolean firstStore = true ; for ( String storeName : mapStoreToProps . keySet ( ) ) { if ( firstStore ) { firstStore = false ; } else { avroConfig = avroConfig + ",\n" ; } Properties props = mapStoreToProps . get ( storeName ) ; avroConfig = avroConfig + "\t\"" + storeName + "\": " + writeSingleClientConfigAvro ( props ) ; } return "{\n" + avroConfig + "\n}" ; }
Assembles an avro format string that contains multiple fat client configs from map of store to properties
161
21
147,767
/**
 * Compares two Avro strings that each contain a single store config, by
 * parsing both into Properties and testing equality.
 *
 * @return true iff the parsed property sets are equal
 */
public static Boolean compareSingleClientConfigAvro(String configAvro1, String configAvro2) {
    Properties props1 = readSingleClientConfigAvro(configAvro1);
    Properties props2 = readSingleClientConfigAvro(configAvro2);
    // Idiom: return the comparison directly instead of if/else true/false.
    return props1.equals(props2);
}
Compares two avro strings which contain single store configs
80
12
147,768
/**
 * Compares two Avro strings that each contain multiple store configs: the
 * store-name sets must match and every store's parsed properties must be
 * equal.
 *
 * @return true iff both strings describe the same stores with the same props
 */
public static Boolean compareMultipleClientConfigAvro(String configAvro1, String configAvro2) {
    Map<String, Properties> storeToProps1 = readMultipleClientConfigAvro(configAvro1);
    Map<String, Properties> storeToProps2 = readMultipleClientConfigAvro(configAvro2);
    if(!storeToProps1.keySet().equals(storeToProps2.keySet()))
        return false;
    for(Map.Entry<String, Properties> entry: storeToProps1.entrySet()) {
        Properties other = storeToProps2.get(entry.getKey());
        if(!entry.getValue().equals(other))
            return false;
    }
    return true;
}
Compares two avro strings which contain multiple store configs
201
12
147,769
/**
 * Prints the command-line help menu for the admin tool's async-job commands
 * to the given stream. (Intra-string spacing shown here may have been
 * collapsed by tokenization — NOTE(review): verify column alignment of the
 * command descriptions against the original source.)
 */
public static void printHelp ( PrintStream stream ) { stream . println ( ) ; stream . println ( "Voldemort Admin Tool Async-Job Commands" ) ; stream . println ( "---------------------------------------" ) ; stream . println ( "list Get async job list from nodes." ) ; stream . println ( "stop Stop async jobs on one node." ) ; stream . println ( ) ; stream . println ( "To get more information on each command," ) ; stream . println ( "please try \'help async-job <command-name>\'." ) ; stream . println ( ) ; }
Prints command - line help menu .
123
8
147,770
/**
 * Removes a BDB storage engine. Only when one Environment is used per store
 * does this also tear the environment down: drop it from the unreserved set,
 * delete its on-disk directory, detach its stats from aggregation, unregister
 * its JMX bean, close the Environment and forget it. With a shared
 * environment nothing is cleaned up. Synchronized on the class-wide lock.
 */
@ Override public void removeStorageEngine ( StorageEngine < ByteArray , byte [ ] , byte [ ] > engine ) { String storeName = engine . getName ( ) ; BdbStorageEngine bdbEngine = ( BdbStorageEngine ) engine ; synchronized ( lock ) { // Only cleanup the environment if it is per store. We cannot // cleanup a shared 'Environment' object if ( useOneEnvPerStore ) { Environment environment = this . environments . get ( storeName ) ; if ( environment == null ) { // Nothing to clean up. return ; } // Remove from the set of unreserved stores if needed. if ( this . unreservedStores . remove ( environment ) ) { logger . info ( "Removed environment for store name: " + storeName + " from unreserved stores" ) ; } else { logger . info ( "No environment found in unreserved stores for store name: " + storeName ) ; } // Try to delete the BDB directory associated File bdbDir = environment . getHome ( ) ; if ( bdbDir . exists ( ) && bdbDir . isDirectory ( ) ) { String bdbDirPath = bdbDir . getPath ( ) ; try { FileUtils . deleteDirectory ( bdbDir ) ; logger . info ( "Successfully deleted BDB directory : " + bdbDirPath + " for store name: " + storeName ) ; } catch ( IOException e ) { logger . error ( "Unable to delete BDB directory: " + bdbDirPath + " for store name: " + storeName ) ; } } // Remove the reference to BdbEnvironmentStats, which holds a // reference to the Environment BdbEnvironmentStats bdbEnvStats = bdbEngine . getBdbEnvironmentStats ( ) ; this . aggBdbStats . unTrackEnvironment ( bdbEnvStats ) ; // Unregister the JMX bean for Environment if ( voldemortConfig . isJmxEnabled ( ) ) { ObjectName name = JmxUtils . createObjectName ( JmxUtils . getPackageName ( bdbEnvStats . getClass ( ) ) , storeName ) ; // Un-register the environment stats mbean JmxUtils . unregisterMbean ( name ) ; } // Cleanup the environment environment . close ( ) ; this . environments . remove ( storeName ) ; logger . info ( "Successfully closed the environment for store name : " + storeName ) ; } } }
Clean up the environment object for the given storage engine
522
10
147,771
/**
 * JMX operation: forcefully runs BDB log cleaning on every managed
 * environment, wrapping any DatabaseException in a VoldemortException.
 */
@JmxOperation(description = "Forcefully invoke the log cleaning")
public void cleanLogs() {
    synchronized(lock) {
        try {
            for(Environment env: environments.values())
                env.cleanLog();
        } catch(DatabaseException e) {
            throw new VoldemortException(e);
        }
    }
}
Forceful cleanup the logs
70
5
147,772
/**
 * Applies a changed memory footprint from the store definition to the
 * store's private BDB cache. Only valid when one environment is used per
 * store and the store is reserved (has a footprint); switching between
 * shared and private cache dynamically is rejected. Re-checks that the
 * remaining shared cache stays above the configured minimum before
 * adjusting the reservation accounting and the environment's cache size.
 */
public void update ( StoreDefinition storeDef ) { if ( ! useOneEnvPerStore ) throw new VoldemortException ( "Memory foot print can be set only when using different environments per store" ) ; String storeName = storeDef . getName ( ) ; Environment environment = environments . get ( storeName ) ; // change reservation amount of reserved store if ( ! unreservedStores . contains ( environment ) && storeDef . hasMemoryFootprint ( ) ) { EnvironmentMutableConfig mConfig = environment . getMutableConfig ( ) ; long currentCacheSize = mConfig . getCacheSize ( ) ; long newCacheSize = storeDef . getMemoryFootprintMB ( ) * ByteUtils . BYTES_PER_MB ; if ( currentCacheSize != newCacheSize ) { long newReservedCacheSize = this . reservedCacheSize - currentCacheSize + newCacheSize ; // check that we leave a 'minimum' shared cache if ( ( voldemortConfig . getBdbCacheSize ( ) - newReservedCacheSize ) < voldemortConfig . getBdbMinimumSharedCache ( ) ) { throw new StorageInitializationException ( "Reservation of " + storeDef . getMemoryFootprintMB ( ) + " MB for store " + storeName + " violates minimum shared cache size of " + voldemortConfig . getBdbMinimumSharedCache ( ) ) ; } this . reservedCacheSize = newReservedCacheSize ; adjustCacheSizes ( ) ; mConfig . setCacheSize ( newCacheSize ) ; environment . setMutableConfig ( mConfig ) ; logger . info ( "Setting private cache for store " + storeDef . getName ( ) + " to " + newCacheSize ) ; } } else { // we cannot support changing a reserved store to unreserved or vice // versa since the sharedCache param is not mutable throw new VoldemortException ( "Cannot switch between shared and private cache dynamically" ) ; } }
Detect what has changed in the store definition and rewire BDB environments accordingly .
411
16
147,773
/**
 * Determines, per zone, how many primary partitions each node should own by
 * distributing that zone's target partition count evenly across its nodes.
 * The list returned for a zone has one entry per node in that zone.
 */
public static HashMap<Integer, List<Integer>> getBalancedNumberOfPrimaryPartitionsPerNode(final Cluster nextCandidateCluster,
                                                                                          Map<Integer, Integer> targetPartitionsPerZone) {
    HashMap<Integer, List<Integer>> numPartitionsPerNode = Maps.newHashMap();
    for(Integer zoneId: nextCandidateCluster.getZoneIds()) {
        int nodesInZone = nextCandidateCluster.getNumberOfNodesInZone(zoneId);
        int partitionsForZone = targetPartitionsPerZone.get(zoneId);
        numPartitionsPerNode.put(zoneId,
                                 Utils.distributeEvenlyIntoList(nodesInZone, partitionsForZone));
    }
    return numPartitionsPerNode;
}
Determines how many primary partitions each node within each zone should have . The list of integers returned per zone is the same length as the number of nodes in that zone .
162
35
147,774
/**
 * Assigns each node its target number of primary partitions (taken in node
 * order from the per-zone lists) and partitions the nodes into donors (own
 * more than their target) and stealers (own fewer). Nodes exactly at target
 * appear in neither map. Also prints a summary of both sets.
 *
 * @return pair of (donor node -> target count, stealer node -> target count)
 */
public static Pair < HashMap < Node , Integer > , HashMap < Node , Integer > > getDonorsAndStealersForBalance ( final Cluster nextCandidateCluster , Map < Integer , List < Integer > > numPartitionsPerNodePerZone ) { HashMap < Node , Integer > donorNodes = Maps . newHashMap ( ) ; HashMap < Node , Integer > stealerNodes = Maps . newHashMap ( ) ; HashMap < Integer , Integer > numNodesAssignedInZone = Maps . newHashMap ( ) ; for ( Integer zoneId : nextCandidateCluster . getZoneIds ( ) ) { numNodesAssignedInZone . put ( zoneId , 0 ) ; } for ( Node node : nextCandidateCluster . getNodes ( ) ) { int zoneId = node . getZoneId ( ) ; int offset = numNodesAssignedInZone . get ( zoneId ) ; numNodesAssignedInZone . put ( zoneId , offset + 1 ) ; int numPartitions = numPartitionsPerNodePerZone . get ( zoneId ) . get ( offset ) ; if ( numPartitions < node . getNumberOfPartitions ( ) ) { donorNodes . put ( node , numPartitions ) ; } else if ( numPartitions > node . getNumberOfPartitions ( ) ) { stealerNodes . put ( node , numPartitions ) ; } } // Print out donor/stealer information for ( Node node : donorNodes . keySet ( ) ) { System . out . println ( "Donor Node: " + node . getId ( ) + ", zoneId " + node . getZoneId ( ) + ", numPartitions " + node . getNumberOfPartitions ( ) + ", target number of partitions " + donorNodes . get ( node ) ) ; } for ( Node node : stealerNodes . keySet ( ) ) { System . out . println ( "Stealer Node: " + node . getId ( ) + ", zoneId " + node . getZoneId ( ) + ", numPartitions " + node . getNumberOfPartitions ( ) + ", target number of partitions " + stealerNodes . get ( node ) ) ; } return new Pair < HashMap < Node , Integer > , HashMap < Node , Integer > > ( donorNodes , stealerNodes ) ; }
Assign target number of partitions per node to specific node IDs . Then separates Nodes into donorNodes and stealerNodes based on whether the node needs to donate or steal primary partitions .
516
39
147,775
public static Cluster repeatedlyBalanceContiguousPartitionsPerZone ( final Cluster nextCandidateCluster , final int maxContiguousPartitionsPerZone ) { System . out . println ( "Looping to evenly balance partitions across zones while limiting contiguous partitions" ) ; // This loop is hard to make definitive. I.e., there are corner cases // for small clusters and/or clusters with few partitions for which it // may be impossible to achieve tight limits on contiguous run lenghts. // Therefore, a constant number of loops are run. Note that once the // goal is reached, the loop becomes a no-op. int repeatContigBalance = 10 ; Cluster returnCluster = nextCandidateCluster ; for ( int i = 0 ; i < repeatContigBalance ; i ++ ) { returnCluster = balanceContiguousPartitionsPerZone ( returnCluster , maxContiguousPartitionsPerZone ) ; returnCluster = balancePrimaryPartitions ( returnCluster , false ) ; System . out . println ( "Completed round of balancing contiguous partitions: round " + ( i + 1 ) + " of " + repeatContigBalance ) ; } return returnCluster ; }
Loops over cluster and repeatedly tries to break up contiguous runs of partitions . After each phase of breaking up contiguous partitions random partitions are selected to move between zones to balance the number of partitions in each zone . The second phase may re - introduce contiguous partition runs in another zone . Therefore this overall process is repeated multiple times .
245
64
147,776
/**
 * Breaks up contiguous partition runs within each zone that exceed
 * maxContiguousPartitionsPerZone: for every over-long run, partitions are
 * selected (via removeItemsToSplitListEvenly) and moved to a randomly chosen
 * node in a randomly chosen other zone. Prints diagnostics throughout and
 * returns a modified clone of the input cluster.
 */
public static Cluster balanceContiguousPartitionsPerZone ( final Cluster nextCandidateCluster , final int maxContiguousPartitionsPerZone ) { System . out . println ( "Balance number of contiguous partitions within a zone." ) ; System . out . println ( "numPartitionsPerZone" ) ; for ( int zoneId : nextCandidateCluster . getZoneIds ( ) ) { System . out . println ( zoneId + " : " + nextCandidateCluster . getNumberOfPartitionsInZone ( zoneId ) ) ; } System . out . println ( "numNodesPerZone" ) ; for ( int zoneId : nextCandidateCluster . getZoneIds ( ) ) { System . out . println ( zoneId + " : " + nextCandidateCluster . getNumberOfNodesInZone ( zoneId ) ) ; } // Break up contiguous partitions within each zone HashMap < Integer , List < Integer > > partitionsToRemoveFromZone = Maps . newHashMap ( ) ; System . out . println ( "Contiguous partitions" ) ; for ( Integer zoneId : nextCandidateCluster . getZoneIds ( ) ) { System . out . println ( "\tZone: " + zoneId ) ; Map < Integer , Integer > partitionToRunLength = PartitionBalanceUtils . getMapOfContiguousPartitions ( nextCandidateCluster , zoneId ) ; List < Integer > partitionsToRemoveFromThisZone = new ArrayList < Integer > ( ) ; for ( Map . Entry < Integer , Integer > entry : partitionToRunLength . entrySet ( ) ) { if ( entry . getValue ( ) > maxContiguousPartitionsPerZone ) { List < Integer > contiguousPartitions = new ArrayList < Integer > ( entry . getValue ( ) ) ; for ( int partitionId = entry . getKey ( ) ; partitionId < entry . getKey ( ) + entry . getValue ( ) ; partitionId ++ ) { contiguousPartitions . add ( partitionId % nextCandidateCluster . getNumberOfPartitions ( ) ) ; } System . out . println ( "Contiguous partitions: " + contiguousPartitions ) ; partitionsToRemoveFromThisZone . addAll ( Utils . removeItemsToSplitListEvenly ( contiguousPartitions , maxContiguousPartitionsPerZone ) ) ; } } partitionsToRemoveFromZone . put ( zoneId , partitionsToRemoveFromThisZone ) ; System . out . 
println ( "\t\tPartitions to remove: " + partitionsToRemoveFromThisZone ) ; } Cluster returnCluster = Cluster . cloneCluster ( nextCandidateCluster ) ; Random r = new Random ( ) ; for ( int zoneId : returnCluster . getZoneIds ( ) ) { for ( int partitionId : partitionsToRemoveFromZone . get ( zoneId ) ) { // Pick a random other zone Id List < Integer > otherZoneIds = new ArrayList < Integer > ( ) ; for ( int otherZoneId : returnCluster . getZoneIds ( ) ) { if ( otherZoneId != zoneId ) { otherZoneIds . add ( otherZoneId ) ; } } int whichOtherZoneId = otherZoneIds . get ( r . nextInt ( otherZoneIds . size ( ) ) ) ; // Pick a random node from other zone ID int whichNodeOffset = r . nextInt ( returnCluster . getNumberOfNodesInZone ( whichOtherZoneId ) ) ; int whichNodeId = new ArrayList < Integer > ( returnCluster . getNodeIdsInZone ( whichOtherZoneId ) ) . get ( whichNodeOffset ) ; // Steal partition from one zone to another! returnCluster = UpdateClusterUtils . createUpdatedCluster ( returnCluster , whichNodeId , Lists . newArrayList ( partitionId ) ) ; } } return returnCluster ; }
Ensures that no more than maxContiguousPartitionsPerZone partitions are contiguous within a single zone .
830
22
147,777
public static Cluster swapPartitions ( final Cluster nextCandidateCluster , final int nodeIdA , final int partitionIdA , final int nodeIdB , final int partitionIdB ) { Cluster returnCluster = Cluster . cloneCluster ( nextCandidateCluster ) ; // Swap partitions between nodes! returnCluster = UpdateClusterUtils . createUpdatedCluster ( returnCluster , nodeIdA , Lists . newArrayList ( partitionIdB ) ) ; returnCluster = UpdateClusterUtils . createUpdatedCluster ( returnCluster , nodeIdB , Lists . newArrayList ( partitionIdA ) ) ; return returnCluster ; }
Swaps two specified partitions .
140
6
147,778
/**
 * Within a single zone, swaps one random partition on one random node with a
 * random partition on a different random node. Returns an unchanged clone
 * when the zone has no nodes or no possible donor.
 * NOTE(review): when the stealer owns no partitions the method returns the
 * original cluster object instead of the clone returned elsewhere — content
 * is identical, but confirm callers do not rely on object identity.
 */
public static Cluster swapRandomPartitionsWithinZone ( final Cluster nextCandidateCluster , final int zoneId ) { Cluster returnCluster = Cluster . cloneCluster ( nextCandidateCluster ) ; Random r = new Random ( ) ; List < Integer > nodeIdsInZone = new ArrayList < Integer > ( nextCandidateCluster . getNodeIdsInZone ( zoneId ) ) ; if ( nodeIdsInZone . size ( ) == 0 ) { return returnCluster ; } // Select random stealer node int stealerNodeOffset = r . nextInt ( nodeIdsInZone . size ( ) ) ; Integer stealerNodeId = nodeIdsInZone . get ( stealerNodeOffset ) ; // Select random stealer partition List < Integer > stealerPartitions = returnCluster . getNodeById ( stealerNodeId ) . getPartitionIds ( ) ; if ( stealerPartitions . size ( ) == 0 ) { return nextCandidateCluster ; } int stealerPartitionOffset = r . nextInt ( stealerPartitions . size ( ) ) ; int stealerPartitionId = stealerPartitions . get ( stealerPartitionOffset ) ; // Select random donor node List < Integer > donorNodeIds = new ArrayList < Integer > ( ) ; donorNodeIds . addAll ( nodeIdsInZone ) ; donorNodeIds . remove ( stealerNodeId ) ; if ( donorNodeIds . isEmpty ( ) ) { // No donor nodes! return returnCluster ; } int donorIdOffset = r . nextInt ( donorNodeIds . size ( ) ) ; Integer donorNodeId = donorNodeIds . get ( donorIdOffset ) ; // Select random donor partition List < Integer > donorPartitions = returnCluster . getNodeById ( donorNodeId ) . getPartitionIds ( ) ; int donorPartitionOffset = r . nextInt ( donorPartitions . size ( ) ) ; int donorPartitionId = donorPartitions . get ( donorPartitionOffset ) ; return swapPartitions ( returnCluster , stealerNodeId , stealerPartitionId , donorNodeId , donorPartitionId ) ; }
Within a single zone swaps one random partition on one random node with another random partition on different random node .
471
21
147,779
/**
 * Hill-climbing shuffle: attempts up to randomSwapAttempts random intra-zone
 * partition swaps (cycling through the given zone ids, or all zones when the
 * list is empty), keeping a swap only when it lowers the PartitionBalance
 * utility, and stopping early after randomSwapSuccesses improvements.
 */
public static Cluster randomShufflePartitions ( final Cluster nextCandidateCluster , final int randomSwapAttempts , final int randomSwapSuccesses , final List < Integer > randomSwapZoneIds , List < StoreDefinition > storeDefs ) { List < Integer > zoneIds = null ; if ( randomSwapZoneIds . isEmpty ( ) ) { zoneIds = new ArrayList < Integer > ( nextCandidateCluster . getZoneIds ( ) ) ; } else { zoneIds = new ArrayList < Integer > ( randomSwapZoneIds ) ; } List < Integer > nodeIds = new ArrayList < Integer > ( ) ; Cluster returnCluster = Cluster . cloneCluster ( nextCandidateCluster ) ; double currentUtility = new PartitionBalance ( returnCluster , storeDefs ) . getUtility ( ) ; int successes = 0 ; for ( int i = 0 ; i < randomSwapAttempts ; i ++ ) { // Iterate over zone ids to decide which node ids to include for // intra-zone swapping. // In future, if there is a need to support inter-zone swapping, // then just remove the // zone specific logic that populates nodeIdSet and add all nodes // from across all zones. int zoneIdOffset = i % zoneIds . size ( ) ; Set < Integer > nodeIdSet = nextCandidateCluster . getNodeIdsInZone ( zoneIds . get ( zoneIdOffset ) ) ; nodeIds = new ArrayList < Integer > ( nodeIdSet ) ; Collections . shuffle ( zoneIds , new Random ( System . currentTimeMillis ( ) ) ) ; Cluster shuffleResults = swapRandomPartitionsAmongNodes ( returnCluster , nodeIds ) ; double nextUtility = new PartitionBalance ( shuffleResults , storeDefs ) . getUtility ( ) ; if ( nextUtility < currentUtility ) { System . out . println ( "Swap improved max-min ratio: " + currentUtility + " -> " + nextUtility + " (improvement " + successes + " on swap attempt " + i + ")" ) ; successes ++ ; returnCluster = shuffleResults ; currentUtility = nextUtility ; } if ( successes >= randomSwapSuccesses ) { // Enough successes, move on. break ; } } return returnCluster ; }
Randomly shuffle partitions between nodes within every zone .
511
10
147,780
/**
 * Greedy single-swap search: for each node, samples up to
 * greedySwapMaxPartitionsPerNode of its partitions and, for each, evaluates
 * swaps against up to greedySwapMaxPartitionsPerZone partitions on the other
 * given nodes. Remembers the single best (lowest-utility) swap seen and
 * applies it at the end.
 *
 * Large values of the greedySwapMaxPartitions... arguments make this
 * equivalent to evaluating every possible swap, which may be very expensive.
 *
 * @param nextCandidateCluster starting cluster; never mutated (a clone is used)
 * @param nodeIds nodes whose partitions participate in candidate swaps
 * @param greedySwapMaxPartitionsPerNode sample size of partitions per "A" node
 * @param greedySwapMaxPartitionsPerZone sample size of candidate "B" partitions
 * @param storeDefs store definitions used to compute the utility score
 * @return a cluster with the best-found swap applied, or the (cloned) input
 *         cluster if no swap improved utility
 */
public static Cluster swapGreedyRandomPartitions(final Cluster nextCandidateCluster,
                                                 final List<Integer> nodeIds,
                                                 final int greedySwapMaxPartitionsPerNode,
                                                 final int greedySwapMaxPartitionsPerZone,
                                                 List<StoreDefinition> storeDefs) {
    System.out.println("GreedyRandom : nodeIds:" + nodeIds);
    Cluster returnCluster = Cluster.cloneCluster(nextCandidateCluster);
    double currentUtility = new PartitionBalance(returnCluster, storeDefs).getUtility();
    // Best swap found so far; -1 node id means "no improving swap yet".
    int nodeIdA = -1;
    int nodeIdB = -1;
    int partitionIdA = -1;
    int partitionIdB = -1;
    for(int nodeIdAPrime: nodeIds) {
        System.out.println("GreedyRandom : processing nodeId:" + nodeIdAPrime);
        List<Integer> partitionIdsAPrime = new ArrayList<Integer>();
        partitionIdsAPrime.addAll(returnCluster.getNodeById(nodeIdAPrime).getPartitionIds());
        // Shuffle so the bounded sample below is a random subset.
        Collections.shuffle(partitionIdsAPrime);
        int maxPartitionsInAPrime = Math.min(greedySwapMaxPartitionsPerNode,
                                             partitionIdsAPrime.size());
        for(int offsetAPrime = 0; offsetAPrime < maxPartitionsInAPrime; offsetAPrime++) {
            Integer partitionIdAPrime = partitionIdsAPrime.get(offsetAPrime);
            // Candidate (node, partition) pairs on every other node.
            List<Pair<Integer, Integer>> partitionIdsZone = new ArrayList<Pair<Integer, Integer>>();
            for(int nodeIdBPrime: nodeIds) {
                if(nodeIdBPrime == nodeIdAPrime)
                    continue;
                for(Integer partitionIdBPrime: returnCluster.getNodeById(nodeIdBPrime)
                                                           .getPartitionIds()) {
                    partitionIdsZone.add(new Pair<Integer, Integer>(nodeIdBPrime,
                                                                    partitionIdBPrime));
                }
            }
            Collections.shuffle(partitionIdsZone);
            int maxPartitionsInZone = Math.min(greedySwapMaxPartitionsPerZone,
                                               partitionIdsZone.size());
            for(int offsetZone = 0; offsetZone < maxPartitionsInZone; offsetZone++) {
                Integer nodeIdBPrime = partitionIdsZone.get(offsetZone).getFirst();
                Integer partitionIdBPrime = partitionIdsZone.get(offsetZone).getSecond();
                // Evaluate the swap on a copy; the running cluster is only
                // modified once, after the full scan, with the best swap.
                Cluster swapResult = swapPartitions(returnCluster,
                                                   nodeIdAPrime,
                                                   partitionIdAPrime,
                                                   nodeIdBPrime,
                                                   partitionIdBPrime);
                double swapUtility = new PartitionBalance(swapResult, storeDefs).getUtility();
                if(swapUtility < currentUtility) {
                    currentUtility = swapUtility;
                    System.out.println(" -> " + currentUtility);
                    nodeIdA = nodeIdAPrime;
                    partitionIdA = partitionIdAPrime;
                    nodeIdB = nodeIdBPrime;
                    partitionIdB = partitionIdBPrime;
                }
            }
        }
    }
    if(nodeIdA == -1) {
        // No swap improved utility; return the untouched clone.
        return returnCluster;
    }
    return swapPartitions(returnCluster, nodeIdA, partitionIdA, nodeIdB, partitionIdB);
}
For each node in the specified zones, tries swapping some minimum number of random partitions per node with some minimum number of random partitions from other specified nodes. Chooses the best swap in each iteration. Large values of the greedySwapMaxPartitions... arguments make this method equivalent to comparing every possible swap. This may get very expensive.
744
65
147,781
/**
 * Runs repeated rounds of greedy intra-zone partition swapping (see
 * swapGreedyRandomPartitions), cycling through the given zones round-robin.
 *
 * Large values of the greedySwapMaxPartitions... arguments make each round
 * equivalent to comparing every possible swap, which is very expensive.
 *
 * @param nextCandidateCluster starting cluster; never mutated (a clone is used)
 * @param greedyAttempts number of greedy rounds to run
 * @param greedySwapMaxPartitionsPerNode passed through to the greedy swap
 * @param greedySwapMaxPartitionsPerZone passed through to the greedy swap
 * @param greedySwapZoneIds zones to shuffle within; empty means all zones
 * @param storeDefs store definitions used to compute the utility score
 * @return cluster after all greedy rounds
 */
public static Cluster greedyShufflePartitions(final Cluster nextCandidateCluster,
                                              final int greedyAttempts,
                                              final int greedySwapMaxPartitionsPerNode,
                                              final int greedySwapMaxPartitionsPerZone,
                                              List<Integer> greedySwapZoneIds,
                                              List<StoreDefinition> storeDefs) {
    // Empty zone list means "consider every zone in the cluster".
    List<Integer> zoneIds = null;
    if(greedySwapZoneIds.isEmpty()) {
        zoneIds = new ArrayList<Integer>(nextCandidateCluster.getZoneIds());
    } else {
        zoneIds = new ArrayList<Integer>(greedySwapZoneIds);
    }
    List<Integer> nodeIds = new ArrayList<Integer>();
    Cluster returnCluster = Cluster.cloneCluster(nextCandidateCluster);
    double currentUtility = new PartitionBalance(returnCluster, storeDefs).getUtility();
    for(int i = 0; i < greedyAttempts; i++) {
        // Iterate over zone ids to decide which node ids to include for
        // intra-zone swapping.
        // In future, if there is a need to support inter-zone swapping,
        // then just remove the
        // zone specific logic that populates nodeIdSet and add all nodes
        // from across all zones.
        int zoneIdOffset = i % zoneIds.size();
        Set<Integer> nodeIdSet = nextCandidateCluster.getNodeIdsInZone(zoneIds.get(zoneIdOffset));
        nodeIds = new ArrayList<Integer>(nodeIdSet);
        // Reshuffle the zone order between rounds.
        Collections.shuffle(zoneIds, new Random(System.currentTimeMillis()));
        Cluster shuffleResults = swapGreedyRandomPartitions(returnCluster,
                                                            nodeIds,
                                                            greedySwapMaxPartitionsPerNode,
                                                            greedySwapMaxPartitionsPerZone,
                                                            storeDefs);
        double nextUtility = new PartitionBalance(shuffleResults, storeDefs).getUtility();
        // The greedy helper only applies improving swaps, so the result is
        // accepted unconditionally here.
        System.out.println("Swap improved max-min ratio: " + currentUtility + " -> "
                           + nextUtility + " (swap attempt " + i + " in zone "
                           + zoneIds.get(zoneIdOffset) + ")");
        returnCluster = shuffleResults;
        currentUtility = nextUtility;
    }
    return returnCluster;
}
Within a single zone, tries swapping some minimum number of random partitions per node with some minimum number of random partitions from other nodes within the zone. Chooses the best swap in each iteration. Large values of the greedySwapMaxPartitions... arguments make this method equivalent to comparing every possible swap. This is very expensive.
509
64
147,782
/**
 * Closes the Netty server channel and all accepted channels, then releases
 * the bootstrap's external resources (thread pools, etc.).
 */
@Override
protected void stopInner() {
    /*
     * TODO REST-Server Need to handle inflight operations. What happens to
     * the existing async operations when a channel.close() is issued in
     * Netty?
     */
    // Stop accepting new connections first.
    if(this.nettyServerChannel != null) {
        this.nettyServerChannel.close();
    }
    // Then close every open channel and block until all closes complete.
    if(allChannels != null) {
        allChannels.close().awaitUninterruptibly();
    }
    // Finally release I/O threads; must happen after channels are closed.
    this.bootstrap.releaseExternalResources();
}
Closes the Netty Channel and releases all resources
109
10
147,783
/**
 * Reads the zone id from the X-VOLD-Zone-Id request header.
 *
 * @return the non-negative zone id from the header, or -1 (the default)
 *         when the header is absent, unparseable, or negative
 */
protected int parseZoneId() {
    final String rawZoneId = this.request.getHeader(RestMessageHeaders.X_VOLD_ZONE_ID);
    if(rawZoneId == null) {
        // Header absent: fall back to the default zone id.
        return -1;
    }
    int parsedZoneId;
    try {
        parsedZoneId = Integer.parseInt(rawZoneId);
    } catch(NumberFormatException nfe) {
        logger.error("Exception when validating request. Incorrect zone id parameter. Cannot parse this to int: "
                     + rawZoneId, nfe);
        return -1;
    }
    if(parsedZoneId < 0) {
        logger.error("ZoneId cannot be negative. Assuming the default zone id.");
        return -1;
    }
    return parsedZoneId;
}
Retrieve and validate the zone id value from the REST request . X - VOLD - Zone - Id is the zone id header .
152
27
147,784
/**
 * Builds the composite Voldemort request from an already-validated REST
 * request, drops requests whose routing timeout has already expired, looks
 * up the target store, and forwards a VoldemortStoreRequest to the next
 * handler in the Netty pipeline. Writes an HTTP error response and returns
 * early on timeout or unknown store.
 */
@Override
protected void registerRequest(RestRequestValidator requestValidator,
                               ChannelHandlerContext ctx,
                               MessageEvent messageEvent) {
    // At this point we know the request is valid and we have a
    // error handler. So we construct the composite Voldemort
    // request object.
    CompositeVoldemortRequest<ByteArray, byte[]> requestObject = requestValidator.constructCompositeVoldemortRequestObject();
    if(requestObject != null) {
        // Dropping dead requests from going to next handler
        long now = System.currentTimeMillis();
        // Request is "dead" when origin time + routing timeout is already in the past.
        if(requestObject.getRequestOriginTimeInMs() + requestObject.getRoutingTimeoutInMs() <= now) {
            RestErrorHandler.writeErrorResponse(messageEvent,
                                                HttpResponseStatus.REQUEST_TIMEOUT,
                                                "current time: " + now + "\torigin time: "
                                                        + requestObject.getRequestOriginTimeInMs()
                                                        + "\ttimeout in ms: "
                                                        + requestObject.getRoutingTimeoutInMs());
            return;
        } else {
            // NOTE(review): raw Store type here; element type unknown from this view.
            Store store = getStore(requestValidator.getStoreName(),
                                   requestValidator.getParsedRoutingType());
            if(store != null) {
                VoldemortStoreRequest voldemortStoreRequest = new VoldemortStoreRequest(requestObject,
                                                                                       store,
                                                                                       parseZoneId());
                // Hand off downstream in the pipeline.
                Channels.fireMessageReceived(ctx, voldemortStoreRequest);
            } else {
                logger.error("Error when getting store. Non Existing store name.");
                RestErrorHandler.writeErrorResponse(messageEvent,
                                                    HttpResponseStatus.BAD_REQUEST,
                                                    "Non Existing store name. Critical error.");
                return;
            }
        }
    }
}
Constructs a valid request and passes it on to the next handler . It also creates the Store object corresponding to the store name specified in the REST request .
361
31
147,785
private Pair < Cluster , List < StoreDefinition > > getCurrentClusterState ( ) { // Retrieve the latest cluster metadata from the existing nodes Versioned < Cluster > currentVersionedCluster = adminClient . rebalanceOps . getLatestCluster ( Utils . nodeListToNodeIdList ( Lists . newArrayList ( adminClient . getAdminClientCluster ( ) . getNodes ( ) ) ) ) ; Cluster cluster = currentVersionedCluster . getValue ( ) ; List < StoreDefinition > storeDefs = adminClient . rebalanceOps . getCurrentStoreDefinitions ( cluster ) ; return new Pair < Cluster , List < StoreDefinition > > ( cluster , storeDefs ) ; }
Probe the existing cluster to retrieve the current cluster xml and stores xml .
151
15
147,786
private void executePlan ( RebalancePlan rebalancePlan ) { logger . info ( "Starting to execute rebalance Plan!" ) ; int batchCount = 0 ; int partitionStoreCount = 0 ; long totalTimeMs = 0 ; List < RebalanceBatchPlan > entirePlan = rebalancePlan . getPlan ( ) ; int numBatches = entirePlan . size ( ) ; int numPartitionStores = rebalancePlan . getPartitionStoresMoved ( ) ; for ( RebalanceBatchPlan batchPlan : entirePlan ) { logger . info ( "======== REBALANCING BATCH " + ( batchCount + 1 ) + " ========" ) ; RebalanceUtils . printBatchLog ( batchCount , logger , batchPlan . toString ( ) ) ; long startTimeMs = System . currentTimeMillis ( ) ; // ACTUALLY DO A BATCH OF REBALANCING! executeBatch ( batchCount , batchPlan ) ; totalTimeMs += ( System . currentTimeMillis ( ) - startTimeMs ) ; // Bump up the statistics batchCount ++ ; partitionStoreCount += batchPlan . getPartitionStoreMoves ( ) ; batchStatusLog ( batchCount , numBatches , partitionStoreCount , numPartitionStores , totalTimeMs ) ; } }
Executes the rebalance plan . Does so batch - by - batch . Between each batch status is dumped to logger . info .
283
27
147,787
private void batchStatusLog ( int batchCount , int numBatches , int partitionStoreCount , int numPartitionStores , long totalTimeMs ) { // Calculate the estimated end time and pretty print stats double rate = 1 ; long estimatedTimeMs = 0 ; if ( numPartitionStores > 0 ) { rate = partitionStoreCount / numPartitionStores ; estimatedTimeMs = ( long ) ( totalTimeMs / rate ) - totalTimeMs ; } StringBuilder sb = new StringBuilder ( ) ; sb . append ( "Batch Complete!" ) . append ( Utils . NEWLINE ) . append ( "\tbatches moved: " ) . append ( batchCount ) . append ( " out of " ) . append ( numBatches ) . append ( Utils . NEWLINE ) . append ( "\tPartition stores moved: " ) . append ( partitionStoreCount ) . append ( " out of " ) . append ( numPartitionStores ) . append ( Utils . NEWLINE ) . append ( "\tPercent done: " ) . append ( decimalFormatter . format ( rate * 100.0 ) ) . append ( Utils . NEWLINE ) . append ( "\tEstimated time left: " ) . append ( estimatedTimeMs ) . append ( " ms (" ) . append ( TimeUnit . MILLISECONDS . toHours ( estimatedTimeMs ) ) . append ( " hours)" ) ; RebalanceUtils . printBatchLog ( batchCount , logger , sb . toString ( ) ) ; }
Pretty print a progress update after each batch complete .
330
10
147,788
/**
 * Pauses between the cluster-metadata change and the start of server
 * rebalancing work, giving proxy bridges time to be established.
 */
private void proxyPause() {
    logger.info("Pausing after cluster state has changed to allow proxy bridges to be established. "
                + "Will start rebalancing work on servers in " + proxyPauseSec + " seconds.");
    try {
        Thread.sleep(TimeUnit.SECONDS.toMillis(proxyPauseSec));
    } catch(InterruptedException e) {
        logger.warn("Sleep interrupted in proxy pause.");
        // BUG FIX: restore the interrupt status so callers up the stack can
        // still observe that this thread was interrupted.
        Thread.currentThread().interrupt();
    }
}
Pause between cluster change in metadata and starting server rebalancing work .
97
14
147,789
/**
 * The smallest granularity of rebalancing: moves partitions for a subset of
 * stores by submitting rebalance tasks to a bounded executor and waiting for
 * all of them. On failure, rolls the cluster metadata back to the given
 * rollback state (the exact rollback flags depend on which store types exist
 * and whether read-only stores already finished).
 *
 * @param batchId id of the enclosing batch (used in logs)
 * @param progressBar progress reporting for this batch plan
 * @param batchRollbackCluster cluster to roll back to on failure
 * @param batchRollbackStoreDefs store definitions to roll back to on failure
 * @param rebalanceTaskPlanList tasks to execute in this sub-batch
 * @param hasReadOnlyStores whether the plan touches read-only stores
 * @param hasReadWriteStores whether the plan touches read-write stores
 * @param finishedReadOnlyStores whether read-only stores already completed
 * @throws VoldemortRebalancingException if any task failed (after rollback)
 * @throws VoldemortException if tasks are still incomplete (no rollback —
 *         treated as a possible timeout the operator may resume manually)
 */
private void executeSubBatch(final int batchId,
                             RebalanceBatchPlanProgressBar progressBar,
                             final Cluster batchRollbackCluster,
                             final List<StoreDefinition> batchRollbackStoreDefs,
                             final List<RebalanceTaskInfo> rebalanceTaskPlanList,
                             boolean hasReadOnlyStores,
                             boolean hasReadWriteStores,
                             boolean finishedReadOnlyStores) {
    RebalanceUtils.printBatchLog(batchId, logger, "Submitting rebalance tasks ");
    // Get an ExecutorService in place used for submitting our tasks
    ExecutorService service = RebalanceUtils.createExecutors(maxParallelRebalancing);
    // Sub-list of the above list
    final List<RebalanceTask> failedTasks = Lists.newArrayList();
    final List<RebalanceTask> incompleteTasks = Lists.newArrayList();
    // Semaphores for donor nodes - To avoid multiple disk sweeps
    // (one permit per donor node serializes tasks reading from it).
    Map<Integer, Semaphore> donorPermits = new HashMap<Integer, Semaphore>();
    for(Node node: batchRollbackCluster.getNodes()) {
        donorPermits.put(node.getId(), new Semaphore(1));
    }
    try {
        // List of tasks which will run asynchronously
        List<RebalanceTask> allTasks = executeTasks(batchId,
                                                    progressBar,
                                                    service,
                                                    rebalanceTaskPlanList,
                                                    donorPermits);
        RebalanceUtils.printBatchLog(batchId, logger, "All rebalance tasks submitted");
        // Wait and shutdown after (infinite) timeout
        RebalanceUtils.executorShutDown(service, Long.MAX_VALUE);
        RebalanceUtils.printBatchLog(batchId, logger, "Finished waiting for executors");
        // Collects all failures + incomplete tasks from the rebalance
        // tasks.
        List<Exception> failures = Lists.newArrayList();
        for(RebalanceTask task: allTasks) {
            if(task.hasException()) {
                failedTasks.add(task);
                failures.add(task.getError());
            } else if(!task.isComplete()) {
                incompleteTasks.add(task);
            }
        }
        if(failedTasks.size() > 0) {
            throw new VoldemortRebalancingException("Rebalance task terminated unsuccessfully on tasks "
                                                    + failedTasks, failures);
        }
        // If there were no failures, then we could have had a genuine
        // timeout ( Rebalancing took longer than the operator expected ).
        // We should throw a VoldemortException and not a
        // VoldemortRebalancingException ( which will start reverting
        // metadata ). The operator may want to manually then resume the
        // process.
        if(incompleteTasks.size() > 0) {
            throw new VoldemortException("Rebalance tasks are still incomplete / running "
                                         + incompleteTasks);
        }
    } catch(VoldemortRebalancingException e) {
        logger.error("Failure while migrating partitions for rebalance task " + batchId);
        // Roll cluster metadata back; the flag combinations below mirror
        // the forward state-change cases elsewhere in this class.
        if(hasReadOnlyStores && hasReadWriteStores && finishedReadOnlyStores) {
            // Case 0
            adminClient.rebalanceOps.rebalanceStateChange(null,
                                                          batchRollbackCluster,
                                                          null,
                                                          batchRollbackStoreDefs,
                                                          null,
                                                          true,
                                                          true,
                                                          false,
                                                          false,
                                                          false);
        } else if(hasReadWriteStores && finishedReadOnlyStores) {
            // Case 4
            adminClient.rebalanceOps.rebalanceStateChange(null,
                                                          batchRollbackCluster,
                                                          null,
                                                          batchRollbackStoreDefs,
                                                          null,
                                                          false,
                                                          true,
                                                          false,
                                                          false,
                                                          false);
        }
        throw e;
    } finally {
        // Last-resort cleanup if the executor didn't shut down above.
        if(!service.isShutdown()) {
            RebalanceUtils.printErrorLog(batchId,
                                         logger,
                                         "Could not shutdown service cleanly for rebalance task "
                                                 + batchId,
                                         null);
            service.shutdownNow();
        }
    }
}
The smallest granularity of rebalancing, wherein we move partitions for a subset of stores. Finally, at the end of the movement, the node is taken out of the rebalance state.
854
39
147,790
public static ConsistencyLevel determineConsistency ( Map < Value , Set < ClusterNode > > versionNodeSetMap , int replicationFactor ) { boolean fullyConsistent = true ; Value latestVersion = null ; for ( Map . Entry < Value , Set < ClusterNode > > versionNodeSetEntry : versionNodeSetMap . entrySet ( ) ) { Value value = versionNodeSetEntry . getKey ( ) ; if ( latestVersion == null ) { latestVersion = value ; } else if ( value . isTimeStampLaterThan ( latestVersion ) ) { latestVersion = value ; } Set < ClusterNode > nodeSet = versionNodeSetEntry . getValue ( ) ; fullyConsistent = fullyConsistent && ( nodeSet . size ( ) == replicationFactor ) ; } if ( fullyConsistent ) { return ConsistencyLevel . FULL ; } else { // latest write consistent, effectively consistent if ( latestVersion != null && versionNodeSetMap . get ( latestVersion ) . size ( ) == replicationFactor ) { return ConsistencyLevel . LATEST_CONSISTENT ; } // all other states inconsistent return ConsistencyLevel . INCONSISTENT ; } }
Determine the consistency level of a key
249
9
147,791
public static void cleanIneligibleKeys ( Map < ByteArray , Map < Value , Set < ClusterNode > > > keyVersionNodeSetMap , int requiredWrite ) { Set < ByteArray > keysToDelete = new HashSet < ByteArray > ( ) ; for ( Map . Entry < ByteArray , Map < Value , Set < ClusterNode > > > entry : keyVersionNodeSetMap . entrySet ( ) ) { Set < Value > valuesToDelete = new HashSet < Value > ( ) ; ByteArray key = entry . getKey ( ) ; Map < Value , Set < ClusterNode > > valueNodeSetMap = entry . getValue ( ) ; // mark version for deletion if not enough writes for ( Map . Entry < Value , Set < ClusterNode > > versionNodeSetEntry : valueNodeSetMap . entrySet ( ) ) { Set < ClusterNode > nodeSet = versionNodeSetEntry . getValue ( ) ; if ( nodeSet . size ( ) < requiredWrite ) { valuesToDelete . add ( versionNodeSetEntry . getKey ( ) ) ; } } // delete versions for ( Value v : valuesToDelete ) { valueNodeSetMap . remove ( v ) ; } // mark key for deletion if no versions left if ( valueNodeSetMap . size ( ) == 0 ) { keysToDelete . add ( key ) ; } } // delete keys for ( ByteArray k : keysToDelete ) { keyVersionNodeSetMap . remove ( k ) ; } }
Determine if a key version is invalid by comparing the version's existence against the required-writes configuration.
312
19
147,792
/**
 * Renders a key's version/replica information as comma-separated "BAD_KEY"
 * records, one record per version.
 *
 * NOTE(review): when versionMap holds more than one version, consecutive
 * records are concatenated with no separator between them (the previous
 * record's value runs straight into the next "BAD_KEY"). Verify against the
 * consumer of this output whether a newline was intended between records.
 *
 * @param key the key, rendered as hex
 * @param versionMap version -> replica nodes for this key
 * @param storeName store the key belongs to
 * @param partitionId partition the key maps to
 * @return the concatenated record string (empty when versionMap is empty)
 */
public static String keyVersionToString(ByteArray key,
                                        Map<Value, Set<ClusterNode>> versionMap,
                                        String storeName,
                                        Integer partitionId) {
    StringBuilder record = new StringBuilder();
    for(Map.Entry<Value, Set<ClusterNode>> versionSet: versionMap.entrySet()) {
        Value value = versionSet.getKey();
        Set<ClusterNode> nodeSet = versionSet.getValue();
        record.append("BAD_KEY,");
        record.append(storeName + ",");
        record.append(partitionId + ",");
        record.append(ByteUtils.toHexString(key.get()) + ",");
        // Replace ", " inside the node-set rendering so the field stays a
        // single CSV column.
        record.append(nodeSet.toString().replace(", ", ";") + ",");
        record.append(value.toString());
    }
    return record.toString();
}
Convert a key - version - nodeSet information to string
202
12
147,793
/**
 * Writes a 200 OK HTTP response whose body is this.responseValue, sent with
 * binary content-type/transfer-encoding headers, and records GET latency
 * stats for local-zone requests.
 *
 * @param performanceStats stats sink; may be null (stats skipped)
 * @param isFromLocalZone only local-zone requests are recorded in stats
 * @param startTimeInMs request start time used for latency measurement
 * @throws Exception propagated from the Netty write path
 */
@Override
public void sendResponse(StoreStats performanceStats,
                         boolean isFromLocalZone,
                         long startTimeInMs) throws Exception {
    ChannelBuffer responseContent = ChannelBuffers.dynamicBuffer(this.responseValue.length);
    responseContent.writeBytes(responseValue);
    // 1. Create the Response object
    HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
    // 2. Set the right headers
    response.setHeader(CONTENT_TYPE, "binary");
    response.setHeader(CONTENT_TRANSFER_ENCODING, "binary");
    // 3. Copy the data into the payload
    response.setContent(responseContent);
    // Content-Length must be set after the content so the readable byte
    // count is accurate.
    response.setHeader(CONTENT_LENGTH, response.getContent().readableBytes());
    if(logger.isDebugEnabled()) {
        logger.debug("Response = " + response);
    }
    // Write the response to the Netty Channel
    this.messageEvent.getChannel().write(response);
    if(performanceStats != null && isFromLocalZone) {
        recordStats(performanceStats, startTimeInMs, Tracked.GET);
    }
}
Sends a normal HTTP response containing the serialized value in a binary format.
249
15
147,794
/**
 * Updates the cluster reference held by this configuration and propagates
 * it to the connection verifier when that verifier is cluster-aware.
 *
 * @param cluster the new cluster; must not be null
 * @return this config, for chaining
 */
public FailureDetectorConfig setCluster(Cluster cluster) {
    Utils.notNull(cluster);
    this.cluster = cluster;
    /*
     * FIXME: refreshing the admin connection verifier here is a hack that
     * happens to work. The clean fix is centralized metadata management so
     * every cluster reference points at one shared object.
     */
    if(connectionVerifier instanceof AdminConnectionVerifier) {
        AdminConnectionVerifier adminVerifier = (AdminConnectionVerifier) connectionVerifier;
        adminVerifier.setCluster(cluster);
    }
    return this;
}
Look at the comments on cluster variable to see why this is problematic
120
13
147,795
/**
 * Assigns the set of cluster nodes represented by this failure detector
 * configuration, copying the given collection defensively.
 *
 * @param nodes nodes to track; must not be null
 * @return this config, for chaining
 * @deprecated retained for backward compatibility
 */
@Deprecated
public synchronized FailureDetectorConfig setNodes(Collection<Node> nodes) {
    Utils.notNull(nodes);
    // Defensive copy: callers keep ownership of their collection.
    HashSet<Node> snapshot = new HashSet<Node>(nodes);
    this.nodes = snapshot;
    return this;
}
Assigns a list of nodes in the cluster represented by this failure detector configuration .
47
17
147,796
/**
 * Checks whether a node with the given id exists in this cluster.
 *
 * @param nodeId node id to look up
 * @return true if the node exists, false otherwise
 */
public boolean hasNodeWithId(int nodeId) {
    return nodesById.get(nodeId) != null;
}
Given a cluster and a node id checks if the node exists
39
12
147,797
public static Cluster cloneCluster ( Cluster cluster ) { // Could add a better .clone() implementation that clones the derived // data structures. The constructor invoked by this clone implementation // can be slow for large numbers of partitions. Probably faster to copy // all the maps and stuff. return new Cluster ( cluster . getName ( ) , new ArrayList < Node > ( cluster . getNodes ( ) ) , new ArrayList < Zone > ( cluster . getZones ( ) ) ) ; /*- * Historic "clone" code being kept in case this, for some reason, was the "right" way to be doing this. ClusterMapper mapper = new ClusterMapper(); return mapper.readCluster(new StringReader(mapper.writeCluster(cluster))); */ }
Clones the cluster by constructing a new one with same name partition layout and nodes .
165
17
147,798
public AdminClient checkout ( ) { if ( isClosed . get ( ) ) { throw new IllegalStateException ( "Pool is closing" ) ; } AdminClient client ; // Try to get one from the Cache. while ( ( client = clientCache . poll ( ) ) != null ) { if ( ! client . isClusterModified ( ) ) { return client ; } else { // Cluster is Modified, after the AdminClient is created. Close it client . close ( ) ; } } // None is available, create new one. return createAdminClient ( ) ; }
Get an AdminClient from the cache if one exists; if not, create a new one and return it. This method is non-blocking.
120
25
147,799
public void checkin ( AdminClient client ) { if ( isClosed . get ( ) ) { throw new IllegalStateException ( "Pool is closing" ) ; } if ( client == null ) { throw new IllegalArgumentException ( "client is null" ) ; } boolean isCheckedIn = clientCache . offer ( client ) ; if ( ! isCheckedIn ) { // Cache is already full, close this AdminClient client . close ( ) ; } }
Submit the AdminClient after usage is completed. Behavior is undefined if checkin is called with objects not retrieved via checkout.
96
24