idx int64 0 41.2k | question stringlengths 83 4.15k | target stringlengths 5 715 |
|---|---|---|
24,300 | public static void mlock ( Pointer addr , long len ) { int res = Delegate . mlock ( addr , new NativeLong ( len ) ) ; if ( res != 0 ) { if ( logger . isDebugEnabled ( ) ) { logger . debug ( "Mlock failed probably because of insufficient privileges, errno:" + errno . strerror ( ) + ", return value:" + res ) ; } } else { if ( logger . isDebugEnabled ( ) ) logger . debug ( "Mlock successfull" ) ; } } | Lock the given region . Does not report failures . |
24,301 | public static void munlock ( Pointer addr , long len ) { if ( Delegate . munlock ( addr , new NativeLong ( len ) ) != 0 ) { if ( logger . isDebugEnabled ( ) ) logger . debug ( "munlocking failed with errno:" + errno . strerror ( ) ) ; } else { if ( logger . isDebugEnabled ( ) ) logger . debug ( "munlocking region" ) ; } } | Unlock the given region . Does not report failures . |
24,302 | public JsonTypeDefinition projectionType ( String ... properties ) { if ( this . getType ( ) instanceof Map < ? , ? > ) { Map < ? , ? > type = ( Map < ? , ? > ) getType ( ) ; Arrays . sort ( properties ) ; Map < String , Object > newType = new LinkedHashMap < String , Object > ( ) ; for ( String prop : properties ) newType . put ( prop , type . get ( prop ) ) ; return new JsonTypeDefinition ( newType ) ; } else { throw new IllegalArgumentException ( "Cannot take the projection of a type that is not a Map." ) ; } } | Get the type created by selecting only a subset of properties from this type . The type must be a map for this to work |
24,303 | private void writeBufferedValsToStorage ( ) { List < Versioned < byte [ ] > > obsoleteVals = storageEngine . multiVersionPut ( currBufferedKey , currBufferedVals ) ; if ( logger . isDebugEnabled ( ) && obsoleteVals . size ( ) > 0 ) { logger . debug ( "updateEntries (Streaming multi-version-put) rejected these versions as obsolete : " + StoreUtils . getVersions ( obsoleteVals ) + " for key " + currBufferedKey ) ; } currBufferedVals = new ArrayList < Versioned < byte [ ] > > ( VALS_BUFFER_EXPECTED_SIZE ) ; } | Persists the current set of versions buffered for the current key into storage using the multiVersionPut api |
24,304 | public synchronized boolean acquireRebalancingPermit ( int nodeId ) { boolean added = rebalancePermits . add ( nodeId ) ; logger . info ( "Acquiring rebalancing permit for node id " + nodeId + ", returned: " + added ) ; return added ; } | Acquire a permit for a particular node id so as to allow rebalancing |
24,305 | public synchronized void releaseRebalancingPermit ( int nodeId ) { boolean removed = rebalancePermits . remove ( nodeId ) ; logger . info ( "Releasing rebalancing permit for node id " + nodeId + ", returned: " + removed ) ; if ( ! removed ) throw new VoldemortException ( new IllegalStateException ( "Invalid state, must hold a " + "permit to release" ) ) ; } | Release the rebalancing permit for a particular node id |
24,306 | private void swapROStores ( List < String > swappedStoreNames , boolean useSwappedStoreNames ) { try { for ( StoreDefinition storeDef : metadataStore . getStoreDefList ( ) ) { if ( storeDef . getType ( ) . compareTo ( ReadOnlyStorageConfiguration . TYPE_NAME ) == 0 ) { if ( useSwappedStoreNames && ! swappedStoreNames . contains ( storeDef . getName ( ) ) ) { continue ; } ReadOnlyStorageEngine engine = ( ReadOnlyStorageEngine ) storeRepository . getStorageEngine ( storeDef . getName ( ) ) ; if ( engine == null ) { throw new VoldemortException ( "Could not find storage engine for " + storeDef . getName ( ) + " to swap " ) ; } logger . info ( "Swapping RO store " + storeDef . getName ( ) ) ; engine . swapFiles ( engine . getCurrentDirPath ( ) ) ; if ( ! useSwappedStoreNames ) swappedStoreNames . add ( storeDef . getName ( ) ) ; } } } catch ( Exception e ) { logger . error ( "Error while swapping RO store" ) ; throw new VoldemortException ( e ) ; } } | Goes through all the RO Stores in the plan and swaps it |
24,307 | private void changeClusterAndStores ( String clusterKey , final Cluster cluster , String storesKey , final List < StoreDefinition > storeDefs ) { metadataStore . writeLock . lock ( ) ; try { VectorClock updatedVectorClock = ( ( VectorClock ) metadataStore . get ( clusterKey , null ) . get ( 0 ) . getVersion ( ) ) . incremented ( metadataStore . getNodeId ( ) , System . currentTimeMillis ( ) ) ; metadataStore . put ( clusterKey , Versioned . value ( ( Object ) cluster , updatedVectorClock ) ) ; updatedVectorClock = ( ( VectorClock ) metadataStore . get ( storesKey , null ) . get ( 0 ) . getVersion ( ) ) . incremented ( metadataStore . getNodeId ( ) , System . currentTimeMillis ( ) ) ; metadataStore . put ( storesKey , Versioned . value ( ( Object ) storeDefs , updatedVectorClock ) ) ; } catch ( Exception e ) { logger . info ( "Error while changing cluster to " + cluster + "for key " + clusterKey ) ; throw new VoldemortException ( e ) ; } finally { metadataStore . writeLock . unlock ( ) ; } } | Updates the cluster and store metadata atomically |
24,308 | public int rebalanceNode ( final RebalanceTaskInfo stealInfo ) { final RebalanceTaskInfo info = metadataStore . getRebalancerState ( ) . find ( stealInfo . getDonorId ( ) ) ; if ( info == null ) { throw new VoldemortException ( "Could not find plan " + stealInfo + " in the server state on " + metadataStore . getNodeId ( ) ) ; } else if ( ! info . equals ( stealInfo ) ) { throw new VoldemortException ( "The plan in server state " + info + " is not the same as the process passed " + stealInfo ) ; } else if ( ! acquireRebalancingPermit ( stealInfo . getDonorId ( ) ) ) { throw new AlreadyRebalancingException ( "Node " + metadataStore . getNodeId ( ) + " is already rebalancing from donor " + info . getDonorId ( ) + " with info " + info ) ; } int requestId = asyncService . getUniqueRequestId ( ) ; asyncService . submitOperation ( requestId , new StealerBasedRebalanceAsyncOperation ( this , voldemortConfig , metadataStore , requestId , info ) ) ; return requestId ; } | This function is responsible for starting the actual async rebalance operation . This is run if this node is the stealer node |
24,309 | protected void prepForWrite ( SelectionKey selectionKey ) { if ( logger . isTraceEnabled ( ) ) traceInputBufferState ( "About to clear read buffer" ) ; if ( requestHandlerFactory . shareReadWriteBuffer ( ) == false ) { inputStream . clear ( ) ; } if ( logger . isTraceEnabled ( ) ) traceInputBufferState ( "Cleared read buffer" ) ; outputStream . getBuffer ( ) . flip ( ) ; selectionKey . interestOps ( SelectionKey . OP_WRITE ) ; } | Flips the output buffer and lets the Selector know we re ready to write . |
24,310 | private boolean initRequestHandler ( SelectionKey selectionKey ) { ByteBuffer inputBuffer = inputStream . getBuffer ( ) ; int remaining = inputBuffer . remaining ( ) ; if ( remaining < 3 ) return true ; byte [ ] protoBytes = { inputBuffer . get ( 0 ) , inputBuffer . get ( 1 ) , inputBuffer . get ( 2 ) } ; try { String proto = ByteUtils . getString ( protoBytes , "UTF-8" ) ; inputBuffer . clear ( ) ; RequestFormatType requestFormatType = RequestFormatType . fromCode ( proto ) ; requestHandler = requestHandlerFactory . getRequestHandler ( requestFormatType ) ; if ( logger . isInfoEnabled ( ) ) logger . info ( "Protocol negotiated for " + socketChannel . socket ( ) + ": " + requestFormatType . getDisplayName ( ) ) ; outputStream . getBuffer ( ) . put ( ByteUtils . getBytes ( "ok" , "UTF-8" ) ) ; prepForWrite ( selectionKey ) ; return false ; } catch ( IllegalArgumentException e ) { RequestFormatType requestFormatType = RequestFormatType . VOLDEMORT_V0 ; requestHandler = requestHandlerFactory . getRequestHandler ( requestFormatType ) ; if ( logger . isInfoEnabled ( ) ) logger . info ( "No protocol proposal given for " + socketChannel . socket ( ) + ", assuming " + requestFormatType . getDisplayName ( ) ) ; return true ; } } | Returns true if the request should continue . |
24,311 | public void rememberAndDisableQuota ( ) { for ( Integer nodeId : nodeIds ) { boolean quotaEnforcement = Boolean . parseBoolean ( adminClient . metadataMgmtOps . getRemoteMetadata ( nodeId , MetadataStore . QUOTA_ENFORCEMENT_ENABLED_KEY ) . getValue ( ) ) ; mapNodeToQuotaEnforcingEnabled . put ( nodeId , quotaEnforcement ) ; } adminClient . metadataMgmtOps . updateRemoteMetadata ( nodeIds , MetadataStore . QUOTA_ENFORCEMENT_ENABLED_KEY , Boolean . toString ( false ) ) ; } | Before cluster management operations i . e . remember and disable quota enforcement settings |
24,312 | public void resetQuotaAndRecoverEnforcement ( ) { for ( Integer nodeId : nodeIds ) { boolean quotaEnforcement = mapNodeToQuotaEnforcingEnabled . get ( nodeId ) ; adminClient . metadataMgmtOps . updateRemoteMetadata ( Arrays . asList ( nodeId ) , MetadataStore . QUOTA_ENFORCEMENT_ENABLED_KEY , Boolean . toString ( quotaEnforcement ) ) ; } for ( String storeName : storeNames ) { adminClient . quotaMgmtOps . rebalanceQuota ( storeName ) ; } } | After cluster management operations i . e . reset quota and recover quota enforcement settings |
24,313 | public void incrementVersion ( int node , long time ) { if ( node < 0 || node > Short . MAX_VALUE ) throw new IllegalArgumentException ( node + " is outside the acceptable range of node ids." ) ; this . timestamp = time ; Long version = versionMap . get ( ( short ) node ) ; if ( version == null ) { version = 1L ; } else { version = version + 1L ; } versionMap . put ( ( short ) node , version ) ; if ( versionMap . size ( ) >= MAX_NUMBER_OF_VERSIONS ) { throw new IllegalStateException ( "Vector clock is full!" ) ; } } | Increment the version info associated with the given node |
24,314 | public VectorClock incremented ( int nodeId , long time ) { VectorClock copyClock = this . clone ( ) ; copyClock . incrementVersion ( nodeId , time ) ; return copyClock ; } | Get new vector clock based on this clock but incremented on index nodeId |
24,315 | private Map < Integer , Integer > getNodeIdToPrimaryCount ( Cluster cluster ) { Map < Integer , Integer > nodeIdToPrimaryCount = Maps . newHashMap ( ) ; for ( Node node : cluster . getNodes ( ) ) { nodeIdToPrimaryCount . put ( node . getId ( ) , node . getPartitionIds ( ) . size ( ) ) ; } return nodeIdToPrimaryCount ; } | Go through all nodes and determine how many partition Ids each node hosts . |
24,316 | private Map < Integer , Integer > getNodeIdToZonePrimaryCount ( Cluster cluster , StoreRoutingPlan storeRoutingPlan ) { Map < Integer , Integer > nodeIdToZonePrimaryCount = Maps . newHashMap ( ) ; for ( Integer nodeId : cluster . getNodeIds ( ) ) { nodeIdToZonePrimaryCount . put ( nodeId , storeRoutingPlan . getZonePrimaryPartitionIds ( nodeId ) . size ( ) ) ; } return nodeIdToZonePrimaryCount ; } | Go through all partition IDs and determine which node is first in the replicating node list for every zone . This determines the number of zone primaries each node hosts . |
24,317 | private Map < Integer , Integer > getNodeIdToNaryCount ( Cluster cluster , StoreRoutingPlan storeRoutingPlan ) { Map < Integer , Integer > nodeIdToNaryCount = Maps . newHashMap ( ) ; for ( int nodeId : cluster . getNodeIds ( ) ) { nodeIdToNaryCount . put ( nodeId , storeRoutingPlan . getZoneNAryPartitionIds ( nodeId ) . size ( ) ) ; } return nodeIdToNaryCount ; } | Go through all node IDs and determine which node |
24,318 | private String dumpZoneNAryDetails ( StoreRoutingPlan storeRoutingPlan ) { StringBuilder sb = new StringBuilder ( ) ; sb . append ( "\tDetailed Dump (Zone N-Aries):" ) . append ( Utils . NEWLINE ) ; for ( Node node : storeRoutingPlan . getCluster ( ) . getNodes ( ) ) { int zoneId = node . getZoneId ( ) ; int nodeId = node . getId ( ) ; sb . append ( "\tNode ID: " + nodeId + " in zone " + zoneId ) . append ( Utils . NEWLINE ) ; List < Integer > naries = storeRoutingPlan . getZoneNAryPartitionIds ( nodeId ) ; Map < Integer , List < Integer > > zoneNaryTypeToPartitionIds = new HashMap < Integer , List < Integer > > ( ) ; for ( int nary : naries ) { int zoneReplicaType = storeRoutingPlan . getZoneNaryForNodesPartition ( zoneId , nodeId , nary ) ; if ( ! zoneNaryTypeToPartitionIds . containsKey ( zoneReplicaType ) ) { zoneNaryTypeToPartitionIds . put ( zoneReplicaType , new ArrayList < Integer > ( ) ) ; } zoneNaryTypeToPartitionIds . get ( zoneReplicaType ) . add ( nary ) ; } for ( int replicaType : new TreeSet < Integer > ( zoneNaryTypeToPartitionIds . keySet ( ) ) ) { sb . append ( "\t\t" + replicaType + " : " ) ; sb . append ( zoneNaryTypeToPartitionIds . get ( replicaType ) . toString ( ) ) ; sb . append ( Utils . NEWLINE ) ; } } return sb . toString ( ) ; } | Dumps the partition IDs per node in terms of zone n - ary type . |
24,319 | private Pair < Double , String > summarizeBalance ( final Map < Integer , Integer > nodeIdToPartitionCount , String title ) { StringBuilder builder = new StringBuilder ( ) ; builder . append ( "\n" + title + "\n" ) ; Map < Integer , ZoneBalanceStats > zoneToBalanceStats = new HashMap < Integer , ZoneBalanceStats > ( ) ; for ( Integer zoneId : cluster . getZoneIds ( ) ) { zoneToBalanceStats . put ( zoneId , new ZoneBalanceStats ( ) ) ; } for ( Node node : cluster . getNodes ( ) ) { int curCount = nodeIdToPartitionCount . get ( node . getId ( ) ) ; builder . append ( "\tNode ID: " + node . getId ( ) + " : " + curCount + " (" + node . getHost ( ) + ")\n" ) ; zoneToBalanceStats . get ( node . getZoneId ( ) ) . addPartitions ( curCount ) ; } double utilityToBeMinimized = 0 ; for ( Integer zoneId : cluster . getZoneIds ( ) ) { builder . append ( "Zone " + zoneId + "\n" ) ; builder . append ( zoneToBalanceStats . get ( zoneId ) . dumpStats ( ) ) ; utilityToBeMinimized += zoneToBalanceStats . get ( zoneId ) . getUtility ( ) ; } return Pair . create ( utilityToBeMinimized , builder . toString ( ) ) ; } | Summarizes balance for the given nodeId to PartitionCount . |
24,320 | private void rebalanceStore ( String storeName , final AdminClient adminClient , RebalanceTaskInfo stealInfo , boolean isReadOnlyStore ) { if ( stealInfo . getPartitionIds ( storeName ) != null && stealInfo . getPartitionIds ( storeName ) . size ( ) > 0 ) { logger . info ( getHeader ( stealInfo ) + "Starting partitions migration for store " + storeName + " from donor node " + stealInfo . getDonorId ( ) ) ; int asyncId = adminClient . storeMntOps . migratePartitions ( stealInfo . getDonorId ( ) , metadataStore . getNodeId ( ) , storeName , stealInfo . getPartitionIds ( storeName ) , null , stealInfo . getInitialCluster ( ) ) ; rebalanceStatusList . add ( asyncId ) ; if ( logger . isDebugEnabled ( ) ) { logger . debug ( getHeader ( stealInfo ) + "Waiting for completion for " + storeName + " with async id " + asyncId ) ; } adminClient . rpcOps . waitForCompletion ( metadataStore . getNodeId ( ) , asyncId , voldemortConfig . getRebalancingTimeoutSec ( ) , TimeUnit . SECONDS , getStatus ( ) ) ; rebalanceStatusList . remove ( ( Object ) asyncId ) ; logger . info ( getHeader ( stealInfo ) + "Completed partition migration for store " + storeName + " from donor node " + stealInfo . getDonorId ( ) ) ; } logger . info ( getHeader ( stealInfo ) + "Finished all migration for store " + storeName ) ; } | Blocking function which completes the migration of one store |
24,321 | public void recordSyncOpTimeNs ( SocketDestination dest , long opTimeNs ) { if ( dest != null ) { getOrCreateNodeStats ( dest ) . recordSyncOpTimeNs ( null , opTimeNs ) ; recordSyncOpTimeNs ( null , opTimeNs ) ; } else { this . syncOpTimeRequestCounter . addRequest ( opTimeNs ) ; } } | Record operation for sync ops time |
24,322 | public void recordAsyncOpTimeNs ( SocketDestination dest , long opTimeNs ) { if ( dest != null ) { getOrCreateNodeStats ( dest ) . recordAsyncOpTimeNs ( null , opTimeNs ) ; recordAsyncOpTimeNs ( null , opTimeNs ) ; } else { this . asynOpTimeRequestCounter . addRequest ( opTimeNs ) ; } } | Record operation for async ops time |
24,323 | public void recordConnectionEstablishmentTimeUs ( SocketDestination dest , long connEstTimeUs ) { if ( dest != null ) { getOrCreateNodeStats ( dest ) . recordConnectionEstablishmentTimeUs ( null , connEstTimeUs ) ; recordConnectionEstablishmentTimeUs ( null , connEstTimeUs ) ; } else { this . connectionEstablishmentRequestCounter . addRequest ( connEstTimeUs * Time . NS_PER_US ) ; } } | Record the connection establishment time |
24,324 | public void recordCheckoutTimeUs ( SocketDestination dest , long checkoutTimeUs ) { if ( dest != null ) { getOrCreateNodeStats ( dest ) . recordCheckoutTimeUs ( null , checkoutTimeUs ) ; recordCheckoutTimeUs ( null , checkoutTimeUs ) ; } else { this . checkoutTimeRequestCounter . addRequest ( checkoutTimeUs * Time . NS_PER_US ) ; } } | Record the checkout wait time in us |
24,325 | public void recordCheckoutQueueLength ( SocketDestination dest , int queueLength ) { if ( dest != null ) { getOrCreateNodeStats ( dest ) . recordCheckoutQueueLength ( null , queueLength ) ; recordCheckoutQueueLength ( null , queueLength ) ; } else { this . checkoutQueueLengthHistogram . insert ( queueLength ) ; checkMonitoringInterval ( ) ; } } | Record the checkout queue length |
24,326 | public void recordResourceRequestTimeUs ( SocketDestination dest , long resourceRequestTimeUs ) { if ( dest != null ) { getOrCreateNodeStats ( dest ) . recordResourceRequestTimeUs ( null , resourceRequestTimeUs ) ; recordResourceRequestTimeUs ( null , resourceRequestTimeUs ) ; } else { this . resourceRequestTimeRequestCounter . addRequest ( resourceRequestTimeUs * Time . NS_PER_US ) ; } } | Record the resource request wait time in us |
24,327 | public void recordResourceRequestQueueLength ( SocketDestination dest , int queueLength ) { if ( dest != null ) { getOrCreateNodeStats ( dest ) . recordResourceRequestQueueLength ( null , queueLength ) ; recordResourceRequestQueueLength ( null , queueLength ) ; } else { this . resourceRequestQueueLengthHistogram . insert ( queueLength ) ; checkMonitoringInterval ( ) ; } } | Record the resource request queue length |
24,328 | public void close ( ) { Iterator < SocketDestination > it = getStatsMap ( ) . keySet ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { try { SocketDestination destination = it . next ( ) ; JmxUtils . unregisterMbean ( JmxUtils . createObjectName ( JmxUtils . getPackageName ( ClientRequestExecutor . class ) , "stats_" + destination . toString ( ) . replace ( ':' , '_' ) + identifierString ) ) ; } catch ( Exception e ) { } } } | Unregister all MBeans |
24,329 | private < T > T request ( ClientRequest < T > delegate , String operationName ) { long startTimeMs = - 1 ; long startTimeNs = - 1 ; if ( logger . isDebugEnabled ( ) ) { startTimeMs = System . currentTimeMillis ( ) ; } ClientRequestExecutor clientRequestExecutor = pool . checkout ( destination ) ; String debugMsgStr = "" ; startTimeNs = System . nanoTime ( ) ; BlockingClientRequest < T > blockingClientRequest = null ; try { blockingClientRequest = new BlockingClientRequest < T > ( delegate , timeoutMs ) ; clientRequestExecutor . addClientRequest ( blockingClientRequest , timeoutMs , System . nanoTime ( ) - startTimeNs ) ; boolean awaitResult = blockingClientRequest . await ( ) ; if ( awaitResult == false ) { blockingClientRequest . timeOut ( ) ; } if ( logger . isDebugEnabled ( ) ) debugMsgStr += "success" ; return blockingClientRequest . getResult ( ) ; } catch ( InterruptedException e ) { if ( logger . isDebugEnabled ( ) ) debugMsgStr += "unreachable: " + e . getMessage ( ) ; throw new UnreachableStoreException ( "Failure in " + operationName + " on " + destination + ": " + e . getMessage ( ) , e ) ; } catch ( UnreachableStoreException e ) { clientRequestExecutor . close ( ) ; if ( logger . isDebugEnabled ( ) ) debugMsgStr += "failure: " + e . getMessage ( ) ; throw new UnreachableStoreException ( "Failure in " + operationName + " on " + destination + ": " + e . getMessage ( ) , e . getCause ( ) ) ; } finally { if ( blockingClientRequest != null && ! blockingClientRequest . isComplete ( ) ) { clientRequestExecutor . close ( ) ; } long opTimeNs = Utils . elapsedTimeNs ( startTimeNs , System . nanoTime ( ) ) ; if ( stats != null ) { stats . recordSyncOpTimeNs ( destination , opTimeNs ) ; } if ( logger . isDebugEnabled ( ) ) { logger . debug ( "Sync request end, type: " + operationName + " requestRef: " + System . identityHashCode ( delegate ) + " totalTimeNs: " + opTimeNs + " start time: " + startTimeMs + " end time: " + System . 
currentTimeMillis ( ) + " client:" + clientRequestExecutor . getSocketChannel ( ) . socket ( ) . getLocalAddress ( ) + ":" + clientRequestExecutor . getSocketChannel ( ) . socket ( ) . getLocalPort ( ) + " server: " + clientRequestExecutor . getSocketChannel ( ) . socket ( ) . getRemoteSocketAddress ( ) + " outcome: " + debugMsgStr ) ; } pool . checkin ( destination , clientRequestExecutor ) ; } } | This method handles submitting and then waiting for the request from the server . It uses the ClientRequest API to actually write the request and then read back the response . This implementation will block for a response from the server . |
24,330 | private < T > void requestAsync ( ClientRequest < T > delegate , NonblockingStoreCallback callback , long timeoutMs , String operationName ) { pool . submitAsync ( this . destination , delegate , callback , timeoutMs , operationName ) ; } | This method handles submitting and then waiting for the request from the server . It uses the ClientRequest API to actually write the request and then read back the response . This implementation will not block for a response from the server . |
24,331 | @ JmxGetter ( name = "avgFetchKeysNetworkTimeMs" , description = "average time spent on network, for fetch keys" ) public double getAvgFetchKeysNetworkTimeMs ( ) { return networkTimeCounterMap . get ( Operation . FETCH_KEYS ) . getAvgEventValue ( ) / Time . NS_PER_MS ; } | Mbeans for FETCH_KEYS |
24,332 | @ JmxGetter ( name = "avgFetchEntriesNetworkTimeMs" , description = "average time spent on network, for streaming operations" ) public double getAvgFetchEntriesNetworkTimeMs ( ) { return networkTimeCounterMap . get ( Operation . FETCH_ENTRIES ) . getAvgEventValue ( ) / Time . NS_PER_MS ; } | Mbeans for FETCH_ENTRIES |
24,333 | @ JmxGetter ( name = "avgUpdateEntriesNetworkTimeMs" , description = "average time spent on network, for streaming operations" ) public double getAvgUpdateEntriesNetworkTimeMs ( ) { return networkTimeCounterMap . get ( Operation . UPDATE_ENTRIES ) . getAvgEventValue ( ) / Time . NS_PER_MS ; } | Mbeans for UPDATE_ENTRIES |
24,334 | @ JmxGetter ( name = "avgSlopUpdateNetworkTimeMs" , description = "average time spent on network, for streaming operations" ) public double getAvgSlopUpdateNetworkTimeMs ( ) { return networkTimeCounterMap . get ( Operation . SLOP_UPDATE ) . getAvgEventValue ( ) / Time . NS_PER_MS ; } | Mbeans for SLOP_UPDATE |
24,335 | public static String getJavaClassFromSchemaInfo ( String schemaInfo ) { final String ONLY_JAVA_CLIENTS_SUPPORTED = "Only Java clients are supported currently, so the format of the schema-info should be: <schema-info>java=foo.Bar</schema-info> where foo.Bar is the fully qualified name of the message." ; if ( StringUtils . isEmpty ( schemaInfo ) ) throw new IllegalArgumentException ( "This serializer requires a non-empty schema-info." ) ; String [ ] languagePairs = StringUtils . split ( schemaInfo , ',' ) ; if ( languagePairs . length > 1 ) throw new IllegalArgumentException ( ONLY_JAVA_CLIENTS_SUPPORTED ) ; String [ ] javaPair = StringUtils . split ( languagePairs [ 0 ] , '=' ) ; if ( javaPair . length != 2 || ! javaPair [ 0 ] . trim ( ) . equals ( "java" ) ) throw new IllegalArgumentException ( ONLY_JAVA_CLIENTS_SUPPORTED ) ; return javaPair [ 1 ] . trim ( ) ; } | Extracts the java class name from the schema info |
24,336 | public static List < StoreDefinition > filterStores ( List < StoreDefinition > storeDefs , final boolean isReadOnly ) { List < StoreDefinition > filteredStores = Lists . newArrayList ( ) ; for ( StoreDefinition storeDef : storeDefs ) { if ( storeDef . getType ( ) . equals ( ReadOnlyStorageConfiguration . TYPE_NAME ) == isReadOnly ) { filteredStores . add ( storeDef ) ; } } return filteredStores ; } | Given a list of store definitions filters the list depending on the boolean |
24,337 | public static List < String > getStoreNames ( List < StoreDefinition > storeDefList ) { List < String > storeList = new ArrayList < String > ( ) ; for ( StoreDefinition def : storeDefList ) { storeList . add ( def . getName ( ) ) ; } return storeList ; } | Given a list of store definitions return a list of store names |
24,338 | public static Set < String > getStoreNamesSet ( List < StoreDefinition > storeDefList ) { HashSet < String > storeSet = new HashSet < String > ( ) ; for ( StoreDefinition def : storeDefList ) { storeSet . add ( def . getName ( ) ) ; } return storeSet ; } | Given a list of store definitions return a set of store names |
24,339 | public static HashMap < StoreDefinition , Integer > getUniqueStoreDefinitionsWithCounts ( List < StoreDefinition > storeDefs ) { HashMap < StoreDefinition , Integer > uniqueStoreDefs = Maps . newHashMap ( ) ; for ( StoreDefinition storeDef : storeDefs ) { if ( uniqueStoreDefs . isEmpty ( ) ) { uniqueStoreDefs . put ( storeDef , 1 ) ; } else { StoreDefinition sameStore = null ; for ( StoreDefinition uniqueStoreDef : uniqueStoreDefs . keySet ( ) ) { if ( uniqueStoreDef . getReplicationFactor ( ) == storeDef . getReplicationFactor ( ) && uniqueStoreDef . getRoutingStrategyType ( ) . compareTo ( storeDef . getRoutingStrategyType ( ) ) == 0 ) { if ( uniqueStoreDef . getRoutingStrategyType ( ) . compareTo ( RoutingStrategyType . ZONE_STRATEGY ) == 0 ) { boolean zonesSame = true ; for ( int zoneId : uniqueStoreDef . getZoneReplicationFactor ( ) . keySet ( ) ) { if ( storeDef . getZoneReplicationFactor ( ) . get ( zoneId ) == null || storeDef . getZoneReplicationFactor ( ) . get ( zoneId ) != uniqueStoreDef . getZoneReplicationFactor ( ) . get ( zoneId ) ) { zonesSame = false ; break ; } } if ( zonesSame ) { sameStore = uniqueStoreDef ; } } else { sameStore = uniqueStoreDef ; } if ( sameStore != null ) { int currentCount = uniqueStoreDefs . get ( sameStore ) ; uniqueStoreDefs . put ( sameStore , currentCount + 1 ) ; break ; } } } if ( sameStore == null ) { uniqueStoreDefs . put ( storeDef , 1 ) ; } } } return uniqueStoreDefs ; } | Given a list of store definitions find out and return a map of similar store definitions + count of them |
24,340 | public static boolean isAvroSchema ( String serializerName ) { if ( serializerName . equals ( AVRO_GENERIC_VERSIONED_TYPE_NAME ) || serializerName . equals ( AVRO_GENERIC_TYPE_NAME ) || serializerName . equals ( AVRO_REFLECTIVE_TYPE_NAME ) || serializerName . equals ( AVRO_SPECIFIC_TYPE_NAME ) ) { return true ; } else { return false ; } } | Determine whether or not a given serializer is AVRO based |
24,341 | private static void validateIfAvroSchema ( SerializerDefinition serializerDef ) { if ( serializerDef . getName ( ) . equals ( AVRO_GENERIC_VERSIONED_TYPE_NAME ) || serializerDef . getName ( ) . equals ( AVRO_GENERIC_TYPE_NAME ) ) { SchemaEvolutionValidator . validateAllAvroSchemas ( serializerDef ) ; if ( serializerDef . getName ( ) . equals ( AVRO_GENERIC_VERSIONED_TYPE_NAME ) ) { SchemaEvolutionValidator . checkSchemaCompatibility ( serializerDef ) ; } } } | If provided with an AVRO schema validates it and checks if they are backwards compatible . |
24,342 | public synchronized void insert ( long data ) { resetIfNeeded ( ) ; long index = 0 ; if ( data >= this . upperBound ) { index = nBuckets - 1 ; } else if ( data < 0 ) { logger . error ( data + " can't be bucketed because it is negative!" ) ; return ; } else { index = data / step ; } if ( index < 0 || index >= nBuckets ) { logger . error ( data + " can't be bucketed because index is not in range [0,nBuckets)." ) ; return ; } buckets [ ( int ) index ] ++ ; sum += data ; size ++ ; } | Insert a value into the right bucket of the histogram . If the value is larger than any bound insert into the last bucket . If the value is less than zero then ignore it . |
24,343 | private void checkAndAddNodeStore ( ) { for ( Node node : metadata . getCluster ( ) . getNodes ( ) ) { if ( ! routedStore . getInnerStores ( ) . containsKey ( node . getId ( ) ) ) { if ( ! storeRepository . hasNodeStore ( getName ( ) , node . getId ( ) ) ) { storeRepository . addNodeStore ( node . getId ( ) , createNodeStore ( node ) ) ; } routedStore . getInnerStores ( ) . put ( node . getId ( ) , storeRepository . getNodeStore ( getName ( ) , node . getId ( ) ) ) ; } } } | Check that all nodes in the new cluster have a corresponding entry in storeRepository and innerStores . add a NodeStore if not present is needed as with rebalancing we can add new nodes on the fly . |
24,344 | public ResourcePoolConfig setTimeout ( long timeout , TimeUnit unit ) { if ( timeout < 0 ) throw new IllegalArgumentException ( "The timeout must be a non-negative number." ) ; this . timeoutNs = TimeUnit . NANOSECONDS . convert ( timeout , unit ) ; return this ; } | The timeout which we block for when a resource is not available |
24,345 | private byte [ ] assembleValues ( List < Versioned < byte [ ] > > values ) throws IOException { ByteArrayOutputStream stream = new ByteArrayOutputStream ( ) ; DataOutputStream dataStream = new DataOutputStream ( stream ) ; for ( Versioned < byte [ ] > value : values ) { byte [ ] object = value . getValue ( ) ; dataStream . writeInt ( object . length ) ; dataStream . write ( object ) ; VectorClock clock = ( VectorClock ) value . getVersion ( ) ; dataStream . writeInt ( clock . sizeInBytes ( ) ) ; dataStream . write ( clock . toBytes ( ) ) ; } return stream . toByteArray ( ) ; } | Store the versioned values |
24,346 | private List < Versioned < byte [ ] > > disassembleValues ( byte [ ] values ) throws IOException { if ( values == null ) return new ArrayList < Versioned < byte [ ] > > ( 0 ) ; List < Versioned < byte [ ] > > returnList = new ArrayList < Versioned < byte [ ] > > ( ) ; ByteArrayInputStream stream = new ByteArrayInputStream ( values ) ; DataInputStream dataStream = new DataInputStream ( stream ) ; while ( dataStream . available ( ) > 0 ) { byte [ ] object = new byte [ dataStream . readInt ( ) ] ; dataStream . read ( object ) ; byte [ ] clockBytes = new byte [ dataStream . readInt ( ) ] ; dataStream . read ( clockBytes ) ; VectorClock clock = new VectorClock ( clockBytes ) ; returnList . add ( new Versioned < byte [ ] > ( object , clock ) ) ; } return returnList ; } | Splits up value into multiple versioned values |
24,347 | protected void statusInfoMessage ( final String tag ) { if ( logger . isInfoEnabled ( ) ) { logger . info ( tag + " : [partition: " + currentPartition + ", partitionFetched: " + currentPartitionFetched + "] for store " + storageEngine . getName ( ) ) ; } } | Simple info message for status |
24,348 | private int slopSize ( Versioned < Slop > slopVersioned ) { int nBytes = 0 ; Slop slop = slopVersioned . getValue ( ) ; nBytes += slop . getKey ( ) . length ( ) ; nBytes += ( ( VectorClock ) slopVersioned . getVersion ( ) ) . sizeInBytes ( ) ; switch ( slop . getOperation ( ) ) { case PUT : { nBytes += slop . getValue ( ) . length ; break ; } case DELETE : { break ; } default : logger . error ( "Unknown slop operation: " + slop . getOperation ( ) ) ; } return nBytes ; } | Returns the approximate size of slop to help in throttling |
24,349 | public < K , V > StoreClient < K , V > getStoreClient ( final String storeName , final InconsistencyResolver < Versioned < V > > resolver ) { return new LazyStoreClient < K , V > ( new Callable < StoreClient < K , V > > ( ) { public StoreClient < K , V > call ( ) throws Exception { Store < K , V , Object > clientStore = getRawStore ( storeName , resolver ) ; return new RESTClient < K , V > ( storeName , clientStore ) ; } } , true ) ; } | Creates a REST client used to perform Voldemort operations against the Coordinator |
24,350 | private static int abs ( int a ) { if ( a >= 0 ) return a ; else if ( a != Integer . MIN_VALUE ) return - a ; return Integer . MAX_VALUE ; } | A modified version of abs that always returns a non - negative value . Math . abs returns Integer . MIN_VALUE if a == Integer . MIN_VALUE and this method returns Integer . MAX_VALUE in that case . |
24,351 | public Integer getMasterPartition ( byte [ ] key ) { return abs ( hash . hash ( key ) ) % ( Math . max ( 1 , this . partitionToNode . length ) ) ; } | Obtain the master partition for a given key |
24,352 | protected boolean isSlopDead ( Cluster cluster , Set < String > storeNames , Slop slop ) { if ( ! cluster . getNodeIds ( ) . contains ( slop . getNodeId ( ) ) ) { return true ; } if ( ! storeNames . contains ( slop . getStoreName ( ) ) ) { return true ; } return false ; } | A slop is dead if the destination node or the store does not exist anymore on the cluster . |
24,353 | protected void handleDeadSlop ( SlopStorageEngine slopStorageEngine , Pair < ByteArray , Versioned < Slop > > keyAndVal ) { Versioned < Slop > versioned = keyAndVal . getSecond ( ) ; if ( voldemortConfig . getAutoPurgeDeadSlops ( ) ) { slopStorageEngine . delete ( keyAndVal . getFirst ( ) , versioned . getVersion ( ) ) ; if ( getLogger ( ) . isDebugEnabled ( ) ) { getLogger ( ) . debug ( "Auto purging dead slop :" + versioned . getValue ( ) ) ; } } else { if ( getLogger ( ) . isDebugEnabled ( ) ) { getLogger ( ) . debug ( "Ignoring dead slop :" + versioned . getValue ( ) ) ; } } } | Handle slop for nodes that are no longer part of the cluster . It may not always be the case . For example shrinking a zone or deleting a store . |
24,354 | public void destroy ( SocketDestination dest , ClientRequestExecutor clientRequestExecutor ) throws Exception { clientRequestExecutor . close ( ) ; int numDestroyed = destroyed . incrementAndGet ( ) ; if ( stats != null ) { stats . incrementCount ( dest , ClientSocketStats . Tracked . CONNECTION_DESTROYED_EVENT ) ; } if ( logger . isDebugEnabled ( ) ) logger . debug ( "Destroyed socket " + numDestroyed + " connection to " + dest . getHost ( ) + ":" + dest . getPort ( ) ) ; } | Close the ClientRequestExecutor . |
24,355 | @ SuppressWarnings ( "unchecked" ) public static Properties readSingleClientConfigAvro ( String configAvro ) { Properties props = new Properties ( ) ; try { JsonDecoder decoder = new JsonDecoder ( CLIENT_CONFIG_AVRO_SCHEMA , configAvro ) ; GenericDatumReader < Object > datumReader = new GenericDatumReader < Object > ( CLIENT_CONFIG_AVRO_SCHEMA ) ; Map < Utf8 , Utf8 > flowMap = ( Map < Utf8 , Utf8 > ) datumReader . read ( null , decoder ) ; for ( Utf8 key : flowMap . keySet ( ) ) { props . put ( key . toString ( ) , flowMap . get ( key ) . toString ( ) ) ; } } catch ( Exception e ) { e . printStackTrace ( ) ; } return props ; } | Parses a string that contains single fat client config string in avro format |
24,356 | @ SuppressWarnings ( "unchecked" ) public static Map < String , Properties > readMultipleClientConfigAvro ( String configAvro ) { Map < String , Properties > mapStoreToProps = Maps . newHashMap ( ) ; try { JsonDecoder decoder = new JsonDecoder ( CLIENT_CONFIGS_AVRO_SCHEMA , configAvro ) ; GenericDatumReader < Object > datumReader = new GenericDatumReader < Object > ( CLIENT_CONFIGS_AVRO_SCHEMA ) ; Map < Utf8 , Map < Utf8 , Utf8 > > storeConfigs = ( Map < Utf8 , Map < Utf8 , Utf8 > > ) datumReader . read ( null , decoder ) ; for ( Utf8 storeName : storeConfigs . keySet ( ) ) { Properties props = new Properties ( ) ; Map < Utf8 , Utf8 > singleConfig = storeConfigs . get ( storeName ) ; for ( Utf8 key : singleConfig . keySet ( ) ) { props . put ( key . toString ( ) , singleConfig . get ( key ) . toString ( ) ) ; } if ( storeName == null || storeName . length ( ) == 0 ) { throw new Exception ( "Invalid store name found!" ) ; } mapStoreToProps . put ( storeName . toString ( ) , props ) ; } } catch ( Exception e ) { e . printStackTrace ( ) ; } return mapStoreToProps ; } | Parses a string that contains multiple fat client configs in avro format |
24,357 | public static String writeSingleClientConfigAvro ( Properties props ) { String avroConfig = "" ; Boolean firstProp = true ; for ( String key : props . stringPropertyNames ( ) ) { if ( firstProp ) { firstProp = false ; } else { avroConfig = avroConfig + ",\n" ; } avroConfig = avroConfig + "\t\t\"" + key + "\": \"" + props . getProperty ( key ) + "\"" ; } if ( avroConfig . isEmpty ( ) ) { return "{}" ; } else { return "{\n" + avroConfig + "\n\t}" ; } } | Assembles an avro format string of single store config from store properties |
24,358 | public static String writeMultipleClientConfigAvro ( Map < String , Properties > mapStoreToProps ) { String avroConfig = "" ; Boolean firstStore = true ; for ( String storeName : mapStoreToProps . keySet ( ) ) { if ( firstStore ) { firstStore = false ; } else { avroConfig = avroConfig + ",\n" ; } Properties props = mapStoreToProps . get ( storeName ) ; avroConfig = avroConfig + "\t\"" + storeName + "\": " + writeSingleClientConfigAvro ( props ) ; } return "{\n" + avroConfig + "\n}" ; } | Assembles an avro format string that contains multiple fat client configs from map of store to properties |
24,359 | public static Boolean compareSingleClientConfigAvro ( String configAvro1 , String configAvro2 ) { Properties props1 = readSingleClientConfigAvro ( configAvro1 ) ; Properties props2 = readSingleClientConfigAvro ( configAvro2 ) ; if ( props1 . equals ( props2 ) ) { return true ; } else { return false ; } } | Compares two avro strings which contains single store configs |
24,360 | public static Boolean compareMultipleClientConfigAvro ( String configAvro1 , String configAvro2 ) { Map < String , Properties > mapStoreToProps1 = readMultipleClientConfigAvro ( configAvro1 ) ; Map < String , Properties > mapStoreToProps2 = readMultipleClientConfigAvro ( configAvro2 ) ; Set < String > keySet1 = mapStoreToProps1 . keySet ( ) ; Set < String > keySet2 = mapStoreToProps2 . keySet ( ) ; if ( ! keySet1 . equals ( keySet2 ) ) { return false ; } for ( String storeName : keySet1 ) { Properties props1 = mapStoreToProps1 . get ( storeName ) ; Properties props2 = mapStoreToProps2 . get ( storeName ) ; if ( ! props1 . equals ( props2 ) ) { return false ; } } return true ; } | Compares two avro strings which contains multiple store configs |
24,361 | public static void printHelp ( PrintStream stream ) { stream . println ( ) ; stream . println ( "Voldemort Admin Tool Async-Job Commands" ) ; stream . println ( "---------------------------------------" ) ; stream . println ( "list Get async job list from nodes." ) ; stream . println ( "stop Stop async jobs on one node." ) ; stream . println ( ) ; stream . println ( "To get more information on each command," ) ; stream . println ( "please try \'help async-job <command-name>\'." ) ; stream . println ( ) ; } | Prints command - line help menu . |
/**
 * Cleans up the BDB environment backing the given storage engine: removes
 * it from the unreserved-store tracking, deletes its on-disk directory,
 * unregisters its stats (and JMX mbean), then closes and forgets the
 * environment. No-op unless one environment per store is in use.
 */
public void removeStorageEngine(StorageEngine<ByteArray, byte[], byte[]> engine) {
    String storeName = engine.getName();
    BdbStorageEngine bdbEngine = (BdbStorageEngine) engine;
    // All environment bookkeeping is serialized on the shared lock.
    synchronized(lock) {
        if(useOneEnvPerStore) {
            Environment environment = this.environments.get(storeName);
            if(environment == null) {
                // Nothing to clean up: no environment registered for this store.
                return;
            }
            if(this.unreservedStores.remove(environment)) {
                logger.info("Removed environment for store name: " + storeName
                            + " from unreserved stores");
            } else {
                logger.info("No environment found in unreserved stores for store name: "
                            + storeName);
            }
            // Delete the store's data directory from disk (best-effort).
            File bdbDir = environment.getHome();
            if(bdbDir.exists() && bdbDir.isDirectory()) {
                String bdbDirPath = bdbDir.getPath();
                try {
                    FileUtils.deleteDirectory(bdbDir);
                    logger.info("Successfully deleted BDB directory : " + bdbDirPath
                                + " for store name: " + storeName);
                } catch(IOException e) {
                    // Failure to delete the directory does not abort the teardown.
                    logger.error("Unable to delete BDB directory: " + bdbDirPath
                                 + " for store name: " + storeName);
                }
            }
            // Stop aggregating this engine's environment stats.
            BdbEnvironmentStats bdbEnvStats = bdbEngine.getBdbEnvironmentStats();
            this.aggBdbStats.unTrackEnvironment(bdbEnvStats);
            // Unregister the per-store stats mbean when JMX is enabled.
            if(voldemortConfig.isJmxEnabled()) {
                ObjectName name = JmxUtils.createObjectName(JmxUtils.getPackageName(bdbEnvStats.getClass()),
                                                            storeName);
                JmxUtils.unregisterMbean(name);
            }
            // Close the environment last, then drop it from the registry.
            environment.close();
            this.environments.remove(storeName);
            logger.info("Successfully closed the environment for store name : " + storeName);
        }
    }
}
24,363 | @ JmxOperation ( description = "Forcefully invoke the log cleaning" ) public void cleanLogs ( ) { synchronized ( lock ) { try { for ( Environment environment : environments . values ( ) ) { environment . cleanLog ( ) ; } } catch ( DatabaseException e ) { throw new VoldemortException ( e ) ; } } } | Forceful cleanup the logs |
/**
 * Detects a memory-footprint change in the store definition and rewires the
 * store's private BDB cache reservation accordingly. Only supported when
 * each store has its own environment and the store already uses a private
 * (reserved) cache.
 *
 * @param storeDef the updated store definition
 * @throws VoldemortException if environments are shared, or if the call
 *         would switch the store between shared and private caching
 * @throws StorageInitializationException if the new reservation would leave
 *         less than the configured minimum shared cache
 */
public void update(StoreDefinition storeDef) {
    if(!useOneEnvPerStore)
        throw new VoldemortException("Memory foot print can be set only when using different environments per store");
    String storeName = storeDef.getName();
    Environment environment = environments.get(storeName);
    // Only stores that already have a reserved (non-shared) cache can be resized.
    if(!unreservedStores.contains(environment) && storeDef.hasMemoryFootprint()) {
        EnvironmentMutableConfig mConfig = environment.getMutableConfig();
        long currentCacheSize = mConfig.getCacheSize();
        long newCacheSize = storeDef.getMemoryFootprintMB() * ByteUtils.BYTES_PER_MB;
        if(currentCacheSize != newCacheSize) {
            // Recompute the total reserved cache as if this store's share were
            // swapped from its old size to the new one.
            long newReservedCacheSize = this.reservedCacheSize - currentCacheSize
                                        + newCacheSize;
            // The shared cache must keep at least the configured minimum.
            if((voldemortConfig.getBdbCacheSize() - newReservedCacheSize) < voldemortConfig.getBdbMinimumSharedCache()) {
                throw new StorageInitializationException("Reservation of "
                                                         + storeDef.getMemoryFootprintMB()
                                                         + " MB for store " + storeName
                                                         + " violates minimum shared cache size of "
                                                         + voldemortConfig.getBdbMinimumSharedCache());
            }
            // Commit the accounting change, rebalance remaining caches, then
            // apply the new size to this environment.
            this.reservedCacheSize = newReservedCacheSize;
            adjustCacheSizes();
            mConfig.setCacheSize(newCacheSize);
            environment.setMutableConfig(mConfig);
            logger.info("Setting private cache for store " + storeDef.getName() + " to "
                        + newCacheSize);
        }
    } else {
        throw new VoldemortException("Cannot switch between shared and private cache dynamically");
    }
}
24,365 | public static HashMap < Integer , List < Integer > > getBalancedNumberOfPrimaryPartitionsPerNode ( final Cluster nextCandidateCluster , Map < Integer , Integer > targetPartitionsPerZone ) { HashMap < Integer , List < Integer > > numPartitionsPerNode = Maps . newHashMap ( ) ; for ( Integer zoneId : nextCandidateCluster . getZoneIds ( ) ) { List < Integer > partitionsOnNode = Utils . distributeEvenlyIntoList ( nextCandidateCluster . getNumberOfNodesInZone ( zoneId ) , targetPartitionsPerZone . get ( zoneId ) ) ; numPartitionsPerNode . put ( zoneId , partitionsOnNode ) ; } return numPartitionsPerNode ; } | Determines how many primary partitions each node within each zone should have . The list of integers returned per zone is the same length as the number of nodes in that zone . |
24,366 | public static Pair < HashMap < Node , Integer > , HashMap < Node , Integer > > getDonorsAndStealersForBalance ( final Cluster nextCandidateCluster , Map < Integer , List < Integer > > numPartitionsPerNodePerZone ) { HashMap < Node , Integer > donorNodes = Maps . newHashMap ( ) ; HashMap < Node , Integer > stealerNodes = Maps . newHashMap ( ) ; HashMap < Integer , Integer > numNodesAssignedInZone = Maps . newHashMap ( ) ; for ( Integer zoneId : nextCandidateCluster . getZoneIds ( ) ) { numNodesAssignedInZone . put ( zoneId , 0 ) ; } for ( Node node : nextCandidateCluster . getNodes ( ) ) { int zoneId = node . getZoneId ( ) ; int offset = numNodesAssignedInZone . get ( zoneId ) ; numNodesAssignedInZone . put ( zoneId , offset + 1 ) ; int numPartitions = numPartitionsPerNodePerZone . get ( zoneId ) . get ( offset ) ; if ( numPartitions < node . getNumberOfPartitions ( ) ) { donorNodes . put ( node , numPartitions ) ; } else if ( numPartitions > node . getNumberOfPartitions ( ) ) { stealerNodes . put ( node , numPartitions ) ; } } for ( Node node : donorNodes . keySet ( ) ) { System . out . println ( "Donor Node: " + node . getId ( ) + ", zoneId " + node . getZoneId ( ) + ", numPartitions " + node . getNumberOfPartitions ( ) + ", target number of partitions " + donorNodes . get ( node ) ) ; } for ( Node node : stealerNodes . keySet ( ) ) { System . out . println ( "Stealer Node: " + node . getId ( ) + ", zoneId " + node . getZoneId ( ) + ", numPartitions " + node . getNumberOfPartitions ( ) + ", target number of partitions " + stealerNodes . get ( node ) ) ; } return new Pair < HashMap < Node , Integer > , HashMap < Node , Integer > > ( donorNodes , stealerNodes ) ; } | Assign target number of partitions per node to specific node IDs . Then separates Nodes into donorNodes and stealerNodes based on whether the node needs to donate or steal primary partitions . |
24,367 | public static Cluster repeatedlyBalanceContiguousPartitionsPerZone ( final Cluster nextCandidateCluster , final int maxContiguousPartitionsPerZone ) { System . out . println ( "Looping to evenly balance partitions across zones while limiting contiguous partitions" ) ; int repeatContigBalance = 10 ; Cluster returnCluster = nextCandidateCluster ; for ( int i = 0 ; i < repeatContigBalance ; i ++ ) { returnCluster = balanceContiguousPartitionsPerZone ( returnCluster , maxContiguousPartitionsPerZone ) ; returnCluster = balancePrimaryPartitions ( returnCluster , false ) ; System . out . println ( "Completed round of balancing contiguous partitions: round " + ( i + 1 ) + " of " + repeatContigBalance ) ; } return returnCluster ; } | Loops over cluster and repeatedly tries to break up contiguous runs of partitions . After each phase of breaking up contiguous partitions random partitions are selected to move between zones to balance the number of partitions in each zone . The second phase may re - introduce contiguous partition runs in another zone . Therefore this overall process is repeated multiple times . |
/**
 * Ensures that no more than maxContiguousPartitionsPerZone partitions are
 * contiguous within a single zone. Overly long runs are broken up by
 * moving some of their members to random nodes in other zones.
 *
 * @param nextCandidateCluster cluster to operate on (not mutated; a clone is returned)
 * @param maxContiguousPartitionsPerZone longest allowed run of partition ids per zone
 * @return a cloned cluster with the long contiguous runs split up
 */
public static Cluster balanceContiguousPartitionsPerZone(final Cluster nextCandidateCluster,
                                                         final int maxContiguousPartitionsPerZone) {
    System.out.println("Balance number of contiguous partitions within a zone.");
    System.out.println("numPartitionsPerZone");
    for(int zoneId: nextCandidateCluster.getZoneIds()) {
        System.out.println(zoneId + " : "
                           + nextCandidateCluster.getNumberOfPartitionsInZone(zoneId));
    }
    System.out.println("numNodesPerZone");
    for(int zoneId: nextCandidateCluster.getZoneIds()) {
        System.out.println(zoneId + " : " + nextCandidateCluster.getNumberOfNodesInZone(zoneId));
    }
    // Phase 1: per zone, find runs of contiguous partition ids longer than
    // the cap and choose which members to evict so the remainder fits.
    HashMap<Integer, List<Integer>> partitionsToRemoveFromZone = Maps.newHashMap();
    System.out.println("Contiguous partitions");
    for(Integer zoneId: nextCandidateCluster.getZoneIds()) {
        System.out.println("\tZone: " + zoneId);
        // Maps run-start partition id -> run length for this zone.
        Map<Integer, Integer> partitionToRunLength = PartitionBalanceUtils.getMapOfContiguousPartitions(nextCandidateCluster,
                                                                                                        zoneId);
        List<Integer> partitionsToRemoveFromThisZone = new ArrayList<Integer>();
        for(Map.Entry<Integer, Integer> entry: partitionToRunLength.entrySet()) {
            if(entry.getValue() > maxContiguousPartitionsPerZone) {
                List<Integer> contiguousPartitions = new ArrayList<Integer>(entry.getValue());
                // Runs may wrap around the partition ring, hence the modulo.
                for(int partitionId = entry.getKey(); partitionId < entry.getKey()
                                                                    + entry.getValue(); partitionId++) {
                    contiguousPartitions.add(partitionId
                                             % nextCandidateCluster.getNumberOfPartitions());
                }
                System.out.println("Contiguous partitions: " + contiguousPartitions);
                partitionsToRemoveFromThisZone.addAll(Utils.removeItemsToSplitListEvenly(contiguousPartitions,
                                                                                         maxContiguousPartitionsPerZone));
            }
        }
        partitionsToRemoveFromZone.put(zoneId, partitionsToRemoveFromThisZone);
        System.out.println("\t\tPartitions to remove: " + partitionsToRemoveFromThisZone);
    }
    // Phase 2: re-home each evicted partition on a random node in a random
    // *different* zone.
    Cluster returnCluster = Cluster.cloneCluster(nextCandidateCluster);
    Random r = new Random();
    for(int zoneId: returnCluster.getZoneIds()) {
        for(int partitionId: partitionsToRemoveFromZone.get(zoneId)) {
            List<Integer> otherZoneIds = new ArrayList<Integer>();
            for(int otherZoneId: returnCluster.getZoneIds()) {
                if(otherZoneId != zoneId) {
                    otherZoneIds.add(otherZoneId);
                }
            }
            int whichOtherZoneId = otherZoneIds.get(r.nextInt(otherZoneIds.size()));
            int whichNodeOffset = r.nextInt(returnCluster.getNumberOfNodesInZone(whichOtherZoneId));
            int whichNodeId = new ArrayList<Integer>(returnCluster.getNodeIdsInZone(whichOtherZoneId)).get(whichNodeOffset);
            returnCluster = UpdateClusterUtils.createUpdatedCluster(returnCluster,
                                                                    whichNodeId,
                                                                    Lists.newArrayList(partitionId));
        }
    }
    return returnCluster;
}
24,369 | public static Cluster swapPartitions ( final Cluster nextCandidateCluster , final int nodeIdA , final int partitionIdA , final int nodeIdB , final int partitionIdB ) { Cluster returnCluster = Cluster . cloneCluster ( nextCandidateCluster ) ; returnCluster = UpdateClusterUtils . createUpdatedCluster ( returnCluster , nodeIdA , Lists . newArrayList ( partitionIdB ) ) ; returnCluster = UpdateClusterUtils . createUpdatedCluster ( returnCluster , nodeIdB , Lists . newArrayList ( partitionIdA ) ) ; return returnCluster ; } | Swaps two specified partitions . |
24,370 | public static Cluster swapRandomPartitionsWithinZone ( final Cluster nextCandidateCluster , final int zoneId ) { Cluster returnCluster = Cluster . cloneCluster ( nextCandidateCluster ) ; Random r = new Random ( ) ; List < Integer > nodeIdsInZone = new ArrayList < Integer > ( nextCandidateCluster . getNodeIdsInZone ( zoneId ) ) ; if ( nodeIdsInZone . size ( ) == 0 ) { return returnCluster ; } int stealerNodeOffset = r . nextInt ( nodeIdsInZone . size ( ) ) ; Integer stealerNodeId = nodeIdsInZone . get ( stealerNodeOffset ) ; List < Integer > stealerPartitions = returnCluster . getNodeById ( stealerNodeId ) . getPartitionIds ( ) ; if ( stealerPartitions . size ( ) == 0 ) { return nextCandidateCluster ; } int stealerPartitionOffset = r . nextInt ( stealerPartitions . size ( ) ) ; int stealerPartitionId = stealerPartitions . get ( stealerPartitionOffset ) ; List < Integer > donorNodeIds = new ArrayList < Integer > ( ) ; donorNodeIds . addAll ( nodeIdsInZone ) ; donorNodeIds . remove ( stealerNodeId ) ; if ( donorNodeIds . isEmpty ( ) ) { return returnCluster ; } int donorIdOffset = r . nextInt ( donorNodeIds . size ( ) ) ; Integer donorNodeId = donorNodeIds . get ( donorIdOffset ) ; List < Integer > donorPartitions = returnCluster . getNodeById ( donorNodeId ) . getPartitionIds ( ) ; int donorPartitionOffset = r . nextInt ( donorPartitions . size ( ) ) ; int donorPartitionId = donorPartitions . get ( donorPartitionOffset ) ; return swapPartitions ( returnCluster , stealerNodeId , stealerPartitionId , donorNodeId , donorPartitionId ) ; } | Within a single zone swaps one random partition on one random node with another random partition on different random node . |
/**
 * Randomly shuffles partitions between nodes within every zone (or only
 * the zones in randomSwapZoneIds when that list is non-empty). A swap is
 * kept only when it lowers the utility of the resulting partition balance.
 *
 * @param randomSwapAttempts maximum number of swap attempts
 * @param randomSwapSuccesses stop early after this many accepted swaps
 * @return the best cluster found
 */
public static Cluster randomShufflePartitions(final Cluster nextCandidateCluster,
                                              final int randomSwapAttempts,
                                              final int randomSwapSuccesses,
                                              final List<Integer> randomSwapZoneIds,
                                              List<StoreDefinition> storeDefs) {
    List<Integer> zoneIds = null;
    if(randomSwapZoneIds.isEmpty()) {
        // No explicit zone list: consider every zone in the cluster.
        zoneIds = new ArrayList<Integer>(nextCandidateCluster.getZoneIds());
    } else {
        zoneIds = new ArrayList<Integer>(randomSwapZoneIds);
    }
    List<Integer> nodeIds = new ArrayList<Integer>();
    Cluster returnCluster = Cluster.cloneCluster(nextCandidateCluster);
    // Lower utility is better; track the best value seen so far.
    double currentUtility = new PartitionBalance(returnCluster, storeDefs).getUtility();
    int successes = 0;
    for(int i = 0; i < randomSwapAttempts; i++) {
        // Select this attempt's zone round-robin over the (shuffled) list.
        int zoneIdOffset = i % zoneIds.size();
        Set<Integer> nodeIdSet = nextCandidateCluster.getNodeIdsInZone(zoneIds.get(zoneIdOffset));
        nodeIds = new ArrayList<Integer>(nodeIdSet);
        // NOTE(review): this shuffle happens *after* the zone was picked, so
        // it only affects later iterations — confirm this ordering is intended.
        Collections.shuffle(zoneIds, new Random(System.currentTimeMillis()));
        Cluster shuffleResults = swapRandomPartitionsAmongNodes(returnCluster, nodeIds);
        double nextUtility = new PartitionBalance(shuffleResults, storeDefs).getUtility();
        if(nextUtility < currentUtility) {
            // Accept the swap: it strictly improved the balance.
            System.out.println("Swap improved max-min ratio: " + currentUtility + " -> "
                               + nextUtility + " (improvement " + successes
                               + " on swap attempt " + i + ")");
            successes++;
            returnCluster = shuffleResults;
            currentUtility = nextUtility;
        }
        if(successes >= randomSwapSuccesses) {
            break;
        }
    }
    return returnCluster;
}
/**
 * For each node in the given list, tries swapping up to
 * greedySwapMaxPartitionsPerNode of its (randomly ordered) partitions with
 * up to greedySwapMaxPartitionsPerZone random partitions from the other
 * listed nodes, and finally applies the single best (lowest-utility) swap
 * found. Large values of the greedySwapMax... arguments approach an
 * exhaustive comparison of all swaps and can be very expensive.
 */
public static Cluster swapGreedyRandomPartitions(final Cluster nextCandidateCluster,
                                                 final List<Integer> nodeIds,
                                                 final int greedySwapMaxPartitionsPerNode,
                                                 final int greedySwapMaxPartitionsPerZone,
                                                 List<StoreDefinition> storeDefs) {
    System.out.println("GreedyRandom : nodeIds:" + nodeIds);
    Cluster returnCluster = Cluster.cloneCluster(nextCandidateCluster);
    double currentUtility = new PartitionBalance(returnCluster, storeDefs).getUtility();
    // Best swap found so far; node id -1 means "no improving swap yet".
    int nodeIdA = -1;
    int nodeIdB = -1;
    int partitionIdA = -1;
    int partitionIdB = -1;
    for(int nodeIdAPrime: nodeIds) {
        System.out.println("GreedyRandom : processing nodeId:" + nodeIdAPrime);
        List<Integer> partitionIdsAPrime = new ArrayList<Integer>();
        partitionIdsAPrime.addAll(returnCluster.getNodeById(nodeIdAPrime).getPartitionIds());
        // Random order so the bounded scan below samples different partitions.
        Collections.shuffle(partitionIdsAPrime);
        int maxPartitionsInAPrime = Math.min(greedySwapMaxPartitionsPerNode,
                                             partitionIdsAPrime.size());
        for(int offsetAPrime = 0; offsetAPrime < maxPartitionsInAPrime; offsetAPrime++) {
            Integer partitionIdAPrime = partitionIdsAPrime.get(offsetAPrime);
            // Candidate partners: every partition on every *other* listed node,
            // as (nodeId, partitionId) pairs.
            List<Pair<Integer, Integer>> partitionIdsZone = new ArrayList<Pair<Integer, Integer>>();
            for(int nodeIdBPrime: nodeIds) {
                if(nodeIdBPrime == nodeIdAPrime)
                    continue;
                for(Integer partitionIdBPrime: returnCluster.getNodeById(nodeIdBPrime)
                                                            .getPartitionIds()) {
                    partitionIdsZone.add(new Pair<Integer, Integer>(nodeIdBPrime,
                                                                    partitionIdBPrime));
                }
            }
            Collections.shuffle(partitionIdsZone);
            int maxPartitionsInZone = Math.min(greedySwapMaxPartitionsPerZone,
                                               partitionIdsZone.size());
            for(int offsetZone = 0; offsetZone < maxPartitionsInZone; offsetZone++) {
                Integer nodeIdBPrime = partitionIdsZone.get(offsetZone).getFirst();
                Integer partitionIdBPrime = partitionIdsZone.get(offsetZone).getSecond();
                // Evaluate the candidate swap on a scratch cluster; remember it
                // only if it strictly lowers the utility seen so far.
                Cluster swapResult = swapPartitions(returnCluster,
                                                    nodeIdAPrime,
                                                    partitionIdAPrime,
                                                    nodeIdBPrime,
                                                    partitionIdBPrime);
                double swapUtility = new PartitionBalance(swapResult, storeDefs).getUtility();
                if(swapUtility < currentUtility) {
                    currentUtility = swapUtility;
                    System.out.println(" -> " + currentUtility);
                    nodeIdA = nodeIdAPrime;
                    partitionIdA = partitionIdAPrime;
                    nodeIdB = nodeIdBPrime;
                    partitionIdB = partitionIdBPrime;
                }
            }
        }
    }
    if(nodeIdA == -1) {
        // No candidate improved the utility; return the cloned cluster as-is.
        return returnCluster;
    }
    return swapPartitions(returnCluster, nodeIdA, partitionIdA, nodeIdB, partitionIdB);
}
/**
 * Runs greedyAttempts passes of the greedy swap over one zone at a time,
 * visiting zones round-robin over greedySwapZoneIds (or all zones when
 * that list is empty). Each pass's result is adopted unconditionally —
 * unlike randomShufflePartitions, there is no accept-only-if-better check
 * here (swapGreedyRandomPartitions itself only returns improving swaps).
 */
public static Cluster greedyShufflePartitions(final Cluster nextCandidateCluster,
                                              final int greedyAttempts,
                                              final int greedySwapMaxPartitionsPerNode,
                                              final int greedySwapMaxPartitionsPerZone,
                                              List<Integer> greedySwapZoneIds,
                                              List<StoreDefinition> storeDefs) {
    List<Integer> zoneIds = null;
    if(greedySwapZoneIds.isEmpty()) {
        zoneIds = new ArrayList<Integer>(nextCandidateCluster.getZoneIds());
    } else {
        zoneIds = new ArrayList<Integer>(greedySwapZoneIds);
    }
    List<Integer> nodeIds = new ArrayList<Integer>();
    Cluster returnCluster = Cluster.cloneCluster(nextCandidateCluster);
    double currentUtility = new PartitionBalance(returnCluster, storeDefs).getUtility();
    for(int i = 0; i < greedyAttempts; i++) {
        // Pick this attempt's zone round-robin over the (shuffled) zone list.
        int zoneIdOffset = i % zoneIds.size();
        Set<Integer> nodeIdSet = nextCandidateCluster.getNodeIdsInZone(zoneIds.get(zoneIdOffset));
        nodeIds = new ArrayList<Integer>(nodeIdSet);
        // NOTE(review): the shuffle happens *after* the zone was picked, so it
        // only affects later iterations — confirm this ordering is intended.
        Collections.shuffle(zoneIds, new Random(System.currentTimeMillis()));
        Cluster shuffleResults = swapGreedyRandomPartitions(returnCluster,
                                                            nodeIds,
                                                            greedySwapMaxPartitionsPerNode,
                                                            greedySwapMaxPartitionsPerZone,
                                                            storeDefs);
        double nextUtility = new PartitionBalance(shuffleResults, storeDefs).getUtility();
        System.out.println("Swap improved max-min ratio: " + currentUtility + " -> "
                           + nextUtility + " (swap attempt " + i + " in zone "
                           + zoneIds.get(zoneIdOffset) + ")");
        returnCluster = shuffleResults;
        currentUtility = nextUtility;
    }
    return returnCluster;
}
24,374 | protected void stopInner ( ) { if ( this . nettyServerChannel != null ) { this . nettyServerChannel . close ( ) ; } if ( allChannels != null ) { allChannels . close ( ) . awaitUninterruptibly ( ) ; } this . bootstrap . releaseExternalResources ( ) ; } | Closes the Netty Channel and releases all resources |
24,375 | protected int parseZoneId ( ) { int result = - 1 ; String zoneIdStr = this . request . getHeader ( RestMessageHeaders . X_VOLD_ZONE_ID ) ; if ( zoneIdStr != null ) { try { int zoneId = Integer . parseInt ( zoneIdStr ) ; if ( zoneId < 0 ) { logger . error ( "ZoneId cannot be negative. Assuming the default zone id." ) ; } else { result = zoneId ; } } catch ( NumberFormatException nfe ) { logger . error ( "Exception when validating request. Incorrect zone id parameter. Cannot parse this to int: " + zoneIdStr , nfe ) ; } } return result ; } | Retrieve and validate the zone id value from the REST request . X - VOLD - Zone - Id is the zone id header . |
/**
 * Constructs a composite Voldemort request from the (already validated)
 * REST request and passes it on to the next handler in the Netty pipeline.
 * Also resolves the Store object named in the request; writes an error
 * response instead when the request has already timed out or the store
 * does not exist.
 */
protected void registerRequest(RestRequestValidator requestValidator,
                               ChannelHandlerContext ctx,
                               MessageEvent messageEvent) {
    // A null request object means validation failed and (presumably) an error
    // response was already written by the validator — TODO confirm.
    CompositeVoldemortRequest<ByteArray, byte[]> requestObject = requestValidator.constructCompositeVoldemortRequestObject();
    if(requestObject != null) {
        long now = System.currentTimeMillis();
        // Reject the request if its routing deadline (origin + timeout) passed.
        if(requestObject.getRequestOriginTimeInMs() + requestObject.getRoutingTimeoutInMs() <= now) {
            RestErrorHandler.writeErrorResponse(messageEvent,
                                                HttpResponseStatus.REQUEST_TIMEOUT,
                                                "current time: " + now + "\torigin time: "
                                                        + requestObject.getRequestOriginTimeInMs()
                                                        + "\ttimeout in ms: "
                                                        + requestObject.getRoutingTimeoutInMs());
            return;
        } else {
            Store store = getStore(requestValidator.getStoreName(),
                                   requestValidator.getParsedRoutingType());
            if(store != null) {
                VoldemortStoreRequest voldemortStoreRequest = new VoldemortStoreRequest(requestObject,
                                                                                        store,
                                                                                        parseZoneId());
                // Hand the wrapped request to the next handler in the pipeline.
                Channels.fireMessageReceived(ctx, voldemortStoreRequest);
            } else {
                logger.error("Error when getting store. Non Existing store name.");
                RestErrorHandler.writeErrorResponse(messageEvent,
                                                    HttpResponseStatus.BAD_REQUEST,
                                                    "Non Existing store name. Critical error.");
                return;
            }
        }
    }
}
24,377 | private Pair < Cluster , List < StoreDefinition > > getCurrentClusterState ( ) { Versioned < Cluster > currentVersionedCluster = adminClient . rebalanceOps . getLatestCluster ( Utils . nodeListToNodeIdList ( Lists . newArrayList ( adminClient . getAdminClientCluster ( ) . getNodes ( ) ) ) ) ; Cluster cluster = currentVersionedCluster . getValue ( ) ; List < StoreDefinition > storeDefs = adminClient . rebalanceOps . getCurrentStoreDefinitions ( cluster ) ; return new Pair < Cluster , List < StoreDefinition > > ( cluster , storeDefs ) ; } | Probe the existing cluster to retrieve the current cluster xml and stores xml . |
24,378 | private void executePlan ( RebalancePlan rebalancePlan ) { logger . info ( "Starting to execute rebalance Plan!" ) ; int batchCount = 0 ; int partitionStoreCount = 0 ; long totalTimeMs = 0 ; List < RebalanceBatchPlan > entirePlan = rebalancePlan . getPlan ( ) ; int numBatches = entirePlan . size ( ) ; int numPartitionStores = rebalancePlan . getPartitionStoresMoved ( ) ; for ( RebalanceBatchPlan batchPlan : entirePlan ) { logger . info ( "======== REBALANCING BATCH " + ( batchCount + 1 ) + " ========" ) ; RebalanceUtils . printBatchLog ( batchCount , logger , batchPlan . toString ( ) ) ; long startTimeMs = System . currentTimeMillis ( ) ; executeBatch ( batchCount , batchPlan ) ; totalTimeMs += ( System . currentTimeMillis ( ) - startTimeMs ) ; batchCount ++ ; partitionStoreCount += batchPlan . getPartitionStoreMoves ( ) ; batchStatusLog ( batchCount , numBatches , partitionStoreCount , numPartitionStores , totalTimeMs ) ; } } | Executes the rebalance plan . Does so batch - by - batch . Between each batch status is dumped to logger . info . |
24,379 | private void batchStatusLog ( int batchCount , int numBatches , int partitionStoreCount , int numPartitionStores , long totalTimeMs ) { double rate = 1 ; long estimatedTimeMs = 0 ; if ( numPartitionStores > 0 ) { rate = partitionStoreCount / numPartitionStores ; estimatedTimeMs = ( long ) ( totalTimeMs / rate ) - totalTimeMs ; } StringBuilder sb = new StringBuilder ( ) ; sb . append ( "Batch Complete!" ) . append ( Utils . NEWLINE ) . append ( "\tbatches moved: " ) . append ( batchCount ) . append ( " out of " ) . append ( numBatches ) . append ( Utils . NEWLINE ) . append ( "\tPartition stores moved: " ) . append ( partitionStoreCount ) . append ( " out of " ) . append ( numPartitionStores ) . append ( Utils . NEWLINE ) . append ( "\tPercent done: " ) . append ( decimalFormatter . format ( rate * 100.0 ) ) . append ( Utils . NEWLINE ) . append ( "\tEstimated time left: " ) . append ( estimatedTimeMs ) . append ( " ms (" ) . append ( TimeUnit . MILLISECONDS . toHours ( estimatedTimeMs ) ) . append ( " hours)" ) ; RebalanceUtils . printBatchLog ( batchCount , logger , sb . toString ( ) ) ; } | Pretty print a progress update after each batch complete . |
/**
 * Executes one batch of the rebalance plan: pushes the cluster/rebalance
 * state change to the servers, then migrates read-only stores first and
 * read-write stores second (with a proxy pause before the read-write
 * phase). Any failure aborts the batch wrapped in a VoldemortException.
 */
private void executeBatch(int batchId, final RebalanceBatchPlan batchPlan) {
    final Cluster batchCurrentCluster = batchPlan.getCurrentCluster();
    final List<StoreDefinition> batchCurrentStoreDefs = batchPlan.getCurrentStoreDefs();
    final Cluster batchFinalCluster = batchPlan.getFinalCluster();
    final List<StoreDefinition> batchFinalStoreDefs = batchPlan.getFinalStoreDefs();
    try {
        final List<RebalanceTaskInfo> rebalanceTaskInfoList = batchPlan.getBatchPlan();
        if(rebalanceTaskInfoList.isEmpty()) {
            // Empty batch: still propagate the cluster state change, then skip
            // the data movement entirely.
            RebalanceUtils.printBatchLog(batchId, logger, "Skipping batch " + batchId
                                                          + " since it is empty.");
            adminClient.rebalanceOps.rebalanceStateChange(batchCurrentCluster,
                                                          batchFinalCluster,
                                                          batchCurrentStoreDefs,
                                                          batchFinalStoreDefs,
                                                          rebalanceTaskInfoList,
                                                          false,
                                                          true,
                                                          false,
                                                          false,
                                                          true);
            return;
        }
        RebalanceUtils.printBatchLog(batchId, logger, "Starting batch " + batchId + ".");
        // Split the final store defs into read-only and read-write groups;
        // each group is migrated in its own phase below.
        List<StoreDefinition> readOnlyStoreDefs = StoreDefinitionUtils.filterStores(batchFinalStoreDefs,
                                                                                    true);
        List<StoreDefinition> readWriteStoreDefs = StoreDefinitionUtils.filterStores(batchFinalStoreDefs,
                                                                                     false);
        boolean hasReadOnlyStores = readOnlyStoreDefs != null && readOnlyStoreDefs.size() > 0;
        boolean hasReadWriteStores = readWriteStoreDefs != null
                                     && readWriteStoreDefs.size() > 0;
        boolean finishedReadOnlyPhase = false;
        // ---- Phase 1: read-only stores ----
        List<RebalanceTaskInfo> filteredRebalancePartitionPlanList = RebalanceUtils.filterTaskPlanWithStores(rebalanceTaskInfoList,
                                                                                                             readOnlyStoreDefs);
        rebalanceStateChange(batchId,
                             batchCurrentCluster,
                             batchCurrentStoreDefs,
                             batchFinalCluster,
                             batchFinalStoreDefs,
                             filteredRebalancePartitionPlanList,
                             hasReadOnlyStores,
                             hasReadWriteStores,
                             finishedReadOnlyPhase);
        if(hasReadOnlyStores) {
            RebalanceBatchPlanProgressBar progressBar = batchPlan.getProgressBar(batchId);
            executeSubBatch(batchId,
                            progressBar,
                            batchCurrentCluster,
                            batchCurrentStoreDefs,
                            filteredRebalancePartitionPlanList,
                            hasReadOnlyStores,
                            hasReadWriteStores,
                            finishedReadOnlyPhase);
        }
        // ---- Phase 2: read-write stores ----
        finishedReadOnlyPhase = true;
        filteredRebalancePartitionPlanList = RebalanceUtils.filterTaskPlanWithStores(rebalanceTaskInfoList,
                                                                                     readWriteStoreDefs);
        rebalanceStateChange(batchId,
                             batchCurrentCluster,
                             batchCurrentStoreDefs,
                             batchFinalCluster,
                             batchFinalStoreDefs,
                             filteredRebalancePartitionPlanList,
                             hasReadOnlyStores,
                             hasReadWriteStores,
                             finishedReadOnlyPhase);
        if(hasReadWriteStores) {
            // Give the servers time to establish proxy bridges before moving data.
            proxyPause();
            RebalanceBatchPlanProgressBar progressBar = batchPlan.getProgressBar(batchId);
            executeSubBatch(batchId,
                            progressBar,
                            batchCurrentCluster,
                            batchCurrentStoreDefs,
                            filteredRebalancePartitionPlanList,
                            hasReadOnlyStores,
                            hasReadWriteStores,
                            finishedReadOnlyPhase);
        }
        RebalanceUtils.printBatchLog(batchId, logger, "Successfully terminated batch "
                                                      + batchId + ".");
    } catch(Exception e) {
        RebalanceUtils.printErrorLog(batchId, logger, "Error in batch " + batchId + " - "
                                                      + e.getMessage(), e);
        throw new VoldemortException("Rebalance failed on batch " + batchId, e);
    }
}
24,381 | private void proxyPause ( ) { logger . info ( "Pausing after cluster state has changed to allow proxy bridges to be established. " + "Will start rebalancing work on servers in " + proxyPauseSec + " seconds." ) ; try { Thread . sleep ( TimeUnit . SECONDS . toMillis ( proxyPauseSec ) ) ; } catch ( InterruptedException e ) { logger . warn ( "Sleep interrupted in proxy pause." ) ; } } | Pause between cluster change in metadata and starting server rebalancing work . |
/**
 * The smallest granularity of rebalancing: moves partitions for a subset
 * of stores (either the read-only or the read-write group of the batch).
 * Submits one task per plan entry to a thread pool, waits for completion,
 * and on failure rolls the cluster metadata back to the pre-batch state.
 *
 * @param batchId                 identifier of the enclosing batch
 * @param progressBar             progress reporter for this batch
 * @param batchRollbackCluster    cluster to restore on failure
 * @param batchRollbackStoreDefs  store definitions to restore on failure
 * @param rebalanceTaskPlanList   the per-store-subset task plan to execute
 * @param hasReadOnlyStores       whether the batch contains read-only stores
 * @param hasReadWriteStores      whether the batch contains read-write stores
 * @param finishedReadOnlyStores  whether the read-only phase already completed
 *                                (selects which rollback flags are used)
 */
private void executeSubBatch(final int batchId,
                             RebalanceBatchPlanProgressBar progressBar,
                             final Cluster batchRollbackCluster,
                             final List<StoreDefinition> batchRollbackStoreDefs,
                             final List<RebalanceTaskInfo> rebalanceTaskPlanList,
                             boolean hasReadOnlyStores,
                             boolean hasReadWriteStores,
                             boolean finishedReadOnlyStores) {
    RebalanceUtils.printBatchLog(batchId, logger, "Submitting rebalance tasks ");
    ExecutorService service = RebalanceUtils.createExecutors(maxParallelRebalancing);
    final List<RebalanceTask> failedTasks = Lists.newArrayList();
    final List<RebalanceTask> incompleteTasks = Lists.newArrayList();
    // One permit per donor node: at most one task fetches from a given
    // donor at a time.
    Map<Integer, Semaphore> donorPermits = new HashMap<Integer, Semaphore>();
    for(Node node: batchRollbackCluster.getNodes()) {
        donorPermits.put(node.getId(), new Semaphore(1));
    }
    try {
        List<RebalanceTask> allTasks = executeTasks(batchId,
                                                    progressBar,
                                                    service,
                                                    rebalanceTaskPlanList,
                                                    donorPermits);
        RebalanceUtils.printBatchLog(batchId, logger, "All rebalance tasks submitted");
        // Block until every submitted task has finished (no timeout).
        RebalanceUtils.executorShutDown(service, Long.MAX_VALUE);
        RebalanceUtils.printBatchLog(batchId, logger, "Finished waiting for executors");
        // Partition tasks into failed vs merely incomplete; failures carry
        // their exceptions for the aggregate error below.
        List<Exception> failures = Lists.newArrayList();
        for(RebalanceTask task: allTasks) {
            if(task.hasException()) {
                failedTasks.add(task);
                failures.add(task.getError());
            } else if(!task.isComplete()) {
                incompleteTasks.add(task);
            }
        }
        if(failedTasks.size() > 0) {
            throw new VoldemortRebalancingException("Rebalance task terminated unsuccessfully on tasks "
                                                    + failedTasks, failures);
        }
        if(incompleteTasks.size() > 0) {
            throw new VoldemortException("Rebalance tasks are still incomplete / running "
                                         + incompleteTasks);
        }
    } catch(VoldemortRebalancingException e) {
        logger.error("Failure while migrating partitions for rebalance task " + batchId);
        // Roll back cluster metadata. The flag combinations mirror the
        // forward state changes in executeBatch; only roll back if the
        // read-only phase already finished (otherwise the RO state change
        // was the last durable transition).
        if(hasReadOnlyStores && hasReadWriteStores && finishedReadOnlyStores) {
            adminClient.rebalanceOps.rebalanceStateChange(null,
                                                          batchRollbackCluster,
                                                          null,
                                                          batchRollbackStoreDefs,
                                                          null,
                                                          true,
                                                          true,
                                                          false,
                                                          false,
                                                          false);
        } else if(hasReadWriteStores && finishedReadOnlyStores) {
            adminClient.rebalanceOps.rebalanceStateChange(null,
                                                          batchRollbackCluster,
                                                          null,
                                                          batchRollbackStoreDefs,
                                                          null,
                                                          false,
                                                          true,
                                                          false,
                                                          false,
                                                          false);
        }
        throw e;
    } finally {
        // Safety net: the executor should have been shut down above; if
        // not, force it so no worker threads leak.
        if(!service.isShutdown()) {
            RebalanceUtils.printErrorLog(batchId,
                                         logger,
                                         "Could not shutdown service cleanly for rebalance task "
                                                 + batchId,
                                         null);
            service.shutdownNow();
        }
    }
}
24,383 | public static ConsistencyLevel determineConsistency ( Map < Value , Set < ClusterNode > > versionNodeSetMap , int replicationFactor ) { boolean fullyConsistent = true ; Value latestVersion = null ; for ( Map . Entry < Value , Set < ClusterNode > > versionNodeSetEntry : versionNodeSetMap . entrySet ( ) ) { Value value = versionNodeSetEntry . getKey ( ) ; if ( latestVersion == null ) { latestVersion = value ; } else if ( value . isTimeStampLaterThan ( latestVersion ) ) { latestVersion = value ; } Set < ClusterNode > nodeSet = versionNodeSetEntry . getValue ( ) ; fullyConsistent = fullyConsistent && ( nodeSet . size ( ) == replicationFactor ) ; } if ( fullyConsistent ) { return ConsistencyLevel . FULL ; } else { if ( latestVersion != null && versionNodeSetMap . get ( latestVersion ) . size ( ) == replicationFactor ) { return ConsistencyLevel . LATEST_CONSISTENT ; } return ConsistencyLevel . INCONSISTENT ; } } | Determine the consistency level of a key |
24,384 | public static void cleanIneligibleKeys ( Map < ByteArray , Map < Value , Set < ClusterNode > > > keyVersionNodeSetMap , int requiredWrite ) { Set < ByteArray > keysToDelete = new HashSet < ByteArray > ( ) ; for ( Map . Entry < ByteArray , Map < Value , Set < ClusterNode > > > entry : keyVersionNodeSetMap . entrySet ( ) ) { Set < Value > valuesToDelete = new HashSet < Value > ( ) ; ByteArray key = entry . getKey ( ) ; Map < Value , Set < ClusterNode > > valueNodeSetMap = entry . getValue ( ) ; for ( Map . Entry < Value , Set < ClusterNode > > versionNodeSetEntry : valueNodeSetMap . entrySet ( ) ) { Set < ClusterNode > nodeSet = versionNodeSetEntry . getValue ( ) ; if ( nodeSet . size ( ) < requiredWrite ) { valuesToDelete . add ( versionNodeSetEntry . getKey ( ) ) ; } } for ( Value v : valuesToDelete ) { valueNodeSetMap . remove ( v ) ; } if ( valueNodeSetMap . size ( ) == 0 ) { keysToDelete . add ( key ) ; } } for ( ByteArray k : keysToDelete ) { keyVersionNodeSetMap . remove ( k ) ; } } | Determine if a key version is invalid by comparing the version s existence and required writes configuration |
24,385 | public static String keyVersionToString ( ByteArray key , Map < Value , Set < ClusterNode > > versionMap , String storeName , Integer partitionId ) { StringBuilder record = new StringBuilder ( ) ; for ( Map . Entry < Value , Set < ClusterNode > > versionSet : versionMap . entrySet ( ) ) { Value value = versionSet . getKey ( ) ; Set < ClusterNode > nodeSet = versionSet . getValue ( ) ; record . append ( "BAD_KEY," ) ; record . append ( storeName + "," ) ; record . append ( partitionId + "," ) ; record . append ( ByteUtils . toHexString ( key . get ( ) ) + "," ) ; record . append ( nodeSet . toString ( ) . replace ( ", " , ";" ) + "," ) ; record . append ( value . toString ( ) ) ; } return record . toString ( ) ; } | Convert a key - version - nodeSet information to string |
24,386 | public void sendResponse ( StoreStats performanceStats , boolean isFromLocalZone , long startTimeInMs ) throws Exception { ChannelBuffer responseContent = ChannelBuffers . dynamicBuffer ( this . responseValue . length ) ; responseContent . writeBytes ( responseValue ) ; HttpResponse response = new DefaultHttpResponse ( HTTP_1_1 , OK ) ; response . setHeader ( CONTENT_TYPE , "binary" ) ; response . setHeader ( CONTENT_TRANSFER_ENCODING , "binary" ) ; response . setContent ( responseContent ) ; response . setHeader ( CONTENT_LENGTH , response . getContent ( ) . readableBytes ( ) ) ; if ( logger . isDebugEnabled ( ) ) { logger . debug ( "Response = " + response ) ; } this . messageEvent . getChannel ( ) . write ( response ) ; if ( performanceStats != null && isFromLocalZone ) { recordStats ( performanceStats , startTimeInMs , Tracked . GET ) ; } } | Sends a normal HTTP response containing the serialization information in a XML format |
24,387 | public FailureDetectorConfig setCluster ( Cluster cluster ) { Utils . notNull ( cluster ) ; this . cluster = cluster ; if ( this . connectionVerifier instanceof AdminConnectionVerifier ) { ( ( AdminConnectionVerifier ) connectionVerifier ) . setCluster ( cluster ) ; } return this ; } | Look at the comments on cluster variable to see why this is problematic |
24,388 | public synchronized FailureDetectorConfig setNodes ( Collection < Node > nodes ) { Utils . notNull ( nodes ) ; this . nodes = new HashSet < Node > ( nodes ) ; return this ; } | Assigns a list of nodes in the cluster represented by this failure detector configuration . |
24,389 | public boolean hasNodeWithId ( int nodeId ) { Node node = nodesById . get ( nodeId ) ; if ( node == null ) { return false ; } return true ; } | Given a cluster and a node id checks if the node exists |
24,390 | public static Cluster cloneCluster ( Cluster cluster ) { return new Cluster ( cluster . getName ( ) , new ArrayList < Node > ( cluster . getNodes ( ) ) , new ArrayList < Zone > ( cluster . getZones ( ) ) ) ; } | Clones the cluster by constructing a new one with same name partition layout and nodes . |
24,391 | public AdminClient checkout ( ) { if ( isClosed . get ( ) ) { throw new IllegalStateException ( "Pool is closing" ) ; } AdminClient client ; while ( ( client = clientCache . poll ( ) ) != null ) { if ( ! client . isClusterModified ( ) ) { return client ; } else { client . close ( ) ; } } return createAdminClient ( ) ; } | get an AdminClient from the cache if exists if not create new one and return it . This method is non - blocking . |
24,392 | public void checkin ( AdminClient client ) { if ( isClosed . get ( ) ) { throw new IllegalStateException ( "Pool is closing" ) ; } if ( client == null ) { throw new IllegalArgumentException ( "client is null" ) ; } boolean isCheckedIn = clientCache . offer ( client ) ; if ( ! isCheckedIn ) { client . close ( ) ; } } | submit the adminClient after usage is completed . Behavior is undefined if checkin is called with objects not retrieved from checkout . |
24,393 | public void close ( ) { boolean isPreviouslyClosed = isClosed . getAndSet ( true ) ; if ( isPreviouslyClosed ) { return ; } AdminClient client ; while ( ( client = clientCache . poll ( ) ) != null ) { client . close ( ) ; } } | close the AdminPool if no long required . After closed all public methods will throw IllegalStateException |
24,394 | public static String compressedListOfPartitionsInZone ( final Cluster cluster , int zoneId ) { Map < Integer , Integer > idToRunLength = PartitionBalanceUtils . getMapOfContiguousPartitions ( cluster , zoneId ) ; StringBuilder sb = new StringBuilder ( ) ; sb . append ( "[" ) ; boolean first = true ; Set < Integer > sortedInitPartitionIds = new TreeSet < Integer > ( idToRunLength . keySet ( ) ) ; for ( int initPartitionId : sortedInitPartitionIds ) { if ( ! first ) { sb . append ( ", " ) ; } else { first = false ; } int runLength = idToRunLength . get ( initPartitionId ) ; if ( runLength == 1 ) { sb . append ( initPartitionId ) ; } else { int endPartitionId = ( initPartitionId + runLength - 1 ) % cluster . getNumberOfPartitions ( ) ; sb . append ( initPartitionId ) . append ( "-" ) . append ( endPartitionId ) ; } } sb . append ( "]" ) ; return sb . toString ( ) ; } | Compress contiguous partitions into format e - i instead of e f g h i . This helps illustrate contiguous partitions within a zone . |
/**
 * Determines the run length of each contiguous run of partition ids within
 * a zone, keyed by the run's initial partition id. A run that reaches the
 * last partition id and continues at id 0 is merged into a single
 * wrap-around run.
 *
 * NOTE(review): this assumes getPartitionIdsInZone returns ids in sorted
 * ascending order — the contiguity test below relies on it; confirm
 * against the Cluster implementation.
 *
 * @param cluster the cluster whose partitions are scanned
 * @param zoneId  the zone to scan
 * @return map from a run's initial partition id to its length
 */
public static Map<Integer, Integer> getMapOfContiguousPartitions(final Cluster cluster,
                                                                 int zoneId) {
    List<Integer> partitionIds = new ArrayList<Integer>(cluster.getPartitionIdsInZone(zoneId));
    Map<Integer, Integer> partitionIdToRunLength = Maps.newHashMap();
    if(partitionIds.isEmpty()) {
        return partitionIdToRunLength;
    }
    // Walk the sorted ids, closing a run whenever the next id is not the
    // direct successor of the previous one.
    int lastPartitionId = partitionIds.get(0);
    int initPartitionId = lastPartitionId;
    for(int offset = 1; offset < partitionIds.size(); offset++) {
        int partitionId = partitionIds.get(offset);
        if(partitionId == lastPartitionId + 1) {
            lastPartitionId = partitionId;
            continue;
        }
        // Gap found: record the finished run and start a new one.
        int runLength = lastPartitionId - initPartitionId + 1;
        partitionIdToRunLength.put(initPartitionId, runLength);
        initPartitionId = partitionId;
        lastPartitionId = initPartitionId;
    }
    // Close the final run. If it ends at the last partition of the ring
    // and another run starts at 0, merge the two into one wrap-around run
    // keyed by this run's start id.
    int runLength = lastPartitionId - initPartitionId + 1;
    if(lastPartitionId == cluster.getNumberOfPartitions() - 1
       && partitionIdToRunLength.containsKey(0)) {
        partitionIdToRunLength.put(initPartitionId, runLength + partitionIdToRunLength.get(0));
        partitionIdToRunLength.remove(0);
    } else {
        partitionIdToRunLength.put(initPartitionId, runLength);
    }
    return partitionIdToRunLength;
}
24,396 | public static Map < Integer , Integer > getMapOfContiguousPartitionRunLengths ( final Cluster cluster , int zoneId ) { Map < Integer , Integer > idToRunLength = getMapOfContiguousPartitions ( cluster , zoneId ) ; Map < Integer , Integer > runLengthToCount = Maps . newHashMap ( ) ; if ( idToRunLength . isEmpty ( ) ) { return runLengthToCount ; } for ( int runLength : idToRunLength . values ( ) ) { if ( ! runLengthToCount . containsKey ( runLength ) ) { runLengthToCount . put ( runLength , 0 ) ; } runLengthToCount . put ( runLength , runLengthToCount . get ( runLength ) + 1 ) ; } return runLengthToCount ; } | Determines a histogram of contiguous runs of partitions within a zone . I . e . for each run length of contiguous partitions how many such runs are there . |
24,397 | public static String getPrettyMapOfContiguousPartitionRunLengths ( final Cluster cluster , int zoneId ) { Map < Integer , Integer > runLengthToCount = getMapOfContiguousPartitionRunLengths ( cluster , zoneId ) ; String prettyHistogram = "[" ; boolean first = true ; Set < Integer > runLengths = new TreeSet < Integer > ( runLengthToCount . keySet ( ) ) ; for ( int runLength : runLengths ) { if ( first ) { first = false ; } else { prettyHistogram += ", " ; } prettyHistogram += "{" + runLength + " : " + runLengthToCount . get ( runLength ) + "}" ; } prettyHistogram += "]" ; return prettyHistogram ; } | Pretty prints the output of getMapOfContiguousPartitionRunLengths |
24,398 | public static String getHotPartitionsDueToContiguity ( final Cluster cluster , int hotContiguityCutoff ) { StringBuilder sb = new StringBuilder ( ) ; for ( int zoneId : cluster . getZoneIds ( ) ) { Map < Integer , Integer > idToRunLength = getMapOfContiguousPartitions ( cluster , zoneId ) ; for ( Integer initialPartitionId : idToRunLength . keySet ( ) ) { int runLength = idToRunLength . get ( initialPartitionId ) ; if ( runLength < hotContiguityCutoff ) continue ; int hotPartitionId = ( initialPartitionId + runLength ) % cluster . getNumberOfPartitions ( ) ; Node hotNode = cluster . getNodeForPartitionId ( hotPartitionId ) ; sb . append ( "\tNode " + hotNode . getId ( ) + " (" + hotNode . getHost ( ) + ") has hot primary partition " + hotPartitionId + " that follows contiguous run of length " + runLength + Utils . NEWLINE ) ; } } return sb . toString ( ) ; } | Returns a pretty printed string of nodes that host specific hot partitions where hot is defined as following a contiguous run of partitions of some length in another zone . |
/**
 * Compares the current cluster with the final cluster and, per zone,
 * estimates the rate of operations that would trigger an invalid-metadata
 * exception: the fraction of zone-primary partitions in the current
 * cluster whose host node no longer hosts any zone-n-ary replica of that
 * partition in the final cluster. (The zone-primary receiving the
 * pseudo-master put/get is the normal case, hence this precondition.)
 *
 * @param currentCluster   cluster before the rebalance
 * @param currentStoreDefs store definitions for the current cluster
 * @param finalCluster     cluster after the rebalance
 * @param finalStoreDefs   store definitions for the final cluster
 * @return a human-readable per-store, per-zone dump of the rates
 */
public static String analyzeInvalidMetadataRate(final Cluster currentCluster,
                                                List<StoreDefinition> currentStoreDefs,
                                                final Cluster finalCluster,
                                                List<StoreDefinition> finalStoreDefs) {
    StringBuilder sb = new StringBuilder();
    sb.append("Dump of invalid metadata rates per zone").append(Utils.NEWLINE);
    // Collapse stores with identical routing characteristics; analyze one
    // exemplar per group and report how many stores it represents.
    HashMap<StoreDefinition, Integer> uniqueStores = StoreDefinitionUtils.getUniqueStoreDefinitionsWithCounts(currentStoreDefs);
    for(StoreDefinition currentStoreDef: uniqueStores.keySet()) {
        sb.append("Store exemplar: " + currentStoreDef.getName())
          .append(Utils.NEWLINE)
          .append("\tThere are " + uniqueStores.get(currentStoreDef) + " other similar stores.")
          .append(Utils.NEWLINE);
        // Routing plans for the same store under the current and final
        // topologies; the final plan uses the final cluster's store def.
        StoreRoutingPlan currentSRP = new StoreRoutingPlan(currentCluster, currentStoreDef);
        StoreDefinition finalStoreDef = StoreUtils.getStoreDef(finalStoreDefs,
                                                               currentStoreDef.getName());
        StoreRoutingPlan finalSRP = new StoreRoutingPlan(finalCluster, finalStoreDef);
        for(int zoneId: currentCluster.getZoneIds()) {
            int zonePrimariesCount = 0;
            int invalidMetadata = 0;
            // For every zone-primary partition a node currently hosts,
            // check whether that node still hosts any zone-n-ary replica of
            // the partition in the final layout; if not, clients routed by
            // stale metadata will hit an invalid-metadata exception there.
            for(int nodeId: currentCluster.getNodeIdsInZone(zoneId)) {
                for(int zonePrimaryPartitionId: currentSRP.getZonePrimaryPartitionIds(nodeId)) {
                    zonePrimariesCount++;
                    if(!finalSRP.getZoneNAryPartitionIds(nodeId)
                                .contains(zonePrimaryPartitionId)) {
                        invalidMetadata++;
                    }
                }
            }
            // NOTE(review): if a zone has no zone-primaries this is 0/0 and
            // prints NaN — presumably acceptable for a diagnostic dump, but
            // worth confirming.
            float rate = invalidMetadata / (float) zonePrimariesCount;
            sb.append("\tZone " + zoneId)
              .append(" : total zone primaries " + zonePrimariesCount)
              .append(", # that trigger invalid metadata " + invalidMetadata)
              .append(" => " + rate)
              .append(Utils.NEWLINE);
        }
    }
    return sb.toString();
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.