idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
147,800
/**
 * Closes the pool. Idempotent: only the first caller proceeds; later calls
 * are no-ops. Drains the client cache and closes every cached client.
 */
public void close() {
    // getAndSet returns the previous value, so only one caller wins the race.
    if (isClosed.getAndSet(true)) {
        return;
    }
    for (AdminClient client = clientCache.poll(); client != null; client = clientCache.poll()) {
        client.close();
    }
}
Closes the AdminPool when it is no longer required. After it has been closed, all public methods will throw IllegalStateException.
62
19
147,801
/**
 * Renders the partitions in a zone as a compact bracketed list, compressing
 * each contiguous run into "start-end" form (e.g. "[0-4, 7, 9-11]").
 */
public static String compressedListOfPartitionsInZone(final Cluster cluster, int zoneId) {
    Map<Integer, Integer> idToRunLength =
            PartitionBalanceUtils.getMapOfContiguousPartitions(cluster, zoneId);
    StringBuilder builder = new StringBuilder("[");
    String separator = "";
    // TreeSet gives ascending order of run-start partition IDs.
    for (int initPartitionId : new TreeSet<Integer>(idToRunLength.keySet())) {
        builder.append(separator);
        separator = ", ";
        int runLength = idToRunLength.get(initPartitionId);
        builder.append(initPartitionId);
        if (runLength != 1) {
            // Runs may wrap around the ring, hence the modulo on the end ID.
            int endPartitionId = (initPartitionId + runLength - 1) % cluster.getNumberOfPartitions();
            builder.append("-").append(endPartitionId);
        }
    }
    builder.append("]");
    return builder.toString();
}
Compress contiguous partitions into format e - i instead of e f g h i . This helps illustrate contiguous partitions within a zone .
259
26
147,802
public static Map < Integer , Integer > getMapOfContiguousPartitions ( final Cluster cluster , int zoneId ) { List < Integer > partitionIds = new ArrayList < Integer > ( cluster . getPartitionIdsInZone ( zoneId ) ) ; Map < Integer , Integer > partitionIdToRunLength = Maps . newHashMap ( ) ; if ( partitionIds . isEmpty ( ) ) { return partitionIdToRunLength ; } int lastPartitionId = partitionIds . get ( 0 ) ; int initPartitionId = lastPartitionId ; for ( int offset = 1 ; offset < partitionIds . size ( ) ; offset ++ ) { int partitionId = partitionIds . get ( offset ) ; if ( partitionId == lastPartitionId + 1 ) { lastPartitionId = partitionId ; continue ; } int runLength = lastPartitionId - initPartitionId + 1 ; partitionIdToRunLength . put ( initPartitionId , runLength ) ; initPartitionId = partitionId ; lastPartitionId = initPartitionId ; } int runLength = lastPartitionId - initPartitionId + 1 ; if ( lastPartitionId == cluster . getNumberOfPartitions ( ) - 1 && partitionIdToRunLength . containsKey ( 0 ) ) { // special case of contiguity that wraps around the ring. partitionIdToRunLength . put ( initPartitionId , runLength + partitionIdToRunLength . get ( 0 ) ) ; partitionIdToRunLength . remove ( 0 ) ; } else { partitionIdToRunLength . put ( initPartitionId , runLength ) ; } return partitionIdToRunLength ; }
Determines run length for each initial partition ID . Note that a contiguous run may wrap around the end of the ring .
360
25
147,803
/**
 * Builds a histogram of contiguous partition runs within a zone: for each run
 * length, how many runs of that length exist.
 */
public static Map<Integer, Integer> getMapOfContiguousPartitionRunLengths(final Cluster cluster, int zoneId) {
    Map<Integer, Integer> idToRunLength = getMapOfContiguousPartitions(cluster, zoneId);
    Map<Integer, Integer> runLengthToCount = Maps.newHashMap();
    if (idToRunLength.isEmpty()) {
        return runLengthToCount;
    }
    for (int runLength : idToRunLength.values()) {
        // Single get() instead of containsKey()/get()/put() triple lookup.
        Integer count = runLengthToCount.get(runLength);
        runLengthToCount.put(runLength, count == null ? 1 : count + 1);
    }
    return runLengthToCount;
}
Determines a histogram of contiguous runs of partitions within a zone . I . e . for each run length of contiguous partitions how many such runs are there .
170
33
147,804
/**
 * Pretty prints the output of getMapOfContiguousPartitionRunLengths as
 * "[{runLength : count}, ...]" in ascending run-length order.
 */
public static String getPrettyMapOfContiguousPartitionRunLengths(final Cluster cluster, int zoneId) {
    Map<Integer, Integer> runLengthToCount = getMapOfContiguousPartitionRunLengths(cluster, zoneId);
    // StringBuilder avoids the O(n^2) cost of String += inside the loop and
    // matches the style of the sibling pretty-printers in this class.
    StringBuilder prettyHistogram = new StringBuilder("[");
    boolean first = true;
    Set<Integer> runLengths = new TreeSet<Integer>(runLengthToCount.keySet());
    for (int runLength : runLengths) {
        if (first) {
            first = false;
        } else {
            prettyHistogram.append(", ");
        }
        prettyHistogram.append("{")
                       .append(runLength)
                       .append(" : ")
                       .append(runLengthToCount.get(runLength))
                       .append("}");
    }
    prettyHistogram.append("]");
    return prettyHistogram.toString();
}
Pretty prints the output of getMapOfContiguousPartitionRunLengths
164
15
147,805
/**
 * Reports nodes hosting "hot" partitions — a partition is hot when it
 * immediately follows a contiguous run of at least hotContiguityCutoff
 * partitions in some zone.
 */
public static String getHotPartitionsDueToContiguity(final Cluster cluster, int hotContiguityCutoff) {
    StringBuilder sb = new StringBuilder();
    for (int zoneId : cluster.getZoneIds()) {
        Map<Integer, Integer> idToRunLength = getMapOfContiguousPartitions(cluster, zoneId);
        for (Integer initialPartitionId : idToRunLength.keySet()) {
            int runLength = idToRunLength.get(initialPartitionId);
            if (runLength < hotContiguityCutoff) {
                continue;
            }
            // The partition right after the run (modulo ring size) is the hot one.
            int hotPartitionId = (initialPartitionId + runLength) % cluster.getNumberOfPartitions();
            Node hotNode = cluster.getNodeForPartitionId(hotPartitionId);
            sb.append("\tNode " + hotNode.getId() + " (" + hotNode.getHost()
                      + ") has hot primary partition " + hotPartitionId
                      + " that follows contiguous run of length " + runLength + Utils.NEWLINE);
        }
    }
    return sb.toString();
}
Returns a pretty printed string of nodes that host specific hot partitions where hot is defined as following a contiguous run of partitions of some length in another zone .
246
30
147,806
public static String analyzeInvalidMetadataRate ( final Cluster currentCluster , List < StoreDefinition > currentStoreDefs , final Cluster finalCluster , List < StoreDefinition > finalStoreDefs ) { StringBuilder sb = new StringBuilder ( ) ; sb . append ( "Dump of invalid metadata rates per zone" ) . append ( Utils . NEWLINE ) ; HashMap < StoreDefinition , Integer > uniqueStores = StoreDefinitionUtils . getUniqueStoreDefinitionsWithCounts ( currentStoreDefs ) ; for ( StoreDefinition currentStoreDef : uniqueStores . keySet ( ) ) { sb . append ( "Store exemplar: " + currentStoreDef . getName ( ) ) . append ( Utils . NEWLINE ) . append ( "\tThere are " + uniqueStores . get ( currentStoreDef ) + " other similar stores." ) . append ( Utils . NEWLINE ) ; StoreRoutingPlan currentSRP = new StoreRoutingPlan ( currentCluster , currentStoreDef ) ; StoreDefinition finalStoreDef = StoreUtils . getStoreDef ( finalStoreDefs , currentStoreDef . getName ( ) ) ; StoreRoutingPlan finalSRP = new StoreRoutingPlan ( finalCluster , finalStoreDef ) ; // Only care about existing zones for ( int zoneId : currentCluster . getZoneIds ( ) ) { int zonePrimariesCount = 0 ; int invalidMetadata = 0 ; // Examine nodes in current cluster in existing zone. for ( int nodeId : currentCluster . getNodeIdsInZone ( zoneId ) ) { // For every zone-primary in current cluster for ( int zonePrimaryPartitionId : currentSRP . getZonePrimaryPartitionIds ( nodeId ) ) { zonePrimariesCount ++ ; // Determine if original zone-primary node is still some // form of n-ary in final cluster. If not, // InvalidMetadataException will fire. if ( ! finalSRP . getZoneNAryPartitionIds ( nodeId ) . contains ( zonePrimaryPartitionId ) ) { invalidMetadata ++ ; } } } float rate = invalidMetadata / ( float ) zonePrimariesCount ; sb . append ( "\tZone " + zoneId ) . append ( " : total zone primaries " + zonePrimariesCount ) . append ( ", # that trigger invalid metadata " + invalidMetadata ) . append ( " => " + rate ) . append ( Utils . 
NEWLINE ) ; } } return sb . toString ( ) ; }
Compares current cluster with final cluster . Uses pertinent store defs for each cluster to determine if a node that hosts a zone - primary in the current cluster will no longer host any zone - nary in the final cluster . This check is the precondition for a server returning an invalid metadata exception to a client on a normal - case put or get . Normal - case being that the zone - primary receives the pseudo - master put or the get operation .
543
92
147,807
/**
 * Creates a new queued keyed resource pool with key type K and value type V,
 * backed by the given factory and configuration.
 */
public static <K, V> QueuedKeyedResourcePool<K, V> create(ResourceFactory<K, V> factory,
                                                          ResourcePoolConfig config) {
    return new QueuedKeyedResourcePool<K, V>(factory, config);
}
Create a new queued pool with key type K and value type V.
55
18
147,808
/**
 * Creates a new queued keyed resource pool with key type K and value type V,
 * using the default pool configuration.
 */
public static <K, V> QueuedKeyedResourcePool<K, V> create(ResourceFactory<K, V> factory) {
    return create(factory, new ResourcePoolConfig());
}
Create a new queued pool with key type K and value type V, using the default configuration.
44
24
147,809
/**
 * Used only for unit testing; performs a direct non-blocking checkout,
 * bypassing the request queue. Do not use in production paths.
 */
public V internalNonBlockingGet(K key) throws Exception {
    return attemptNonBlockingCheckout(key, getResourcePoolForKey(key));
}
Used only for unit testing . Please do not use this method in other ways .
45
16
147,810
/**
 * Pops requests off the queue until it is empty or an unexpired request is
 * found. Every expired request popped along the way gets handleTimeout().
 *
 * @return the first unexpired request, or null if the queue drained.
 */
private AsyncResourceRequest<V> getNextUnexpiredResourceRequest(Queue<AsyncResourceRequest<V>> requestQueue) {
    for (AsyncResourceRequest<V> request = requestQueue.poll();
         request != null;
         request = requestQueue.poll()) {
        if (request.getDeadlineNs() >= System.nanoTime()) {
            return request;
        }
        // Past its deadline: notify and keep draining.
        request.handleTimeout();
    }
    return null;
}
Pops resource requests off the queue until queue is empty or an unexpired resource request is found . Invokes . handleTimeout on all expired resource requests popped off the queue .
106
35
147,811
private boolean processQueue ( K key ) { Queue < AsyncResourceRequest < V >> requestQueue = getRequestQueueForKey ( key ) ; if ( requestQueue . isEmpty ( ) ) { return false ; } // Attempt to get a resource. Pool < V > resourcePool = getResourcePoolForKey ( key ) ; V resource = null ; Exception ex = null ; try { // Must attempt non-blocking checkout to ensure resources are // created for the pool. resource = attemptNonBlockingCheckout ( key , resourcePool ) ; } catch ( Exception e ) { destroyResource ( key , resourcePool , resource ) ; ex = e ; resource = null ; } // Neither we got a resource, nor an exception. So no requests can be // processed return if ( resource == null && ex == null ) { return false ; } // With resource in hand, process the resource requests AsyncResourceRequest < V > resourceRequest = getNextUnexpiredResourceRequest ( requestQueue ) ; if ( resourceRequest == null ) { if ( resource != null ) { // Did not use the resource! Directly check in via super to // avoid // circular call to processQueue(). try { super . checkin ( key , resource ) ; } catch ( Exception e ) { logger . error ( "Exception checking in resource: " , e ) ; } } else { // Poor exception, no request to tag this exception onto // drop it on the floor and continue as usual. } return false ; } else { // We have a request here. if ( resource != null ) { resourceRequest . useResource ( resource ) ; } else { resourceRequest . handleException ( ex ) ; } return true ; } }
Attempts to checkout a resource so that one queued request can be serviced .
352
16
147,812
@ Override public void checkin ( K key , V resource ) { super . checkin ( key , resource ) ; // NB: Blocking checkout calls for synchronous requests get the resource // checked in above before processQueueLoop() attempts checkout below. // There is therefore a risk that asynchronous requests will be starved. processQueueLoop ( key ) ; }
Check the given resource back into the pool
73
8
147,813
protected void destroyRequest ( AsyncResourceRequest < V > resourceRequest ) { if ( resourceRequest != null ) { try { // To hand control back to the owner of the // AsyncResourceRequest, treat "destroy" as an exception since // there is no resource to pass into useResource, and the // timeout has not expired. Exception e = new UnreachableStoreException ( "Client request was terminated while waiting in the queue." ) ; resourceRequest . handleException ( e ) ; } catch ( Exception ex ) { logger . error ( "Exception while destroying resource request:" , ex ) ; } } }
A safe wrapper to destroy the given resource request .
126
10
147,814
/** Drains the queue, destroying every pending resource request in it. */
private void destroyRequestQueue(Queue<AsyncResourceRequest<V>> requestQueue) {
    if (requestQueue == null) {
        return;
    }
    for (AsyncResourceRequest<V> request = requestQueue.poll();
         request != null;
         request = requestQueue.poll()) {
        destroyRequest(request);
    }
}
Destroys all resource requests in requestQueue .
74
10
147,815
public int getRegisteredResourceRequestCount ( K key ) { if ( requestQueueMap . containsKey ( key ) ) { Queue < AsyncResourceRequest < V >> requestQueue = getRequestQueueForExistingKey ( key ) ; // FYI: .size() is not constant time in the next call. ;) if ( requestQueue != null ) { return requestQueue . size ( ) ; } } return 0 ; }
Count the number of queued resource requests for a specific pool .
88
13
147,816
public int getRegisteredResourceRequestCount ( ) { int count = 0 ; for ( Entry < K , Queue < AsyncResourceRequest < V > > > entry : this . requestQueueMap . entrySet ( ) ) { // FYI: .size() is not constant time in the next call. ;) count += entry . getValue ( ) . size ( ) ; } return count ; }
Count the total number of queued resource requests for all queues . The result is approximate in the face of concurrency since individual queues can change size during the aggregate count .
82
34
147,817
protected void populateTasksByStealer ( List < StealerBasedRebalanceTask > sbTaskList ) { // Setup mapping of stealers to work for this run. for ( StealerBasedRebalanceTask task : sbTaskList ) { if ( task . getStealInfos ( ) . size ( ) != 1 ) { throw new VoldemortException ( "StealerBasedRebalanceTasks should have a list of RebalancePartitionsInfo of length 1." ) ; } RebalanceTaskInfo stealInfo = task . getStealInfos ( ) . get ( 0 ) ; int stealerId = stealInfo . getStealerId ( ) ; if ( ! this . tasksByStealer . containsKey ( stealerId ) ) { this . tasksByStealer . put ( stealerId , new ArrayList < StealerBasedRebalanceTask > ( ) ) ; } this . tasksByStealer . get ( stealerId ) . add ( task ) ; } if ( tasksByStealer . isEmpty ( ) ) { return ; } // Shuffle order of each stealer's work list. This randomization // helps to get rid of any "patterns" in how rebalancing tasks were // added to the task list passed in. for ( List < StealerBasedRebalanceTask > taskList : tasksByStealer . values ( ) ) { Collections . shuffle ( taskList ) ; } }
Go over the task list and create a map of stealerId - > Tasks
303
17
147,818
protected synchronized StealerBasedRebalanceTask scheduleNextTask ( boolean executeService ) { // Make sure there is work left to do. if ( doneSignal . getCount ( ) == 0 ) { logger . info ( "All tasks completion signaled... returning" ) ; return null ; } // Limit number of tasks outstanding. if ( this . numTasksExecuting >= maxParallelRebalancing ) { logger . info ( "Executing more tasks than [" + this . numTasksExecuting + "] the parallel allowed " + maxParallelRebalancing ) ; return null ; } // Shuffle list of stealer IDs each time a new task to schedule needs to // be found. Randomizing the order should avoid prioritizing one // specific stealer's work ahead of all others. List < Integer > stealerIds = new ArrayList < Integer > ( tasksByStealer . keySet ( ) ) ; Collections . shuffle ( stealerIds ) ; for ( int stealerId : stealerIds ) { if ( nodeIdsWithWork . contains ( stealerId ) ) { logger . info ( "Stealer " + stealerId + " is already working... continuing" ) ; continue ; } for ( StealerBasedRebalanceTask sbTask : tasksByStealer . get ( stealerId ) ) { int donorId = sbTask . getStealInfos ( ) . get ( 0 ) . getDonorId ( ) ; if ( nodeIdsWithWork . contains ( donorId ) ) { logger . info ( "Stealer " + stealerId + " Donor " + donorId + " is already working... continuing" ) ; continue ; } // Book keeping addNodesToWorkerList ( Arrays . asList ( stealerId , donorId ) ) ; numTasksExecuting ++ ; // Remove this task from list thus destroying list being // iterated over. This is safe because returning directly out of // this branch. tasksByStealer . get ( stealerId ) . remove ( sbTask ) ; try { if ( executeService ) { logger . info ( "Stealer " + stealerId + " Donor " + donorId + " going to schedule work" ) ; service . execute ( sbTask ) ; } } catch ( RejectedExecutionException ree ) { logger . error ( "Stealer " + stealerId + "Rebalancing task rejected by executor service." 
, ree ) ; throw new VoldemortRebalancingException ( "Stealer " + stealerId + "Rebalancing task rejected by executor service." ) ; } return sbTask ; } } printRemainingTasks ( stealerIds ) ; return null ; }
Schedule at most one task .
579
7
147,819
public synchronized void addNodesToWorkerList ( List < Integer > nodeIds ) { // Bookkeeping for nodes that will be involved in the next task nodeIdsWithWork . addAll ( nodeIds ) ; logger . info ( "Node IDs with work: " + nodeIdsWithWork + " Newly added nodes " + nodeIds ) ; }
Add nodes to the workers list
77
6
147,820
public synchronized void doneTask ( int stealerId , int donorId ) { removeNodesFromWorkerList ( Arrays . asList ( stealerId , donorId ) ) ; numTasksExecuting -- ; doneSignal . countDown ( ) ; // Try and schedule more tasks now that resources may be available to do // so. scheduleMoreTasks ( ) ; }
Method must be invoked upon completion of a rebalancing task. It is the task's responsibility to do so.
80
23
147,821
/**
 * Invokes the named zero-argument Long getter reflectively on every tracked
 * environment and collects the results.
 */
private List<Long> collectLongMetric(String metricGetterName) {
    List<Long> vals = new ArrayList<Long>();
    for (BdbEnvironmentStats envStats : environmentStatsTracked) {
        vals.add((Long) ReflectUtils.callMethod(envStats,
                                                BdbEnvironmentStats.class,
                                                metricGetterName,
                                                new Class<?>[0],
                                                new Object[0]));
    }
    return vals;
}
Calls the provided metric getter on all the tracked environments and obtains their values
101
17
147,822
public static Iterable < String > toHexStrings ( Iterable < ByteArray > arrays ) { ArrayList < String > ret = new ArrayList < String > ( ) ; for ( ByteArray array : arrays ) ret . ( ByteUtils . toHexString ( array . get ( ) ) ) ; return ret ; }
Translate the each ByteArray in an iterable into a hexadecimal string
70
17
147,823
/**
 * Returns the effective value of any config key — whether it came from
 * defaults or user-supplied configuration.
 *
 * @throws UndefinedPropertyException if the key does not exist
 * @throws ConfigurationException if the key is restricted from public access
 */
public String getPublicConfigValue(String key) throws ConfigurationException {
    if (!allProps.containsKey(key)) {
        throw new UndefinedPropertyException("The requested config key does not exist.");
    }
    if (restrictedConfigs.contains(key)) {
        throw new ConfigurationException("The requested config key is not publicly available!");
    }
    return allProps.get(key);
}
This is a generic function for retrieving any config value . The returned value is the one the server is operating with no matter whether it comes from defaults or from the user - supplied configuration .
88
37
147,824
private void checkRateLimit ( String quotaKey , Tracked trackedOp ) { String quotaValue = null ; try { if ( ! metadataStore . getQuotaEnforcingEnabledUnlocked ( ) ) { return ; } quotaValue = quotaStore . cacheGet ( quotaKey ) ; // Store may not have any quotas if ( quotaValue == null ) { return ; } // But, if it does float currentRate = getThroughput ( trackedOp ) ; float allowedRate = Float . parseFloat ( quotaValue ) ; // TODO the histogram should be reasonably accurate to do all // these things.. (ghost qps and all) // Report the current quota usage level quotaStats . reportQuotaUsed ( trackedOp , Utils . safeGetPercentage ( currentRate , allowedRate ) ) ; // check if we have exceeded rate. if ( currentRate > allowedRate ) { quotaStats . reportRateLimitedOp ( trackedOp ) ; throw new QuotaExceededException ( "Exceeded rate limit for " + quotaKey + ". Maximum allowed : " + allowedRate + " Current: " + currentRate ) ; } } catch ( NumberFormatException nfe ) { // move on, if we cannot parse quota value properly logger . debug ( "Invalid formatting of quota value for key " + quotaKey + " : " + quotaValue ) ; } }
Ensure the current throughput levels for the tracked operation does not exceed set quota limits . Throws an exception if exceeded quota .
282
25
147,825
/**
 * Submits an operation for asynchronous execution.
 *
 * @throws VoldemortException if an operation with this request ID was
 *         already submitted
 */
public synchronized void submitOperation(int requestId, AsyncOperation operation) {
    if (this.operations.containsKey(requestId)) {
        throw new VoldemortException("Request " + requestId
                                     + " already submitted to the system");
    }
    this.operations.put(requestId, operation);
    scheduler.scheduleNow(operation);
    logger.debug("Handling async operation " + requestId);
}
Submit an operation. Throws a runtime exception if an operation with the same request ID has already been submitted.
87
15
147,826
/**
 * Checks whether the operation with the given request ID has completed,
 * optionally removing it from tracking when it has.
 *
 * @throws VoldemortException if no operation with that ID is known
 */
public synchronized boolean isComplete(int requestId, boolean remove) {
    if (!operations.containsKey(requestId)) {
        throw new VoldemortException("No operation with id " + requestId + " found");
    }
    if (operations.get(requestId).getStatus().isComplete()) {
        if (logger.isDebugEnabled()) {
            logger.debug("Operation complete " + requestId);
        }
        if (remove) {
            operations.remove(requestId);
        }
        return true;
    }
    return false;
}
Check whether an operation is complete.
110
10
147,827
/** Wraps getOperationStatus so no exception escapes over JMX. */
@JmxOperation(description = "Retrieve operation status")
public String getStatus(int id) {
    try {
        return getOperationStatus(id).toString();
    } catch (VoldemortException e) {
        return "No operation with id " + id + " found";
    }
}
Wrap getOperationStatus to avoid throwing exception over JMX
62
12
147,828
/**
 * Lists asynchronous operation IDs on this node; by default only pending
 * (incomplete) operations, or all when showCompleted is true.
 */
public List<Integer> getAsyncOperationList(boolean showCompleted) {
    /**
     * Create a copy using an immutable set to avoid a
     * {@link java.util.ConcurrentModificationException}
     */
    Set<Integer> keySet = ImmutableSet.copyOf(operations.keySet());
    if (showCompleted) {
        return new ArrayList<Integer>(keySet);
    }
    List<Integer> keyList = new ArrayList<Integer>();
    for (int key : keySet) {
        AsyncOperation operation = operations.get(key);
        if (operation != null && !operation.getStatus().isComplete()) {
            keyList.add(key);
        }
    }
    return keyList;
}
Get list of asynchronous operations on this node . By default only the pending operations are returned .
151
18
147,829
/** Wraps stopOperation so no exception escapes over JMX. */
@JmxOperation
public String stopAsyncOperation(int requestId) {
    try {
        stopOperation(requestId);
    } catch (VoldemortException e) {
        return e.getMessage();
    }
    return "Stopping operation " + requestId;
}
Wrapper to avoid throwing an exception over JMX
52
10
147,830
/**
 * Replaces the store definition and recomputes the retention time from it
 * (when a retention period is configured).
 */
@Override
public void updateStoreDefinition(StoreDefinition storeDef) {
    this.storeDef = storeDef;
    if (storeDef.hasRetentionPeriod()) {
        // NOTE(review): assumes getRetentionDays() * Time.MS_PER_DAY cannot
        // overflow the target type — confirm against Time.MS_PER_DAY's type.
        this.retentionTimeMs = storeDef.getRetentionDays() * Time.MS_PER_DAY;
    }
}
Updates the store definition object and the retention time based on the updated store definition
61
16
147,831
private List < Versioned < byte [ ] > > filterExpiredEntries ( ByteArray key , List < Versioned < byte [ ] > > vals ) { Iterator < Versioned < byte [ ] > > valsIterator = vals . iterator ( ) ; while ( valsIterator . hasNext ( ) ) { Versioned < byte [ ] > val = valsIterator . next ( ) ; VectorClock clock = ( VectorClock ) val . getVersion ( ) ; // omit if expired if ( clock . getTimestamp ( ) < ( time . getMilliseconds ( ) - this . retentionTimeMs ) ) { valsIterator . remove ( ) ; // delete stale value if configured if ( deleteExpiredEntries ) { getInnerStore ( ) . delete ( key , clock ) ; } } } return vals ; }
Performs the filtering of the expired entries based on retention time . Optionally deletes them also
178
19
147,832
/**
 * Flushes the in-memory metadata map to the backing file, one
 * "[separator key]" header plus value per entry. IO failures are logged,
 * not rethrown.
 */
private synchronized void flushData() {
    BufferedWriter writer = null;
    try {
        writer = new BufferedWriter(new FileWriter(new File(this.inputPath)));
        for (String key : this.metadataMap.keySet()) {
            writer.write(NEW_PROPERTY_SEPARATOR + key.toString() + "]" + NEW_LINE);
            writer.write(this.metadataMap.get(key).toString());
            writer.write("" + NEW_LINE + "" + NEW_LINE);
        }
        writer.flush();
    } catch (IOException e) {
        logger.error("IO exception while flushing data to file backed storage: "
                     + e.getMessage());
    } finally {
        // FIX: close in finally — the original closed in straight-line code
        // after the try/catch, which leaked the writer if an unchecked
        // exception escaped the first block.
        try {
            if (writer != null) {
                writer.close();
            }
        } catch (Exception e) {
            logger.error("Error while flushing data to file backed storage: "
                         + e.getMessage());
        }
    }
}
Flush the in - memory data to the file
211
10
147,833
/**
 * Serializes the given vector clock to a JSON string; returns the empty
 * string if serialization fails.
 */
public static String getSerializedVectorClock(VectorClock vc) {
    VectorClockWrapper vcWrapper = new VectorClockWrapper(vc);
    String serializedVC = "";
    try {
        serializedVC = mapper.writeValueAsString(vcWrapper);
    } catch (Exception e) {
        // NOTE(review): failure is swallowed and "" returned; consider a
        // logger instead of printStackTrace.
        e.printStackTrace();
    }
    return serializedVC;
}
Function to serialize the given Vector clock into a string . If something goes wrong it returns an empty string .
84
22
147,834
/**
 * Serializes the given list of vector clocks to a JSON string; returns the
 * empty string if serialization fails.
 */
public static String getSerializedVectorClocks(List<VectorClock> vectorClocks) {
    List<VectorClockWrapper> vectorClockWrappers = new ArrayList<VectorClockWrapper>();
    for (VectorClock vc : vectorClocks) {
        vectorClockWrappers.add(new VectorClockWrapper(vc));
    }
    String serializedVC = "";
    try {
        serializedVC = mapper.writeValueAsString(vectorClockWrappers);
    } catch (Exception e) {
        // NOTE(review): failure is swallowed and "" returned; consider a
        // logger instead of printStackTrace.
        e.printStackTrace();
    }
    return serializedVC;
}
Function to serialize the given list of Vector clocks into a string . If something goes wrong it returns an empty string .
125
24
147,835
/**
 * Builds the XML string (store name + key/value serializer elements) sent in
 * response to a schemata fetch request for the given store definition.
 */
public static String constructSerializerInfoXml(StoreDefinition storeDefinition) {
    Element store = new Element(StoreDefinitionsMapper.STORE_ELMT);
    store.addContent(new Element(StoreDefinitionsMapper.STORE_NAME_ELMT)
                             .setText(storeDefinition.getName()));
    Element keySerializer = new Element(StoreDefinitionsMapper.STORE_KEY_SERIALIZER_ELMT);
    StoreDefinitionsMapper.addSerializer(keySerializer, storeDefinition.getKeySerializer());
    store.addContent(keySerializer);
    Element valueSerializer = new Element(StoreDefinitionsMapper.STORE_VALUE_SERIALIZER_ELMT);
    StoreDefinitionsMapper.addSerializer(valueSerializer, storeDefinition.getValueSerializer());
    store.addContent(valueSerializer);
    XMLOutputter serializer = new XMLOutputter(Format.getPrettyFormat());
    return serializer.outputString(store);
}
Given a storedefinition constructs the xml string to be sent out in response to a schemata fetch request
228
22
147,836
/** Fetches the latest cluster metadata version and caches it if present. */
public void updateMetadataVersions() {
    Properties versionProps = MetadataVersionStoreUtils.getProperties(
            this.systemStoreRepository.getMetadataVersionStore());
    Long newVersion = fetchNewVersion(SystemStoreConstants.CLUSTER_VERSION_KEY,
                                      null,
                                      versionProps);
    if (newVersion != null) {
        this.currentClusterVersion = newVersion;
    }
}
Fetch the latest versions for cluster metadata
89
8
147,837
public static void validateClusterStores ( final Cluster cluster , final List < StoreDefinition > storeDefs ) { // Constructing a StoreRoutingPlan has the (desirable in this // case) side-effect of verifying that the store definition is congruent // with the cluster definition. If there are issues, exceptions are // thrown. for ( StoreDefinition storeDefinition : storeDefs ) { new StoreRoutingPlan ( cluster , storeDefinition ) ; } return ; }
Verify store definitions are congruent with cluster definition .
99
12
147,838
/**
 * Validates that the final cluster is a legal superset of the current one:
 * same partition count and existing node IDs mapping to the same servers
 * (partition layout may change and new nodes may exist).
 */
public static void validateCurrentFinalCluster(final Cluster currentCluster,
                                               final Cluster finalCluster) {
    validateClusterPartitionCounts(currentCluster, finalCluster);
    validateClusterNodeState(currentCluster, finalCluster);
}
A final cluster ought to be a super set of current cluster . I . e . existing node IDs ought to map to same server but partition layout can have changed and there may exist new nodes .
57
39
147,839
/**
 * Validates that interim and final clusters share partition counts, zones,
 * node counts, and node state; partitions per node may differ.
 */
public static void validateInterimFinalCluster(final Cluster interimCluster,
                                               final Cluster finalCluster) {
    validateClusterPartitionCounts(interimCluster, finalCluster);
    validateClusterZonesSame(interimCluster, finalCluster);
    validateClusterNodeCounts(interimCluster, finalCluster);
    validateClusterNodeState(interimCluster, finalCluster);
}
Interim and final clusters ought to have same partition counts same zones and same node state . Partitions per node may of course differ .
90
27
147,840
/**
 * Confirms both clusters have the same total number of partitions.
 *
 * @throws VoldemortException if the counts differ
 */
public static void validateClusterPartitionCounts(final Cluster lhs, final Cluster rhs) {
    if (lhs.getNumberOfPartitions() != rhs.getNumberOfPartitions()) {
        throw new VoldemortException("Total number of partitions should be equal [ lhs cluster ("
                                     + lhs.getNumberOfPartitions()
                                     + ") not equal to rhs cluster ("
                                     + rhs.getNumberOfPartitions() + ") ]");
    }
}
Confirms that both clusters have the same number of total partitions .
104
13
147,841
/**
 * Confirms that all nodes shared between the clusters host exactly the same
 * partition IDs and that nodes present only in the superset cluster host no
 * partitions at all.
 *
 * @throws VoldemortException if subset node IDs are not contained in superset
 * @throws VoldemortRebalancingException on any partition mismatch
 */
public static void validateClusterPartitionState(final Cluster subsetCluster,
                                                 final Cluster supersetCluster) {
    if (!supersetCluster.getNodeIds().containsAll(subsetCluster.getNodeIds())) {
        throw new VoldemortException("Superset cluster does not contain all nodes from subset cluster[ subset cluster node ids ("
                                     + subsetCluster.getNodeIds()
                                     + ") are not a subset of superset cluster node ids ("
                                     + supersetCluster.getNodeIds() + ") ]");
    }
    for (int nodeId : subsetCluster.getNodeIds()) {
        Node supersetNode = supersetCluster.getNodeById(nodeId);
        Node subsetNode = subsetCluster.getNodeById(nodeId);
        if (!supersetNode.getPartitionIds().equals(subsetNode.getPartitionIds())) {
            throw new VoldemortRebalancingException("Partition IDs do not match between clusters for nodes with id "
                                                    + nodeId + " : subset cluster has "
                                                    + subsetNode.getPartitionIds()
                                                    + " and superset cluster has "
                                                    + supersetNode.getPartitionIds());
        }
    }
    // FIX: copy before removeAll — the original mutated the set returned by
    // supersetCluster.getNodeIds(), which can corrupt the cluster's internal
    // state if that accessor exposes its backing set.
    Set<Integer> nodeIds = new HashSet<Integer>(supersetCluster.getNodeIds());
    nodeIds.removeAll(subsetCluster.getNodeIds());
    for (int nodeId : nodeIds) {
        Node supersetNode = supersetCluster.getNodeById(nodeId);
        if (!supersetNode.getPartitionIds().isEmpty()) {
            throw new VoldemortRebalancingException("New node " + nodeId
                                                    + " in superset cluster already has partitions: "
                                                    + supersetNode.getPartitionIds());
        }
    }
}
Confirm that all nodes shared between clusters host exact same partition IDs and that nodes only in the super set cluster have no partition IDs .
407
27
147,842
/**
 * Confirms both clusters define the same set of zones.
 *
 * @throws VoldemortException if the zone sets differ
 */
public static void validateClusterZonesSame(final Cluster lhs, final Cluster rhs) {
    Set<Zone> lhsSet = new HashSet<Zone>(lhs.getZones());
    Set<Zone> rhsSet = new HashSet<Zone>(rhs.getZones());
    if (!lhsSet.equals(rhsSet)) {
        throw new VoldemortException("Zones are not the same [ lhs cluster zones ("
                                     + lhs.getZones()
                                     + ") not equal to rhs cluster zones ("
                                     + rhs.getZones() + ") ]");
    }
}
Confirms that both clusters have the same set of zones defined .
140
13
147,843
/**
 * Confirms both clusters contain exactly the same set of node IDs.
 *
 * @throws VoldemortException if the node ID sets differ
 */
public static void validateClusterNodeCounts(final Cluster lhs, final Cluster rhs) {
    if (!lhs.getNodeIds().equals(rhs.getNodeIds())) {
        throw new VoldemortException("Node ids are not the same [ lhs cluster node ids ("
                                     + lhs.getNodeIds()
                                     + ") not equal to rhs cluster node ids ("
                                     + rhs.getNodeIds() + ") ]");
    }
}
Confirms that both clusters have the same number of nodes by comparing set of node Ids between clusters .
111
21
147,844
/**
 * Given the current cluster and a zone id to drop, returns a clone of the
 * cluster in which every partition hosted in the dropped zone has been moved
 * to a node in one of the surviving zones.
 *
 * For each partition, the ring is walked forward (partition id + 1, wrapping
 * modulo the partition count) until an adjacent partition hosted outside the
 * dropped zone is found; the partition is then co-located with that neighbor,
 * which keeps movement local and avoids reshuffling the surviving zones.
 *
 * @param currentCluster cluster to vacate from (not modified)
 * @param dropZoneId zone whose partitions must all be relocated
 * @return a new Cluster with the dropped zone's partitions reassigned
 */
public static Cluster vacateZone(Cluster currentCluster, int dropZoneId) {
    Cluster returnCluster = Cluster.cloneCluster(currentCluster);
    // Go over each node in the zone being dropped
    for(Integer nodeId: currentCluster.getNodeIdsInZone(dropZoneId)) {
        // For each node grab all the partitions it hosts
        for(Integer partitionId: currentCluster.getNodeById(nodeId).getPartitionIds()) {
            // Now for each partition find a new home..which would be a node
            // in one of the existing zones
            int finalZoneId = -1;
            int finalNodeId = -1;
            int adjacentPartitionId = partitionId;
            do {
                adjacentPartitionId = (adjacentPartitionId + 1)
                                      % currentCluster.getNumberOfPartitions();
                finalNodeId = currentCluster.getNodeForPartitionId(adjacentPartitionId).getId();
                finalZoneId = currentCluster.getZoneForPartitionId(adjacentPartitionId).getId();
                if(adjacentPartitionId == partitionId) {
                    // Walked the whole ring back to the start: every other partition is
                    // also in the dropped zone, so this partition cannot be relocated.
                    // NOTE(review): the log message is missing a space before "stays".
                    logger.error("PartitionId " + partitionId + "stays unchanged \n");
                } else {
                    logger.info("PartitionId " + partitionId + " goes together with partition "
                                + adjacentPartitionId + " on node " + finalNodeId + " in zone "
                                + finalZoneId);
                    returnCluster = UpdateClusterUtils.createUpdatedCluster(returnCluster,
                                                                           finalNodeId,
                                                                           Lists.newArrayList(partitionId));
                }
            } while(finalZoneId == dropZoneId); // keep walking until we land outside the dropped zone
        }
    }
    return returnCluster;
}
Given the current cluster and a zone id that needs to be dropped this method will remove all partitions from the zone that is being dropped and move them to the existing zones . The partitions are moved intelligently so as to avoid unnecessary data movement in the existing zones .
366
53
147,845
public static Cluster dropZone ( Cluster intermediateCluster , int dropZoneId ) { // Filter out nodes that don't belong to the zone being dropped Set < Node > survivingNodes = new HashSet < Node > ( ) ; for ( int nodeId : intermediateCluster . getNodeIds ( ) ) { if ( intermediateCluster . getNodeById ( nodeId ) . getZoneId ( ) != dropZoneId ) { survivingNodes . add ( intermediateCluster . getNodeById ( nodeId ) ) ; } } // Filter out dropZoneId from all zones Set < Zone > zones = new HashSet < Zone > ( ) ; for ( int zoneId : intermediateCluster . getZoneIds ( ) ) { if ( zoneId == dropZoneId ) { continue ; } List < Integer > proximityList = intermediateCluster . getZoneById ( zoneId ) . getProximityList ( ) ; proximityList . remove ( new Integer ( dropZoneId ) ) ; zones . add ( new Zone ( zoneId , proximityList ) ) ; } return new Cluster ( intermediateCluster . getName ( ) , Utils . asSortedList ( survivingNodes ) , Utils . asSortedList ( zones ) ) ; }
Given an interim cluster with a previously vacated zone constructs a new cluster object with the dropped zone completely removed
262
20
147,846
/**
 * For the given stealer node, lists the primary partitions it gains when moving
 * from the current cluster to the final cluster. A node absent from the current
 * cluster steals every partition it hosts in the final cluster.
 */
public static List<Integer> getStolenPrimaryPartitions(final Cluster currentCluster,
                                                       final Cluster finalCluster,
                                                       final int stealNodeId) {
    List<Integer> stolen =
            new ArrayList<Integer>(finalCluster.getNodeById(stealNodeId).getPartitionIds());
    if(currentCluster.hasNodeWithId(stealNodeId)) {
        // Only the partitions not already hosted are "stolen".
        stolen.removeAll(currentCluster.getNodeById(stealNodeId).getPartitionIds());
    } else if(logger.isDebugEnabled()) {
        logger.debug("Current cluster does not contain stealer node (cluster : [[["
                     + currentCluster + "]]], node id " + stealNodeId + ")");
    }
    return stolen;
}
For a particular stealer node find all the primary partition tuples it will steal .
195
17
147,847
/**
 * Returns the non-view store definitions after checking that rebalance supports
 * every one of them; views are skipped with a debug log.
 *
 * @throws VoldemortException if a non-view store has an unsupported type
 */
public static List<StoreDefinition> validateRebalanceStore(List<StoreDefinition> storeDefList) {
    List<StoreDefinition> rebalanceableStores =
            new ArrayList<StoreDefinition>(storeDefList.size());
    for(StoreDefinition def: storeDefList) {
        if(def.isView()) {
            logger.debug("Ignoring view " + def.getName() + " for rebalancing");
            continue;
        }
        if(!canRebalanceList.contains(def.getType())) {
            throw new VoldemortException("Rebalance does not support rebalancing of stores of type "
                                         + def.getType() + " - " + def.getName());
        }
        rebalanceableStores.add(def);
    }
    return rebalanceableStores;
}
Given a list of store definitions makes sure that rebalance supports all of them . If not it throws an error .
183
24
147,848
/**
 * Writes both the current and final cluster XML into the output directory,
 * prefixing each file name with the supplied prefix.
 */
public static void dumpClusters(Cluster currentCluster,
                                Cluster finalCluster,
                                String outputDirName,
                                String filePrefix) {
    String currentFileName = filePrefix + currentClusterFileName;
    String finalFileName = filePrefix + finalClusterFileName;
    dumpClusterToFile(outputDirName, currentFileName, currentCluster);
    dumpClusterToFile(outputDirName, finalFileName, finalCluster);
}
Given the initial and final cluster dumps it into the output directory
79
12
147,849
/**
 * Writes the current and final cluster XML into the output directory with no
 * file-name prefix; see the four-argument overload.
 */
public static void dumpClusters(Cluster currentCluster, Cluster finalCluster, String outputDirName) {
    dumpClusters(currentCluster, finalCluster, outputDirName, "");
}
Given the current and final cluster dumps it into the output directory
43
12
147,850
/**
 * Serializes a cluster to XML and writes it into the given directory, creating
 * the directory if needed. A null directory name is a silent no-op; IO failures
 * are logged, not thrown.
 */
public static void dumpClusterToFile(String outputDirName, String fileName, Cluster cluster) {
    if(outputDirName == null) {
        return; // no target directory configured
    }
    File outputDir = new File(outputDirName);
    if(!outputDir.exists()) {
        Utils.mkdirs(outputDir);
    }
    try {
        String clusterXml = new ClusterMapper().writeCluster(cluster);
        FileUtils.writeStringToFile(new File(outputDirName, fileName), clusterXml);
    } catch(IOException e) {
        logger.error("IOException during dumpClusterToFile: " + e);
    }
}
Prints a cluster xml to a file .
133
9
147,851
/**
 * Serializes a list of store definitions to XML and writes it into the given
 * directory, creating the directory if needed. A null directory name is a
 * silent no-op; IO failures are logged, not thrown.
 */
public static void dumpStoreDefsToFile(String outputDirName,
                                       String fileName,
                                       List<StoreDefinition> storeDefs) {
    if(outputDirName == null) {
        return; // no target directory configured
    }
    File outputDir = new File(outputDirName);
    if(!outputDir.exists()) {
        Utils.mkdirs(outputDir);
    }
    try {
        String storesXml = new StoreDefinitionsMapper().writeStoreList(storeDefs);
        FileUtils.writeStringToFile(new File(outputDirName, fileName), storesXml);
    } catch(IOException e) {
        logger.error("IOException during dumpStoreDefsToFile: " + e);
    }
}
Prints a stores xml to a file .
145
9
147,852
/**
 * Writes a partition-balance analysis to "&lt;baseFileName&gt;.analysis" in the
 * given directory, creating the directory if needed. A null directory name is
 * a silent no-op; IO failures are logged, not thrown.
 */
public static void dumpAnalysisToFile(String outputDirName,
                                      String baseFileName,
                                      PartitionBalance partitionBalance) {
    if(outputDirName == null) {
        return; // no target directory configured
    }
    File outputDir = new File(outputDirName);
    if(!outputDir.exists()) {
        Utils.mkdirs(outputDir);
    }
    try {
        File analysisFile = new File(outputDirName, baseFileName + ".analysis");
        FileUtils.writeStringToFile(analysisFile, partitionBalance.toString());
    } catch(IOException e) {
        logger.error("IOException during dumpAnalysisToFile: " + e);
    }
}
Prints a balance analysis to a file .
134
9
147,853
/**
 * Writes the rebalance plan to "plan.out" in the given directory, creating the
 * directory if needed. A null directory name is a silent no-op; IO failures are
 * logged, not thrown.
 */
public static void dumpPlanToFile(String outputDirName, RebalancePlan plan) {
    if(outputDirName == null) {
        return; // no target directory configured
    }
    File outputDir = new File(outputDirName);
    if(!outputDir.exists()) {
        Utils.mkdirs(outputDir);
    }
    try {
        FileUtils.writeStringToFile(new File(outputDirName, "plan.out"), plan.toString());
    } catch(IOException e) {
        logger.error("IOException during dumpPlanToFile: " + e);
    }
}
Prints the plan to a file .
125
8
147,854
public static List < RebalanceTaskInfo > filterTaskPlanWithStores ( List < RebalanceTaskInfo > existingPlanList , List < StoreDefinition > storeDefs ) { List < RebalanceTaskInfo > plans = Lists . newArrayList ( ) ; List < String > storeNames = StoreDefinitionUtils . getStoreNames ( storeDefs ) ; for ( RebalanceTaskInfo existingPlan : existingPlanList ) { RebalanceTaskInfo info = RebalanceTaskInfo . create ( existingPlan . toJsonString ( ) ) ; // Filter the plans only for stores given HashMap < String , List < Integer > > storeToPartitions = info . getStoreToPartitionIds ( ) ; HashMap < String , List < Integer > > newStoreToPartitions = Maps . newHashMap ( ) ; for ( String storeName : storeNames ) { if ( storeToPartitions . containsKey ( storeName ) ) newStoreToPartitions . put ( storeName , storeToPartitions . get ( storeName ) ) ; } info . setStoreToPartitionList ( newStoreToPartitions ) ; plans . add ( info ) ; } return plans ; }
Given a list of partition plans and a set of stores copies the store names to every individual plan and creates a new list
248
24
147,855
/**
 * Gracefully shuts down the executor and waits up to {@code timeOutSec} seconds
 * for running tasks to finish; if the timeout elapses, forcibly cancels the
 * remaining tasks so threads are not leaked. Never throws.
 *
 * @param executorService executor to stop
 * @param timeOutSec maximum seconds to wait for graceful termination
 */
public static void executorShutDown(ExecutorService executorService, long timeOutSec) {
    try {
        executorService.shutdown();
        if(!executorService.awaitTermination(timeOutSec, TimeUnit.SECONDS)) {
            // BUGFIX: the original returned after the timeout without forcing
            // shutdown, leaking any still-running tasks/threads.
            executorService.shutdownNow();
        }
    } catch(InterruptedException e) {
        executorService.shutdownNow();
        // Restore the interrupt status so callers can observe the interruption.
        Thread.currentThread().interrupt();
        logger.warn("Error while stopping executor service.", e);
    } catch(Exception e) {
        // Typo fixed: "stoping" -> "stopping".
        logger.warn("Error while stopping executor service.", e);
    }
}
Wait to shutdown service
77
4
147,856
/**
 * Trivially "resolves" an inconsistency: a single value resolves to itself
 * (wrapped in an immutable singleton list) and multiple values are returned
 * unchanged. An empty input is returned as-is.
 *
 * @param values candidate versions; may be empty
 * @return the resolved list (never throws on empty input)
 */
public List<T> resolveConflicts(List<T> values) {
    if(values.size() == 1) {
        return Collections.singletonList(values.get(0));
    }
    // BUGFIX: the original fell into values.get(0) for every size != > 1 case,
    // which threw IndexOutOfBoundsException on an empty list. Sizes 0 and >1
    // are now both returned unchanged.
    return values;
}
Arbitrarily resolve the inconsistency by choosing the first object if there is one .
46
16
147,857
/**
 * Implements a single-key get by delegating to {@code getAll} with singleton
 * key and transform collections.
 */
public static <K, V, T> List<Versioned<V>> get(Store<K, V, T> storageEngine, K key, T transform) {
    Map<K, List<Versioned<V>>> byKey =
            storageEngine.getAll(Collections.singleton(key),
                                 Collections.singletonMap(key, transform));
    if(byKey.isEmpty()) {
        return Collections.emptyList();
    }
    return byKey.get(key);
}
Implements get by delegating to getAll .
106
11
147,858
/**
 * Implements getAll by issuing one {@code get} per key; keys with no versions
 * are omitted from the result map.
 */
public static <K, V, T> Map<K, List<Versioned<V>>> getAll(Store<K, V, T> storageEngine,
                                                          Iterable<K> keys,
                                                          Map<K, T> transforms) {
    Map<K, List<Versioned<V>>> found = newEmptyHashMap(keys);
    for(K key: keys) {
        T transform = (transforms == null) ? null : transforms.get(key);
        List<Versioned<V>> versions = storageEngine.get(key, transform);
        if(!versions.isEmpty()) {
            found.put(key, versions);
        }
    }
    return found;
}
Implements getAll by delegating to get .
137
11
147,859
/**
 * Returns an empty HashMap, presized to the iterable's size when the iterable
 * is a Collection; otherwise a default-sized map.
 */
public static <K, V> HashMap<K, V> newEmptyHashMap(Iterable<?> iterable) {
    if(!(iterable instanceof Collection<?>)) {
        return Maps.newHashMap();
    }
    int expectedSize = ((Collection<?>) iterable).size();
    return Maps.newHashMapWithExpectedSize(expectedSize);
}
Returns an empty map with expected size matching the iterable size if it s of type Collection . Otherwise an empty map with the default size is returned .
76
30
147,860
/**
 * Verifies that the current node is one of the replicas the routing strategy
 * selects for the given key; otherwise throws InvalidMetadataException.
 */
public static void assertValidMetadata(ByteArray key,
                                       RoutingStrategy routingStrategy,
                                       Node currentNode) {
    for(Node replica: routingStrategy.routeRequest(key.get())) {
        if(replica.getId() == currentNode.getId()) {
            return; // this node legitimately hosts the key
        }
    }
    throw new InvalidMetadataException("Client accessing key belonging to partitions "
                                       + routingStrategy.getPartitionList(key.get())
                                       + " not present at " + currentNode);
}
Check if the current node is part of routing request based on cluster . xml or throw an exception .
118
20
147,861
/**
 * Verifies that the given node id belongs to the cluster managed by the
 * metadata store; otherwise throws InvalidMetadataException.
 */
public static void assertValidNode(MetadataStore metadataStore, Integer nodeId) {
    boolean known = metadataStore.getCluster().hasNodeWithId(nodeId);
    if(!known) {
        throw new InvalidMetadataException("NodeId " + nodeId
                                           + " is not or no longer in this cluster");
    }
}
Check if the the nodeId is present in the cluster managed by the metadata store or throw an exception .
69
21
147,862
/**
 * Casts the serializer produced by the factory to the caller's expected type.
 * Temporary measure until a type-safe way of retrieving serializers from a
 * SerializerFactory exists; centralizing the unchecked cast here keeps
 * warnings out of the rest of the codebase and makes call sites easy to audit.
 */
@SuppressWarnings("unchecked")
public static <T> Serializer<T> unsafeGetSerializer(SerializerFactory serializerFactory,
                                                    SerializerDefinition serializerDefinition) {
    return (Serializer<T>) serializerFactory.getSerializer(serializerDefinition);
}
This is a temporary measure until we have a type - safe solution for retrieving serializers from a SerializerFactory . It avoids warnings all over the codebase while making it easy to verify who calls it .
64
41
147,863
/**
 * Looks up a store definition by name in the given list.
 *
 * @return the matching definition, or null when no store has that name
 */
public static StoreDefinition getStoreDef(List<StoreDefinition> list, String name) {
    for(StoreDefinition candidate: list) {
        if(candidate.getName().equals(name)) {
            return candidate;
        }
    }
    return null;
}
Get a store definition from the given list of store definitions
48
11
147,864
/**
 * Collects the names of the given store definitions, optionally skipping views.
 */
public static List<String> getStoreNames(List<StoreDefinition> list, boolean ignoreViews) {
    List<String> names = new ArrayList<String>();
    for(StoreDefinition def: list) {
        boolean skip = def.isView() && ignoreViews;
        if(!skip) {
            names.add(def.getName());
        }
    }
    return names;
}
Get the list of store names from a list of store definitions
83
12
147,865
/**
 * Creates the rebalance plan as a sequence of batches, each moving at most
 * {@code batchSize} primary partitions. Moving one primary partition may also
 * migrate its n-ary replicas and trigger deletions; every migrated partition
 * carries one partition-store per store hosted at that partition.
 */
private void plan() {
    // Mapping of stealer node to list of primary partitions being moved
    final TreeMultimap<Integer, Integer> stealerToStolenPrimaryPartitions = TreeMultimap.create();
    // Output initial and final cluster
    if(outputDir != null)
        RebalanceUtils.dumpClusters(currentCluster, finalCluster, outputDir);
    // Determine which partitions must be stolen
    for(Node stealerNode: finalCluster.getNodes()) {
        List<Integer> stolenPrimaryPartitions =
                RebalanceUtils.getStolenPrimaryPartitions(currentCluster,
                                                          finalCluster,
                                                          stealerNode.getId());
        if(stolenPrimaryPartitions.size() > 0) {
            numPrimaryPartitionMoves += stolenPrimaryPartitions.size();
            stealerToStolenPrimaryPartitions.putAll(stealerNode.getId(),
                                                    stolenPrimaryPartitions);
        }
    }
    // Determine plan batch-by-batch
    int batches = 0;
    Cluster batchCurrentCluster = Cluster.cloneCluster(currentCluster);
    List<StoreDefinition> batchCurrentStoreDefs = this.currentStoreDefs;
    List<StoreDefinition> batchFinalStoreDefs = this.finalStoreDefs;
    Cluster batchFinalCluster = RebalanceUtils.getInterimCluster(this.currentCluster,
                                                                 this.finalCluster);
    while(!stealerToStolenPrimaryPartitions.isEmpty()) {
        int partitions = 0;
        List<Entry<Integer, Integer>> partitionsMoved = Lists.newArrayList();
        // Take up to batchSize (stealer, partition) pairs for this batch and
        // fold each move into the batch's target cluster.
        for(Entry<Integer, Integer> stealerToPartition: stealerToStolenPrimaryPartitions.entries()) {
            partitionsMoved.add(stealerToPartition);
            batchFinalCluster = UpdateClusterUtils.createUpdatedCluster(batchFinalCluster,
                                                                        stealerToPartition.getKey(),
                                                                        Lists.newArrayList(stealerToPartition.getValue()));
            partitions++;
            if(partitions == batchSize)
                break;
        }
        // Remove the partitions moved
        // (done outside the loop above to avoid modifying the multimap while iterating it)
        for(Iterator<Entry<Integer, Integer>> partitionMoved = partitionsMoved.iterator(); partitionMoved.hasNext();) {
            Entry<Integer, Integer> entry = partitionMoved.next();
            stealerToStolenPrimaryPartitions.remove(entry.getKey(), entry.getValue());
        }
        if(outputDir != null)
            RebalanceUtils.dumpClusters(batchCurrentCluster,
                                        batchFinalCluster,
                                        outputDir,
                                        "batch-" + Integer.toString(batches) + ".");
        // Generate a plan to compute the tasks
        // NOTE(review): the local variable shadows its own type name.
        final RebalanceBatchPlan RebalanceBatchPlan = new RebalanceBatchPlan(batchCurrentCluster,
                                                                             batchCurrentStoreDefs,
                                                                             batchFinalCluster,
                                                                             batchFinalStoreDefs);
        batchPlans.add(RebalanceBatchPlan);
        // Accumulate per-batch statistics into the plan-wide counters.
        numXZonePartitionStoreMoves += RebalanceBatchPlan.getCrossZonePartitionStoreMoves();
        numPartitionStoreMoves += RebalanceBatchPlan.getPartitionStoreMoves();
        nodeMoveMap.add(RebalanceBatchPlan.getNodeMoveMap());
        zoneMoveMap.add(RebalanceBatchPlan.getZoneMoveMap());
        batches++;
        // This batch's target becomes the next batch's starting point.
        batchCurrentCluster = Cluster.cloneCluster(batchFinalCluster);
        // batchCurrentStoreDefs can only be different from
        // batchFinalStoreDefs for the initial batch.
        batchCurrentStoreDefs = batchFinalStoreDefs;
    }
    logger.info(this);
}
Create a plan . The plan consists of batches . Each batch involves the movement of no more than batchSize primary partitions . The movement of a single primary partition may require migration of other n - ary replicas and potentially deletions . Migrating a primary or n - ary partition requires migrating one partition - store for every store hosted at that partition .
795
72
147,866
/**
 * Computes the per-node storage overhead of the rebalance (initial n-ary
 * partition count plus incoming load, relative to the initial count) and
 * returns a pretty-printed summary including the maximum overhead observed.
 *
 * @param finalNodeToOverhead additional partition-store load per final-cluster node id
 * @return human-readable multi-line summary
 */
private String storageOverhead(Map<Integer, Integer> finalNodeToOverhead) {
    double maxOverhead = Double.MIN_VALUE;
    PartitionBalance pb = new PartitionBalance(currentCluster, currentStoreDefs);
    StringBuilder sb = new StringBuilder();
    sb.append("Per-node store-overhead:").append(Utils.NEWLINE);
    DecimalFormat doubleDf = new DecimalFormat("####.##");
    for(int nodeId: finalCluster.getNodeIds()) {
        Node node = finalCluster.getNodeById(nodeId);
        String nodeTag = "Node " + String.format("%4d", nodeId) + " (" + node.getHost() + ")";
        // Nodes new to the cluster start with zero initial load.
        int initialLoad = 0;
        if(currentCluster.getNodeIds().contains(nodeId)) {
            initialLoad = pb.getNaryPartitionCount(nodeId);
        }
        int toLoad = 0;
        if(finalNodeToOverhead.containsKey(nodeId)) {
            toLoad = finalNodeToOverhead.get(nodeId);
        }
        // NOTE(review): when initialLoad == 0 this divides by zero and prints
        // "Infinity X" for brand-new nodes; the max-overhead tracking below
        // guards against it, but the per-node line does not.
        double overhead = (initialLoad + toLoad) / (double) initialLoad;
        if(initialLoad > 0 && maxOverhead < overhead) {
            maxOverhead = overhead;
        }
        String loadTag = String.format("%6d", initialLoad) + " + "
                         + String.format("%6d", toLoad) + " -> "
                         + String.format("%6d", initialLoad + toLoad) + " ("
                         + doubleDf.format(overhead) + " X)";
        sb.append(nodeTag + " : " + loadTag).append(Utils.NEWLINE);
    }
    sb.append(Utils.NEWLINE)
      .append("**** Max per-node storage overhead: " + doubleDf.format(maxOverhead) + " X.")
      .append(Utils.NEWLINE);
    return (sb.toString());
}
Determines storage overhead and returns pretty printed summary .
451
11
147,867
/**
 * Pauses and asks the user to confirm a dangerous operation on stdin, unless
 * confirmation was already given on the command line.
 *
 * @param confirm true when the operation is pre-confirmed (skips the prompt)
 * @param opDesc description of the operation, echoed in the prompt
 * @return true when the user answered "yes"/"y" (or confirm was true)
 * @throws IOException if reading from stdin fails
 */
public static Boolean askConfirm(Boolean confirm, String opDesc) throws IOException {
    if(confirm) {
        System.out.println("Confirmed " + opDesc + " in command-line.");
        return true;
    }
    System.out.println("Are you sure you want to " + opDesc + "? (yes/no)");
    BufferedReader buffer = new BufferedReader(new InputStreamReader(System.in));
    String text = buffer.readLine();
    // BUGFIX: readLine() returns null at end-of-stream (e.g. closed or piped
    // stdin); the original then threw a NullPointerException. Treat EOF as "no".
    boolean go = false;
    if(text != null) {
        String answer = text.toLowerCase(Locale.ENGLISH);
        go = answer.equals("yes") || answer.equals("y");
    }
    if(!go) {
        System.out.println("Did not confirm; " + opDesc + " aborted.");
    }
    return go;
}
Utility function that pauses and asks for confirmation on dangerous operations .
175
13
147,868
/**
 * Flattens a list of "key&lt;delim&gt;value" strings into an alternating
 * key/value list.
 *
 * @throws VoldemortException if any entry does not split into exactly two parts
 */
public static List<String> getValueList(List<String> valuePairs, String delim) {
    List<String> flattened = Lists.newArrayList();
    for(String pair: valuePairs) {
        String[] parts = pair.split(delim, 2);
        if(parts.length != 2) {
            throw new VoldemortException("Invalid argument pair: " + pair);
        }
        flattened.add(parts[0]);
        flattened.add(parts[1]);
    }
    return flattened;
}
Utility function that gives list of values from list of value - pair strings .
118
16
147,869
/**
 * Converts an even-length list of alternating keys and values into a map.
 *
 * @throws VoldemortException if the list has odd length
 */
public static <V> Map<V, V> convertListToMap(List<V> list) {
    if(list.size() % 2 != 0) {
        throw new VoldemortException("Failed to convert list to map.");
    }
    Map<V, V> map = new HashMap<V, V>();
    for(int i = 0; i + 1 < list.size(); i += 2) {
        map.put(list.get(i), list.get(i + 1));
    }
    return map;
}
Utility function that converts a list to a map .
116
11
147,870
/**
 * Builds an AdminClient for the given bootstrap URL with 5-second connection
 * and admin-socket timeouts.
 */
public static AdminClient getAdminClient(String url) {
    AdminClientConfig adminConfig = new AdminClientConfig().setAdminSocketTimeoutSec(5);
    ClientConfig clientConfig = new ClientConfig().setBootstrapUrls(url)
                                                  .setConnectionTimeout(5, TimeUnit.SECONDS);
    return new AdminClient(adminConfig, clientConfig);
}
Utility function that constructs AdminClient .
79
8
147,871
/**
 * Returns a fresh list of every node id known to the admin client's cluster.
 */
public static List<Integer> getAllNodeIds(AdminClient adminClient) {
    return Lists.newArrayList(adminClient.getAdminClientCluster().getNodeIds());
}
Utility function that fetches node ids .
76
10
147,872
/**
 * Fetches the names of all stores defined on the given node.
 */
public static List<String> getAllUserStoreNamesOnNode(AdminClient adminClient, Integer nodeId) {
    List<String> names = Lists.newArrayList();
    for(StoreDefinition def: adminClient.metadataMgmtOps.getRemoteStoreDefList(nodeId).getValue()) {
        names.add(def.getName());
    }
    return names;
}
Utility function that fetches all stores on a node .
104
12
147,873
/**
 * Checks that every given store name exists on the node; calls
 * {@code Utils.croak} (which aborts) for the first missing store.
 *
 * @param adminClient admin client used to fetch the node's store definitions
 * @param nodeId node to validate against
 * @param storeNames store names that must all exist on the node
 */
public static void validateUserStoreNamesOnNode(AdminClient adminClient,
                                                Integer nodeId,
                                                List<String> storeNames) {
    // IDIOM: the original used a HashMap<String, Boolean> as a makeshift set;
    // a HashSet expresses the membership check directly.
    Set<String> existingStoreNames = new HashSet<String>();
    for(StoreDefinition storeDef: adminClient.metadataMgmtOps.getRemoteStoreDefList(nodeId)
                                                             .getValue()) {
        existingStoreNames.add(storeDef.getName());
    }
    for(String storeName: storeNames) {
        if(!existingStoreNames.contains(storeName)) {
            Utils.croak("Store " + storeName + " does not exist!");
        }
    }
}
Utility function that checks if store names are valid on a node .
165
14
147,874
/**
 * Collects the partition ids hosted by every node in the admin client's cluster.
 *
 * @return list of all partition ids, in node-iteration order
 */
public static List<Integer> getAllPartitions(AdminClient adminClient) {
    // BUGFIX: the original assigned partIds = Lists.newArrayList() twice in a
    // row; the redundant second assignment is removed.
    List<Integer> partIds = Lists.newArrayList();
    for(Node node: adminClient.getAdminClientCluster().getNodes()) {
        partIds.addAll(node.getPartitionIds());
    }
    return partIds;
}
Utility function that fetches partitions .
93
8
147,875
/**
 * Parses quota-type names into QuotaType values. The single sentinel
 * {@code AdminToolUtils.QUOTATYPE_ALL} expands to every quota type.
 *
 * @throws VoldemortException if no quota type was specified
 * @throws IllegalArgumentException if a name is not a valid QuotaType
 */
public static List<QuotaType> getQuotaTypes(List<String> strQuotaTypes) {
    if(strQuotaTypes.isEmpty()) {
        throw new VoldemortException("Quota type not specified.");
    }
    boolean wantAll = strQuotaTypes.size() == 1
                      && strQuotaTypes.get(0).equals(AdminToolUtils.QUOTATYPE_ALL);
    if(wantAll) {
        return Arrays.asList(QuotaType.values());
    }
    List<QuotaType> parsed = new ArrayList<QuotaType>();
    for(String name: strQuotaTypes) {
        parsed.add(QuotaType.valueOf(name));
    }
    return parsed;
}
Utility function that fetches quota types .
183
9
147,876
public static File createDir ( String dir ) { // create outdir File directory = null ; if ( dir != null ) { directory = new File ( dir ) ; if ( ! ( directory . exists ( ) || directory . mkdir ( ) ) ) { Utils . croak ( "Can't find or create directory " + dir ) ; } } return directory ; }
Utility function that creates directory .
77
7
147,877
/**
 * Builds a name-to-definition map of all system stores.
 */
public static Map<String, StoreDefinition> getSystemStoreDefMap() {
    Map<String, StoreDefinition> byName = Maps.newHashMap();
    for(StoreDefinition def: SystemStoreConstants.getAllSystemStoreDefs()) {
        byName.put(def.getName(), def);
    }
    return byName;
}
Utility function that fetches system store definitions
96
9
147,878
/**
 * Builds a name-to-definition map of the stores defined on the given node.
 */
public static Map<String, StoreDefinition> getUserStoreDefMapOnNode(AdminClient adminClient,
                                                                    Integer nodeId) {
    Map<String, StoreDefinition> byName = Maps.newHashMap();
    for(StoreDefinition def: adminClient.metadataMgmtOps.getRemoteStoreDefList(nodeId)
                                                        .getValue()) {
        byName.put(def.getName(), def);
    }
    return byName;
}
Utility function that fetches user defined store definitions
116
10
147,879
/**
 * Converts a protobuf rebalance-task info message into the internal
 * RebalanceTaskInfo representation.
 */
public static RebalanceTaskInfo decodeRebalanceTaskInfoMap(VAdminProto.RebalanceTaskInfoMap rebalanceTaskInfoMap) {
    // Rehydrate the initial cluster from its embedded XML first, then assemble
    // the task info.
    Cluster initialCluster =
            new ClusterMapper().readCluster(new StringReader(rebalanceTaskInfoMap.getInitialCluster()));
    return new RebalanceTaskInfo(rebalanceTaskInfoMap.getStealerId(),
                                 rebalanceTaskInfoMap.getDonorId(),
                                 decodeStoreToPartitionIds(rebalanceTaskInfoMap.getPerStorePartitionIdsList()),
                                 initialCluster);
}
Given a protobuf rebalance - partition info converts it into our rebalance - partition info
143
21
147,880
/**
 * Converts an internal RebalanceTaskInfo into its protobuf equivalent.
 */
public static RebalanceTaskInfoMap encodeRebalanceTaskInfoMap(RebalanceTaskInfo stealInfo) {
    String initialClusterXml = new ClusterMapper().writeCluster(stealInfo.getInitialCluster());
    return RebalanceTaskInfoMap.newBuilder()
                               .setStealerId(stealInfo.getStealerId())
                               .setDonorId(stealInfo.getDonorId())
                               .addAllPerStorePartitionIds(ProtoUtils.encodeStoreToPartitionsTuple(stealInfo.getStoreToPartitionIds()))
                               .setInitialCluster(initialClusterXml)
                               .build();
}
Given a rebalance - task info convert it into the protobuf equivalent
136
16
147,881
/**
 * Returns the element stored at the given list index together with its version.
 *
 * @param id index of the list node to fetch
 * @throws IndexOutOfBoundsException if no list node exists for the id
 */
public Versioned<E> getVersionedById(int id) {
    Versioned<VListNode<E>> listNode = getListNode(id);
    if(listNode == null) {
        // IMPROVEMENT: include the offending id; the original threw a
        // message-less IndexOutOfBoundsException, making failures undiagnosable.
        throw new IndexOutOfBoundsException("No list node found for id " + id);
    }
    return new Versioned<E>(listNode.getValue().getValue(), listNode.getVersion());
}
Get the versioned element stored at the given id .
83
3
147,882
/**
 * Stores the given element at the given id, using the version of the current
 * list node at that id for optimistic concurrency.
 *
 * @return the previous element at that id
 * @throws ObsoleteVersionException if the versioned update could not be applied
 */
public E setById(final int id, final E element) {
    VListKey<K> nodeKey = new VListKey<K>(_key, id);
    UpdateElementById<K, E> updateAction = new UpdateElementById<K, E>(nodeKey, element);
    boolean applied = _storeClient.applyUpdate(updateAction);
    if(!applied) {
        throw new ObsoleteVersionException("update failed");
    }
    return updateAction.getResult();
}
Put the given value to the appropriate id in the stack using the version of the current list node identified by that id .
102
24
147,883
private void allClustersEqual ( final List < String > clusterUrls ) { Validate . notEmpty ( clusterUrls , "clusterUrls cannot be null" ) ; // If only one clusterUrl return immediately if ( clusterUrls . size ( ) == 1 ) return ; AdminClient adminClientLhs = adminClientPerCluster . get ( clusterUrls . get ( 0 ) ) ; Cluster clusterLhs = adminClientLhs . getAdminClientCluster ( ) ; for ( int index = 1 ; index < clusterUrls . size ( ) ; index ++ ) { AdminClient adminClientRhs = adminClientPerCluster . get ( clusterUrls . get ( index ) ) ; Cluster clusterRhs = adminClientRhs . getAdminClientCluster ( ) ; if ( ! areTwoClustersEqual ( clusterLhs , clusterRhs ) ) throw new VoldemortException ( "Cluster " + clusterLhs . getName ( ) + " is not the same as " + clusterRhs . getName ( ) ) ; } }
Check if all cluster objects in the list are congruent .
226
13
147,884
private synchronized JsonSchema getInputPathJsonSchema ( ) throws IOException { if ( inputPathJsonSchema == null ) { // No need to query Hadoop more than once as this shouldn't change mid-run, // thus, we can lazily initialize and cache the result. inputPathJsonSchema = HadoopUtils . getSchemaFromPath ( getInputPath ( ) ) ; } return inputPathJsonSchema ; }
Get the Json Schema of the input path assuming the path contains just one schema version in all files under that path .
100
25
147,885
private synchronized Schema getInputPathAvroSchema ( ) throws IOException { if ( inputPathAvroSchema == null ) { // No need to query Hadoop more than once as this shouldn't change mid-run, // thus, we can lazily initialize and cache the result. inputPathAvroSchema = AvroUtils . getAvroSchemaFromPath ( getInputPath ( ) ) ; } return inputPathAvroSchema ; }
Get the Avro Schema of the input path assuming the path contains just one schema version in all files under that path .
99
25
147,886
/**
 * Returns the Avro record schema of the object container file as a string.
 */
public String getRecordSchema() throws IOException {
    return getInputPathAvroSchema().toString();
}
Get the schema for the Avro Record from the object container file
44
13
147,887
/**
 * Returns the schema of the key field as a string.
 */
public String getKeySchema() throws IOException {
    Schema fieldSchema = getInputPathAvroSchema().getField(keyFieldName).schema();
    return fieldSchema.toString();
}
Extract schema of the key field
56
7
147,888
/**
 * Returns the schema of the value field as a string.
 */
public String getValueSchema() throws IOException {
    Schema fieldSchema = getInputPathAvroSchema().getField(valueFieldName).schema();
    return fieldSchema.toString();
}
Extract schema of the value field
56
7
147,889
/**
 * Builds the intended store definition from the push properties (replication
 * factor, read/write quorums, optionally forced key/value schemas) and asks
 * each node of the cluster to verify it against the remote definition,
 * creating the store where it does not exist (if store creation is enabled).
 * Unreachable nodes are skipped; the store is created there on a later run.
 *
 * @param clusterURL bootstrap URL of the cluster to verify against
 * @param keySchema default key schema, overridable via PUSH_FORCE_SCHEMA_KEY
 * @param valueSchema default value schema, overridable via PUSH_FORCE_SCHEMA_VALUE
 */
private void verifyOrAddStore(String clusterURL, String keySchema, String valueSchema) {
    String newStoreDefXml = VoldemortUtils.getStoreDefXml(storeName,
                                                          props.getInt(BUILD_REPLICATION_FACTOR, 2),
                                                          props.getInt(BUILD_REQUIRED_READS, 1),
                                                          props.getInt(BUILD_REQUIRED_WRITES, 1),
                                                          props.getNullableInt(BUILD_PREFERRED_READS),
                                                          props.getNullableInt(BUILD_PREFERRED_WRITES),
                                                          props.getString(PUSH_FORCE_SCHEMA_KEY, keySchema),
                                                          props.getString(PUSH_FORCE_SCHEMA_VALUE, valueSchema),
                                                          description,
                                                          owners);
    log.info("Verifying store against cluster URL: " + clusterURL + "\n"
             + newStoreDefXml.toString());
    StoreDefinition newStoreDef = VoldemortUtils.getStoreDef(newStoreDefXml);
    try {
        adminClientPerCluster.get(clusterURL).storeMgmtOps.verifyOrAddStore(newStoreDef,
                                                                            "BnP config/data",
                                                                            enableStoreCreation,
                                                                            this.storeVerificationExecutorService);
    } catch(UnreachableStoreException e) {
        log.info("verifyOrAddStore() failed on some nodes for clusterURL: " + clusterURL
                 + " (this is harmless).", e);
        // When we can't reach some node, we just skip it and won't create the store on it.
        // Next time BnP is run while the node is up, it will get the store created.
    }
    // Other exceptions need to bubble up!
    storeDef = newStoreDef;
}
For each node checks if the store exists and then verifies that the remote schema matches the new one . If the remote store doesn't exist it creates it .
396
32
147,890
/**
 * Reconciles this StoreVersionManager's in-memory state with the content of
 * the rootDir provided at construction time: removes versions that vanished
 * from disk, registers versions found on disk (with their enabled/disabled
 * marker state), and re-reads the current-version symlink.
 *
 * @param alsoSyncRemoteState forwarded to removeVersion for versions missing on disk
 */
public void syncInternalStateFromFileSystem(boolean alsoSyncRemoteState) {
    // Make sure versions missing from the file-system are cleaned up from the internal state
    // NOTE(review): if removeVersion() mutates versionToEnabledMap, iterating its
    // keySet() here risks a ConcurrentModificationException (unless the map is a
    // concurrent implementation) — confirm against removeVersion's implementation.
    for(Long version: versionToEnabledMap.keySet()) {
        File[] existingVersionDirs = ReadOnlyUtils.getVersionDirs(rootDir, version, version);
        if(existingVersionDirs.length == 0) {
            removeVersion(version, alsoSyncRemoteState);
        }
    }
    // Make sure we have all versions on the file-system in the internal state
    File[] versionDirs = ReadOnlyUtils.getVersionDirs(rootDir);
    if(versionDirs != null) {
        for(File versionDir: versionDirs) {
            long versionNumber = ReadOnlyUtils.getVersionId(versionDir);
            boolean versionEnabled = isVersionEnabled(versionDir);
            versionToEnabledMap.put(versionNumber, versionEnabled);
        }
    }
    // Identify the current version (based on a symlink in the file-system)
    File currentVersionDir = ReadOnlyUtils.getCurrentVersion(rootDir);
    if(currentVersionDir != null) {
        currentVersion = ReadOnlyUtils.getVersionId(currentVersionDir);
    } else {
        currentVersion = -1; // Should we throw instead?
    }
    logger.info("Successfully synced internal state from local file-system: " + this.toString());
}
Compares the StoreVersionManager s internal state with the content on the file - system of the rootDir provided at construction time .
309
26
147,891
/**
 * Places a disabled-marker file in the directory of the given version so the
 * disabled state survives restarts.
 *
 * @throws PersistenceFailureException if the marker file cannot be created
 */
private void persistDisabledVersion(long version) throws PersistenceFailureException {
    File disabledMarker = getDisabledMarkerFile(version);
    try {
        disabledMarker.createNewFile();
    } catch(IOException e) {
        String message = "Failed to create the disabled marker at path: "
                         + disabledMarker.getAbsolutePath() + "\nThe store/version "
                         + "will remain disabled only until the next restart.";
        throw new PersistenceFailureException(message, e);
    }
}
Places a disabled marker file in the directory of the specified version .
106
14
147,892
/**
 * Deletes the disabled-marker file in the directory of the given version,
 * persisting the enabled state. A missing marker is a no-op.
 *
 * @throws PersistenceFailureException if the marker exists but cannot be deleted
 */
private void persistEnabledVersion(long version) throws PersistenceFailureException {
    File disabledMarker = getDisabledMarkerFile(version);
    if(disabledMarker.exists()) {
        if(!disabledMarker.delete()) {
            // BUGFIX: the original message said "Failed to create the disabled
            // marker" — a copy-paste from persistDisabledVersion; this path deletes.
            throw new PersistenceFailureException("Failed to delete the disabled marker at path: "
                                                  + disabledMarker.getAbsolutePath()
                                                  + "\nThe store/version "
                                                  + "will remain enabled only until the next restart.");
        }
    }
}
Deletes the disabled marker file in the directory of the specified version .
107
14
147,893
/**
 * Resolves the disabled-marker file for a given version of this store.
 * The returned file may or may not exist on disk.
 *
 * @param version the store version whose marker file is requested
 * @return a File handle pointing at the (possibly absent) marker
 * @throws PersistenceFailureException if no directory exists for that version
 */
private File getDisabledMarkerFile(long version) throws PersistenceFailureException {
    // There should be exactly one directory matching the requested version.
    final File[] matchingDirs = ReadOnlyUtils.getVersionDirs(rootDir, version, version);
    if (matchingDirs.length == 0) {
        throw new PersistenceFailureException("getDisabledMarkerFile did not find the requested version directory"
                                              + " on disk. Version: " + version + ", rootDir: " + rootDir);
    }
    return new File(matchingDirs[0], DISABLED_MARKER_NAME);
}
Gets the .disabled marker file for a given version of this store. That file may or may not exist.
132
22
147,894
/**
 * Returns the average event value over the current interval, computed from
 * the delta between the last two interval snapshots. Returns 0.0 when no
 * events were recorded in the interval.
 */
public Double getAvgEventValue() {
    resetIfNeeded();
    synchronized (this) {
        // Number of events seen during the most recent interval.
        final long intervalEvents = numEventsLastInterval - numEventsLastLastInterval;
        if (intervalEvents <= 0) {
            return 0.0;
        }
        // Total value accumulated during that same interval.
        final double intervalValue = (totalEventValueLastInterval - totalEventValueLastLastInterval) * 1.0;
        return intervalValue / intervalEvents;
    }
}
Returns the average event value in the current interval
86
9
147,895
@ SuppressWarnings ( "unchecked" ) public static void executeCommand ( String [ ] args ) throws IOException { OptionParser parser = getParser ( ) ; // declare parameters List < String > metaKeys = null ; String url = null ; // parse command-line input args = AdminToolUtils . copyArrayAddFirst ( args , "--" + OPT_HEAD_META_CHECK ) ; OptionSet options = parser . parse ( args ) ; if ( options . has ( AdminParserUtils . OPT_HELP ) ) { printHelp ( System . out ) ; return ; } // check required options and/or conflicting options AdminParserUtils . checkRequired ( options , OPT_HEAD_META_CHECK ) ; AdminParserUtils . checkRequired ( options , AdminParserUtils . OPT_URL ) ; // load parameters metaKeys = ( List < String > ) options . valuesOf ( OPT_HEAD_META_CHECK ) ; url = ( String ) options . valueOf ( AdminParserUtils . OPT_URL ) ; // execute command if ( metaKeys . size ( ) == 0 || ( metaKeys . size ( ) == 1 && metaKeys . get ( 0 ) . equals ( METAKEY_ALL ) ) ) { metaKeys = Lists . newArrayList ( ) ; metaKeys . add ( MetadataStore . CLUSTER_KEY ) ; metaKeys . add ( MetadataStore . STORES_KEY ) ; metaKeys . add ( MetadataStore . SERVER_STATE_KEY ) ; } AdminClient adminClient = AdminToolUtils . getAdminClient ( url ) ; doMetaCheck ( adminClient , metaKeys ) ; }
Parses the command line and checks whether metadata is consistent across all nodes.
356
16
147,896
/**
 * Parses the command line and removes rebalancing-related metadata from the
 * selected nodes (defaults to every node in the cluster). Prompts for
 * confirmation unless --confirm is given.
 *
 * @param args command-line arguments
 * @throws IOException if the underlying admin operations fail
 */
@SuppressWarnings("unchecked")
public static void executeCommand(String[] args) throws IOException {
    OptionParser parser = getParser();
    // declare parameters
    String url = null;
    List<Integer> nodeIds = null;
    Boolean allNodes = true;   // operate on every node unless --node is given
    Boolean confirm = false;   // --confirm skips the interactive prompt
    // parse command-line input
    OptionSet options = parser.parse(args);
    if (options.has(AdminParserUtils.OPT_HELP)) {
        printHelp(System.out);
        return;
    }
    // check required options and/or conflicting options
    AdminParserUtils.checkRequired(options, AdminParserUtils.OPT_URL);
    AdminParserUtils.checkOptional(options, AdminParserUtils.OPT_NODE, AdminParserUtils.OPT_ALL_NODES);
    // load parameters
    url = (String) options.valueOf(AdminParserUtils.OPT_URL);
    if (options.has(AdminParserUtils.OPT_NODE)) {
        nodeIds = (List<Integer>) options.valuesOf(AdminParserUtils.OPT_NODE);
        allNodes = false;
    }
    if (options.has(AdminParserUtils.OPT_CONFIRM)) {
        confirm = true;
    }
    // print summary
    System.out.println("Remove metadata related to rebalancing");
    System.out.println("Location:");
    System.out.println(" bootstrap url = " + url);
    if (allNodes) {
        System.out.println(" node = all nodes");
    } else {
        System.out.println(" node = " + Joiner.on(", ").join(nodeIds));
    }
    // execute command
    if (!AdminToolUtils.askConfirm(confirm, "remove metadata related to rebalancing")) {
        return; // user declined the confirmation prompt
    }
    AdminClient adminClient = AdminToolUtils.getAdminClient(url);
    if (allNodes) {
        nodeIds = AdminToolUtils.getAllNodeIds(adminClient);
    }
    // Refuse to clear rebalance metadata while any target node is mid-rebalance.
    AdminToolUtils.assertServerNotInRebalancingState(adminClient, nodeIds);
    doMetaClearRebalance(adminClient, nodeIds);
}
Parses the command line and removes metadata related to rebalancing.
488
15
147,897
/**
 * Resets all rebalancing-related metadata keys on the given nodes back to
 * their "no rebalance in progress" values.
 *
 * @param adminClient admin client pointed at the cluster
 * @param nodeIds nodes whose rebalancing metadata should be cleared
 */
public static void doMetaClearRebalance(AdminClient adminClient, List<Integer> nodeIds) {
    AdminToolUtils.assertServerNotInOfflineState(adminClient, nodeIds);

    // 1. Put the server state back to NORMAL.
    String normalState = MetadataStore.VoldemortState.NORMAL_SERVER.toString();
    System.out.println("Setting " + MetadataStore.SERVER_STATE_KEY + " to "
                       + MetadataStore.VoldemortState.NORMAL_SERVER);
    doMetaSet(adminClient, nodeIds, MetadataStore.SERVER_STATE_KEY, normalState);

    // 2. Reset the steal info to an empty list.
    String emptyStealInfo = RebalancerState.create("[]").toJsonString();
    System.out.println("Cleaning up " + MetadataStore.REBALANCING_STEAL_INFO + " to " + emptyStealInfo);
    doMetaSet(adminClient, nodeIds, MetadataStore.REBALANCING_STEAL_INFO, emptyStealInfo);

    // 3. Blank out the remembered source cluster XML.
    System.out.println("Cleaning up " + MetadataStore.REBALANCING_SOURCE_CLUSTER_XML + " to empty string");
    doMetaSet(adminClient, nodeIds, MetadataStore.REBALANCING_SOURCE_CLUSTER_XML, "");
}
Removes metadata related to rebalancing from the given nodes.
290
9
147,898
/**
 * Parses the command line and fetches metadata keys from the selected nodes
 * (defaults to all nodes), writing the results into a directory.
 *
 * @param args command-line arguments (without the sub-command head option)
 * @throws IOException if the underlying admin operations fail
 */
@SuppressWarnings("unchecked")
public static void executeCommand(String[] args) throws IOException {
    OptionParser parser = getParser();
    // declare parameters
    List<String> metaKeys = null;
    String url = null;
    String dir = null;              // optional output directory
    List<Integer> nodeIds = null;
    Boolean allNodes = true;        // query every node unless --node is given
    Boolean verbose = false;
    // parse command-line input
    args = AdminToolUtils.copyArrayAddFirst(args, "--" + OPT_HEAD_META_GET);
    OptionSet options = parser.parse(args);
    if (options.has(AdminParserUtils.OPT_HELP)) {
        printHelp(System.out);
        return;
    }
    // check required options and/or conflicting options
    AdminParserUtils.checkRequired(options, OPT_HEAD_META_GET);
    AdminParserUtils.checkRequired(options, AdminParserUtils.OPT_URL);
    AdminParserUtils.checkOptional(options, AdminParserUtils.OPT_NODE, AdminParserUtils.OPT_ALL_NODES);
    // load parameters
    metaKeys = (List<String>) options.valuesOf(OPT_HEAD_META_GET);
    url = (String) options.valueOf(AdminParserUtils.OPT_URL);
    if (options.has(AdminParserUtils.OPT_DIR)) {
        dir = (String) options.valueOf(AdminParserUtils.OPT_DIR);
    }
    if (options.has(AdminParserUtils.OPT_NODE)) {
        nodeIds = (List<Integer>) options.valuesOf(AdminParserUtils.OPT_NODE);
        allNodes = false;
    }
    if (options.has(OPT_VERBOSE)) {
        verbose = true;
    }
    // execute command
    File directory = AdminToolUtils.createDir(dir);
    AdminClient adminClient = AdminToolUtils.getAdminClient(url);
    if (allNodes) {
        nodeIds = AdminToolUtils.getAllNodeIds(adminClient);
    }
    // The special "all" key expands to every key known to the metadata store.
    if (metaKeys.size() == 1 && metaKeys.get(0).equals(METAKEY_ALL)) {
        metaKeys = Lists.newArrayList();
        for (Object key : MetadataStore.METADATA_KEYS) {
            metaKeys.add((String) key);
        }
    }
    doMetaGet(adminClient, nodeIds, metaKeys, directory, verbose);
}
Parses the command line and gets metadata.
547
10
147,899
/**
 * Parses the command line and fetches read-only store metadata (max version,
 * current version, storage format) for the given stores from the selected
 * nodes (defaults to all nodes).
 *
 * @param args command-line arguments (without the sub-command head option)
 * @throws IOException if the underlying admin operations fail
 */
@SuppressWarnings("unchecked")
public static void executeCommand(String[] args) throws IOException {
    OptionParser parser = getParser();
    // declare parameters
    List<String> metaKeys = null;
    String url = null;
    List<Integer> nodeIds = null;
    Boolean allNodes = true;        // query every node unless --node is given
    List<String> storeNames = null;
    // parse command-line input
    args = AdminToolUtils.copyArrayAddFirst(args, "--" + OPT_HEAD_META_GET_RO);
    OptionSet options = parser.parse(args);
    if (options.has(AdminParserUtils.OPT_HELP)) {
        printHelp(System.out);
        return;
    }
    // check required options and/or conflicting options
    AdminParserUtils.checkRequired(options, OPT_HEAD_META_GET_RO);
    AdminParserUtils.checkRequired(options, AdminParserUtils.OPT_URL);
    AdminParserUtils.checkOptional(options, AdminParserUtils.OPT_NODE, AdminParserUtils.OPT_ALL_NODES);
    AdminParserUtils.checkRequired(options, AdminParserUtils.OPT_STORE);
    // load parameters
    metaKeys = (List<String>) options.valuesOf(OPT_HEAD_META_GET_RO);
    url = (String) options.valueOf(AdminParserUtils.OPT_URL);
    if (options.has(AdminParserUtils.OPT_NODE)) {
        nodeIds = (List<Integer>) options.valuesOf(AdminParserUtils.OPT_NODE);
        allNodes = false;
    }
    storeNames = (List<String>) options.valuesOf(AdminParserUtils.OPT_STORE);
    // execute command
    AdminClient adminClient = AdminToolUtils.getAdminClient(url);
    if (allNodes) {
        nodeIds = AdminToolUtils.getAllNodeIds(adminClient);
    }
    // The special "all" key expands to the three read-only metadata keys.
    if (metaKeys.size() == 1 && metaKeys.get(0).equals(METAKEY_ALL)) {
        metaKeys = Lists.newArrayList();
        metaKeys.add(KEY_MAX_VERSION);
        metaKeys.add(KEY_CURRENT_VERSION);
        metaKeys.add(KEY_STORAGE_FORMAT);
    }
    doMetaGetRO(adminClient, nodeIds, storeNames, metaKeys);
}
Parses the command line and gets read-only metadata.
535
13