idx int64 0 41.2k | question stringlengths 83 4.15k | target stringlengths 5 715 |
|---|---|---|
24,400 | public static < K , V > QueuedKeyedResourcePool < K , V > create ( ResourceFactory < K , V > factory , ResourcePoolConfig config ) { return new QueuedKeyedResourcePool < K , V > ( factory , config ) ; } | Create a new queued pool with key type K request type R and value type V . |
24,401 | public static < K , V > QueuedKeyedResourcePool < K , V > create ( ResourceFactory < K , V > factory ) { return create ( factory , new ResourcePoolConfig ( ) ) ; } | Create a new queued pool using the defaults for key of type K request of type R and value of Type V . |
24,402 | public V internalNonBlockingGet ( K key ) throws Exception { Pool < V > resourcePool = getResourcePoolForKey ( key ) ; return attemptNonBlockingCheckout ( key , resourcePool ) ; } | Used only for unit testing . Please do not use this method in other ways . |
24,403 | private AsyncResourceRequest < V > getNextUnexpiredResourceRequest ( Queue < AsyncResourceRequest < V > > requestQueue ) { AsyncResourceRequest < V > resourceRequest = requestQueue . poll ( ) ; while ( resourceRequest != null ) { if ( resourceRequest . getDeadlineNs ( ) < System . nanoTime ( ) ) { resourceRequest . handleTimeout ( ) ; resourceRequest = requestQueue . poll ( ) ; } else { break ; } } return resourceRequest ; } | Pops resource requests off the queue until queue is empty or an unexpired resource request is found . Invokes . handleTimeout on all expired resource requests popped off the queue . |
24,404 | private boolean processQueue ( K key ) { Queue < AsyncResourceRequest < V > > requestQueue = getRequestQueueForKey ( key ) ; if ( requestQueue . isEmpty ( ) ) { return false ; } Pool < V > resourcePool = getResourcePoolForKey ( key ) ; V resource = null ; Exception ex = null ; try { resource = attemptNonBlockingCheckout ( key , resourcePool ) ; } catch ( Exception e ) { destroyResource ( key , resourcePool , resource ) ; ex = e ; resource = null ; } if ( resource == null && ex == null ) { return false ; } AsyncResourceRequest < V > resourceRequest = getNextUnexpiredResourceRequest ( requestQueue ) ; if ( resourceRequest == null ) { if ( resource != null ) { try { super . checkin ( key , resource ) ; } catch ( Exception e ) { logger . error ( "Exception checking in resource: " , e ) ; } } else { } return false ; } else { if ( resource != null ) { resourceRequest . useResource ( resource ) ; } else { resourceRequest . handleException ( ex ) ; } return true ; } } | Attempts to checkout a resource so that one queued request can be serviced . |
24,405 | public void checkin ( K key , V resource ) { super . checkin ( key , resource ) ; processQueueLoop ( key ) ; } | Check the given resource back into the pool |
24,406 | protected void destroyRequest ( AsyncResourceRequest < V > resourceRequest ) { if ( resourceRequest != null ) { try { Exception e = new UnreachableStoreException ( "Client request was terminated while waiting in the queue." ) ; resourceRequest . handleException ( e ) ; } catch ( Exception ex ) { logger . error ( "Exception while destroying resource request:" , ex ) ; } } } | A safe wrapper to destroy the given resource request . |
24,407 | private void destroyRequestQueue ( Queue < AsyncResourceRequest < V > > requestQueue ) { if ( requestQueue != null ) { AsyncResourceRequest < V > resourceRequest = requestQueue . poll ( ) ; while ( resourceRequest != null ) { destroyRequest ( resourceRequest ) ; resourceRequest = requestQueue . poll ( ) ; } } } | Destroys all resource requests in requestQueue . |
24,408 | public int getRegisteredResourceRequestCount ( K key ) { if ( requestQueueMap . containsKey ( key ) ) { Queue < AsyncResourceRequest < V > > requestQueue = getRequestQueueForExistingKey ( key ) ; if ( requestQueue != null ) { return requestQueue . size ( ) ; } } return 0 ; } | Count the number of queued resource requests for a specific pool . |
24,409 | public int getRegisteredResourceRequestCount ( ) { int count = 0 ; for ( Entry < K , Queue < AsyncResourceRequest < V > > > entry : this . requestQueueMap . entrySet ( ) ) { count += entry . getValue ( ) . size ( ) ; } return count ; } | Count the total number of queued resource requests for all queues . The result is approximate in the face of concurrency since individual queues can change size during the aggregate count . |
24,410 | protected void populateTasksByStealer ( List < StealerBasedRebalanceTask > sbTaskList ) { for ( StealerBasedRebalanceTask task : sbTaskList ) { if ( task . getStealInfos ( ) . size ( ) != 1 ) { throw new VoldemortException ( "StealerBasedRebalanceTasks should have a list of RebalancePartitionsInfo of length 1." ) ; } RebalanceTaskInfo stealInfo = task . getStealInfos ( ) . get ( 0 ) ; int stealerId = stealInfo . getStealerId ( ) ; if ( ! this . tasksByStealer . containsKey ( stealerId ) ) { this . tasksByStealer . put ( stealerId , new ArrayList < StealerBasedRebalanceTask > ( ) ) ; } this . tasksByStealer . get ( stealerId ) . add ( task ) ; } if ( tasksByStealer . isEmpty ( ) ) { return ; } for ( List < StealerBasedRebalanceTask > taskList : tasksByStealer . values ( ) ) { Collections . shuffle ( taskList ) ; } } | Go over the task list and create a map of stealerId - > Tasks |
24,411 | protected synchronized StealerBasedRebalanceTask scheduleNextTask ( boolean executeService ) { if ( doneSignal . getCount ( ) == 0 ) { logger . info ( "All tasks completion signaled... returning" ) ; return null ; } if ( this . numTasksExecuting >= maxParallelRebalancing ) { logger . info ( "Executing more tasks than [" + this . numTasksExecuting + "] the parallel allowed " + maxParallelRebalancing ) ; return null ; } List < Integer > stealerIds = new ArrayList < Integer > ( tasksByStealer . keySet ( ) ) ; Collections . shuffle ( stealerIds ) ; for ( int stealerId : stealerIds ) { if ( nodeIdsWithWork . contains ( stealerId ) ) { logger . info ( "Stealer " + stealerId + " is already working... continuing" ) ; continue ; } for ( StealerBasedRebalanceTask sbTask : tasksByStealer . get ( stealerId ) ) { int donorId = sbTask . getStealInfos ( ) . get ( 0 ) . getDonorId ( ) ; if ( nodeIdsWithWork . contains ( donorId ) ) { logger . info ( "Stealer " + stealerId + " Donor " + donorId + " is already working... continuing" ) ; continue ; } addNodesToWorkerList ( Arrays . asList ( stealerId , donorId ) ) ; numTasksExecuting ++ ; tasksByStealer . get ( stealerId ) . remove ( sbTask ) ; try { if ( executeService ) { logger . info ( "Stealer " + stealerId + " Donor " + donorId + " going to schedule work" ) ; service . execute ( sbTask ) ; } } catch ( RejectedExecutionException ree ) { logger . error ( "Stealer " + stealerId + "Rebalancing task rejected by executor service." , ree ) ; throw new VoldemortRebalancingException ( "Stealer " + stealerId + "Rebalancing task rejected by executor service." ) ; } return sbTask ; } } printRemainingTasks ( stealerIds ) ; return null ; } | Schedule at most one task . |
24,412 | public synchronized void addNodesToWorkerList ( List < Integer > nodeIds ) { nodeIdsWithWork . addAll ( nodeIds ) ; logger . info ( "Node IDs with work: " + nodeIdsWithWork + " Newly added nodes " + nodeIds ) ; } | Add nodes to the workers list |
24,413 | public synchronized void doneTask ( int stealerId , int donorId ) { removeNodesFromWorkerList ( Arrays . asList ( stealerId , donorId ) ) ; numTasksExecuting -- ; doneSignal . countDown ( ) ; scheduleMoreTasks ( ) ; } | Method must be invoked upon completion of a rebalancing task . It is the task's responsibility to do so . |
24,414 | private List < Long > collectLongMetric ( String metricGetterName ) { List < Long > vals = new ArrayList < Long > ( ) ; for ( BdbEnvironmentStats envStats : environmentStatsTracked ) { vals . add ( ( Long ) ReflectUtils . callMethod ( envStats , BdbEnvironmentStats . class , metricGetterName , new Class < ? > [ 0 ] , new Object [ 0 ] ) ) ; } return vals ; } | Calls the provided metric getter on all the tracked environments and obtains their values |
24,415 | public static Iterable < String > toHexStrings ( Iterable < ByteArray > arrays ) { ArrayList < String > ret = new ArrayList < String > ( ) ; for ( ByteArray array : arrays ) ret . add ( ByteUtils . toHexString ( array . get ( ) ) ) ; return ret ; } | Translate the each ByteArray in an iterable into a hexadecimal string |
24,416 | public void sendResponse ( StoreStats performanceStats , boolean isFromLocalZone , long startTimeInMs ) throws Exception { MimeMessage message = new MimeMessage ( Session . getDefaultInstance ( new Properties ( ) ) ) ; MimeMultipart multiPart = new MimeMultipart ( ) ; ByteArrayOutputStream outputStream = new ByteArrayOutputStream ( ) ; String base64Key = RestUtils . encodeVoldemortKey ( key . get ( ) ) ; String contentLocationKey = "/" + this . storeName + "/" + base64Key ; for ( Versioned < byte [ ] > versionedValue : versionedValues ) { byte [ ] responseValue = versionedValue . getValue ( ) ; VectorClock vectorClock = ( VectorClock ) versionedValue . getVersion ( ) ; String eTag = RestUtils . getSerializedVectorClock ( vectorClock ) ; numVectorClockEntries += vectorClock . getVersionMap ( ) . size ( ) ; MimeBodyPart body = new MimeBodyPart ( ) ; try { body . addHeader ( CONTENT_TYPE , "application/octet-stream" ) ; body . addHeader ( CONTENT_TRANSFER_ENCODING , "binary" ) ; body . addHeader ( RestMessageHeaders . X_VOLD_VECTOR_CLOCK , eTag ) ; body . setContent ( responseValue , "application/octet-stream" ) ; body . addHeader ( RestMessageHeaders . CONTENT_LENGTH , Integer . toString ( responseValue . length ) ) ; multiPart . addBodyPart ( body ) ; } catch ( MessagingException me ) { logger . error ( "Exception while constructing body part" , me ) ; outputStream . close ( ) ; throw me ; } } message . setContent ( multiPart ) ; message . saveChanges ( ) ; try { multiPart . writeTo ( outputStream ) ; } catch ( Exception e ) { logger . error ( "Exception while writing multipart to output stream" , e ) ; outputStream . close ( ) ; throw e ; } ChannelBuffer responseContent = ChannelBuffers . dynamicBuffer ( ) ; responseContent . writeBytes ( outputStream . toByteArray ( ) ) ; HttpResponse response = new DefaultHttpResponse ( HTTP_1_1 , OK ) ; response . setHeader ( CONTENT_TYPE , "multipart/binary" ) ; response . 
setHeader ( CONTENT_TRANSFER_ENCODING , "binary" ) ; response . setHeader ( CONTENT_LOCATION , contentLocationKey ) ; response . setContent ( responseContent ) ; response . setHeader ( CONTENT_LENGTH , response . getContent ( ) . readableBytes ( ) ) ; if ( logger . isDebugEnabled ( ) ) { String keyStr = RestUtils . getKeyHexString ( this . key ) ; debugLog ( "GET" , this . storeName , keyStr , startTimeInMs , System . currentTimeMillis ( ) , numVectorClockEntries ) ; } this . messageEvent . getChannel ( ) . write ( response ) ; if ( performanceStats != null && isFromLocalZone ) { recordStats ( performanceStats , startTimeInMs , Tracked . GET ) ; } outputStream . close ( ) ; } | Sends a multipart response . Each body part represents a versioned value of the given key . |
24,417 | public String getPublicConfigValue ( String key ) throws ConfigurationException { if ( ! allProps . containsKey ( key ) ) { throw new UndefinedPropertyException ( "The requested config key does not exist." ) ; } if ( restrictedConfigs . contains ( key ) ) { throw new ConfigurationException ( "The requested config key is not publicly available!" ) ; } return allProps . get ( key ) ; } | This is a generic function for retrieving any config value . The returned value is the one the server is operating with no matter whether it comes from defaults or from the user - supplied configuration . |
24,418 | private void checkRateLimit ( String quotaKey , Tracked trackedOp ) { String quotaValue = null ; try { if ( ! metadataStore . getQuotaEnforcingEnabledUnlocked ( ) ) { return ; } quotaValue = quotaStore . cacheGet ( quotaKey ) ; if ( quotaValue == null ) { return ; } float currentRate = getThroughput ( trackedOp ) ; float allowedRate = Float . parseFloat ( quotaValue ) ; quotaStats . reportQuotaUsed ( trackedOp , Utils . safeGetPercentage ( currentRate , allowedRate ) ) ; if ( currentRate > allowedRate ) { quotaStats . reportRateLimitedOp ( trackedOp ) ; throw new QuotaExceededException ( "Exceeded rate limit for " + quotaKey + ". Maximum allowed : " + allowedRate + " Current: " + currentRate ) ; } } catch ( NumberFormatException nfe ) { logger . debug ( "Invalid formatting of quota value for key " + quotaKey + " : " + quotaValue ) ; } } | Ensure the current throughput levels for the tracked operation does not exceed set quota limits . Throws an exception if exceeded quota . |
24,419 | public synchronized void submitOperation ( int requestId , AsyncOperation operation ) { if ( this . operations . containsKey ( requestId ) ) throw new VoldemortException ( "Request " + requestId + " already submitted to the system" ) ; this . operations . put ( requestId , operation ) ; scheduler . scheduleNow ( operation ) ; logger . debug ( "Handling async operation " + requestId ) ; } | Submit an operation . Throws a runtime exception if the operation is already submitted . |
24,420 | public synchronized boolean isComplete ( int requestId , boolean remove ) { if ( ! operations . containsKey ( requestId ) ) throw new VoldemortException ( "No operation with id " + requestId + " found" ) ; if ( operations . get ( requestId ) . getStatus ( ) . isComplete ( ) ) { if ( logger . isDebugEnabled ( ) ) logger . debug ( "Operation complete " + requestId ) ; if ( remove ) operations . remove ( requestId ) ; return true ; } return false ; } | Check if an operation is done or not . |
24,421 | @ JmxOperation ( description = "Retrieve operation status" ) public String getStatus ( int id ) { try { return getOperationStatus ( id ) . toString ( ) ; } catch ( VoldemortException e ) { return "No operation with id " + id + " found" ; } } | Wrap getOperationStatus to avoid throwing exception over JMX |
24,422 | public List < Integer > getAsyncOperationList ( boolean showCompleted ) { Set < Integer > keySet = ImmutableSet . copyOf ( operations . keySet ( ) ) ; if ( showCompleted ) return new ArrayList < Integer > ( keySet ) ; List < Integer > keyList = new ArrayList < Integer > ( ) ; for ( int key : keySet ) { AsyncOperation operation = operations . get ( key ) ; if ( operation != null && ! operation . getStatus ( ) . isComplete ( ) ) keyList . add ( key ) ; } return keyList ; } | Get list of asynchronous operations on this node . By default only the pending operations are returned . |
24,423 | public String stopAsyncOperation ( int requestId ) { try { stopOperation ( requestId ) ; } catch ( VoldemortException e ) { return e . getMessage ( ) ; } return "Stopping operation " + requestId ; } | Wrapper to avoid throwing an exception over JMX |
24,424 | public void updateStoreDefinition ( StoreDefinition storeDef ) { this . storeDef = storeDef ; if ( storeDef . hasRetentionPeriod ( ) ) this . retentionTimeMs = storeDef . getRetentionDays ( ) * Time . MS_PER_DAY ; } | Updates the store definition object and the retention time based on the updated store definition |
24,425 | private List < Versioned < byte [ ] > > filterExpiredEntries ( ByteArray key , List < Versioned < byte [ ] > > vals ) { Iterator < Versioned < byte [ ] > > valsIterator = vals . iterator ( ) ; while ( valsIterator . hasNext ( ) ) { Versioned < byte [ ] > val = valsIterator . next ( ) ; VectorClock clock = ( VectorClock ) val . getVersion ( ) ; if ( clock . getTimestamp ( ) < ( time . getMilliseconds ( ) - this . retentionTimeMs ) ) { valsIterator . remove ( ) ; if ( deleteExpiredEntries ) { getInnerStore ( ) . delete ( key , clock ) ; } } } return vals ; } | Performs the filtering of the expired entries based on retention time . Optionally deletes them also |
24,426 | private synchronized void flushData ( ) { BufferedWriter writer = null ; try { writer = new BufferedWriter ( new FileWriter ( new File ( this . inputPath ) ) ) ; for ( String key : this . metadataMap . keySet ( ) ) { writer . write ( NEW_PROPERTY_SEPARATOR + key . toString ( ) + "]" + NEW_LINE ) ; writer . write ( this . metadataMap . get ( key ) . toString ( ) ) ; writer . write ( "" + NEW_LINE + "" + NEW_LINE ) ; } writer . flush ( ) ; } catch ( IOException e ) { logger . error ( "IO exception while flushing data to file backed storage: " + e . getMessage ( ) ) ; } try { if ( writer != null ) writer . close ( ) ; } catch ( Exception e ) { logger . error ( "Error while flushing data to file backed storage: " + e . getMessage ( ) ) ; } } | Flush the in - memory data to the file |
24,427 | public static String getSerializedVectorClock ( VectorClock vc ) { VectorClockWrapper vcWrapper = new VectorClockWrapper ( vc ) ; String serializedVC = "" ; try { serializedVC = mapper . writeValueAsString ( vcWrapper ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; } return serializedVC ; } | Function to serialize the given Vector clock into a string . If something goes wrong it returns an empty string . |
24,428 | public static String getSerializedVectorClocks ( List < VectorClock > vectorClocks ) { List < VectorClockWrapper > vectorClockWrappers = new ArrayList < VectorClockWrapper > ( ) ; for ( VectorClock vc : vectorClocks ) { vectorClockWrappers . add ( new VectorClockWrapper ( vc ) ) ; } String serializedVC = "" ; try { serializedVC = mapper . writeValueAsString ( vectorClockWrappers ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; } return serializedVC ; } | Function to serialize the given list of Vector clocks into a string . If something goes wrong it returns an empty string . |
24,429 | public static String constructSerializerInfoXml ( StoreDefinition storeDefinition ) { Element store = new Element ( StoreDefinitionsMapper . STORE_ELMT ) ; store . addContent ( new Element ( StoreDefinitionsMapper . STORE_NAME_ELMT ) . setText ( storeDefinition . getName ( ) ) ) ; Element keySerializer = new Element ( StoreDefinitionsMapper . STORE_KEY_SERIALIZER_ELMT ) ; StoreDefinitionsMapper . addSerializer ( keySerializer , storeDefinition . getKeySerializer ( ) ) ; store . addContent ( keySerializer ) ; Element valueSerializer = new Element ( StoreDefinitionsMapper . STORE_VALUE_SERIALIZER_ELMT ) ; StoreDefinitionsMapper . addSerializer ( valueSerializer , storeDefinition . getValueSerializer ( ) ) ; store . addContent ( valueSerializer ) ; XMLOutputter serializer = new XMLOutputter ( Format . getPrettyFormat ( ) ) ; return serializer . outputString ( store ) ; } | Given a storedefinition constructs the xml string to be sent out in response to a schemata fetch request |
24,430 | public void updateMetadataVersions ( ) { Properties versionProps = MetadataVersionStoreUtils . getProperties ( this . systemStoreRepository . getMetadataVersionStore ( ) ) ; Long newVersion = fetchNewVersion ( SystemStoreConstants . CLUSTER_VERSION_KEY , null , versionProps ) ; if ( newVersion != null ) { this . currentClusterVersion = newVersion ; } } | Fetch the latest versions for cluster metadata |
24,431 | public static void validateClusterStores ( final Cluster cluster , final List < StoreDefinition > storeDefs ) { for ( StoreDefinition storeDefinition : storeDefs ) { new StoreRoutingPlan ( cluster , storeDefinition ) ; } return ; } | Verify store definitions are congruent with cluster definition . |
24,432 | public static void validateCurrentFinalCluster ( final Cluster currentCluster , final Cluster finalCluster ) { validateClusterPartitionCounts ( currentCluster , finalCluster ) ; validateClusterNodeState ( currentCluster , finalCluster ) ; return ; } | A final cluster ought to be a super set of current cluster . I . e . existing node IDs ought to map to same server but partition layout can have changed and there may exist new nodes . |
24,433 | public static void validateInterimFinalCluster ( final Cluster interimCluster , final Cluster finalCluster ) { validateClusterPartitionCounts ( interimCluster , finalCluster ) ; validateClusterZonesSame ( interimCluster , finalCluster ) ; validateClusterNodeCounts ( interimCluster , finalCluster ) ; validateClusterNodeState ( interimCluster , finalCluster ) ; return ; } | Interim and final clusters ought to have same partition counts same zones and same node state . Partitions per node may of course differ . |
24,434 | public static void validateClusterPartitionCounts ( final Cluster lhs , final Cluster rhs ) { if ( lhs . getNumberOfPartitions ( ) != rhs . getNumberOfPartitions ( ) ) throw new VoldemortException ( "Total number of partitions should be equal [ lhs cluster (" + lhs . getNumberOfPartitions ( ) + ") not equal to rhs cluster (" + rhs . getNumberOfPartitions ( ) + ") ]" ) ; } | Confirms that both clusters have the same number of total partitions . |
24,435 | public static void validateClusterPartitionState ( final Cluster subsetCluster , final Cluster supersetCluster ) { if ( ! supersetCluster . getNodeIds ( ) . containsAll ( subsetCluster . getNodeIds ( ) ) ) { throw new VoldemortException ( "Superset cluster does not contain all nodes from subset cluster[ subset cluster node ids (" + subsetCluster . getNodeIds ( ) + ") are not a subset of superset cluster node ids (" + supersetCluster . getNodeIds ( ) + ") ]" ) ; } for ( int nodeId : subsetCluster . getNodeIds ( ) ) { Node supersetNode = supersetCluster . getNodeById ( nodeId ) ; Node subsetNode = subsetCluster . getNodeById ( nodeId ) ; if ( ! supersetNode . getPartitionIds ( ) . equals ( subsetNode . getPartitionIds ( ) ) ) { throw new VoldemortRebalancingException ( "Partition IDs do not match between clusters for nodes with id " + nodeId + " : subset cluster has " + subsetNode . getPartitionIds ( ) + " and superset cluster has " + supersetNode . getPartitionIds ( ) ) ; } } Set < Integer > nodeIds = supersetCluster . getNodeIds ( ) ; nodeIds . removeAll ( subsetCluster . getNodeIds ( ) ) ; for ( int nodeId : nodeIds ) { Node supersetNode = supersetCluster . getNodeById ( nodeId ) ; if ( ! supersetNode . getPartitionIds ( ) . isEmpty ( ) ) { throw new VoldemortRebalancingException ( "New node " + nodeId + " in superset cluster already has partitions: " + supersetNode . getPartitionIds ( ) ) ; } } } | Confirm that all nodes shared between clusters host exact same partition IDs and that nodes only in the super set cluster have no partition IDs . |
24,436 | public static void validateClusterZonesSame ( final Cluster lhs , final Cluster rhs ) { Set < Zone > lhsSet = new HashSet < Zone > ( lhs . getZones ( ) ) ; Set < Zone > rhsSet = new HashSet < Zone > ( rhs . getZones ( ) ) ; if ( ! lhsSet . equals ( rhsSet ) ) throw new VoldemortException ( "Zones are not the same [ lhs cluster zones (" + lhs . getZones ( ) + ") not equal to rhs cluster zones (" + rhs . getZones ( ) + ") ]" ) ; } | Confirms that both clusters have the same set of zones defined . |
24,437 | public static void validateClusterNodeCounts ( final Cluster lhs , final Cluster rhs ) { if ( ! lhs . getNodeIds ( ) . equals ( rhs . getNodeIds ( ) ) ) { throw new VoldemortException ( "Node ids are not the same [ lhs cluster node ids (" + lhs . getNodeIds ( ) + ") not equal to rhs cluster node ids (" + rhs . getNodeIds ( ) + ") ]" ) ; } } | Confirms that both clusters have the same number of nodes by comparing set of node Ids between clusters . |
24,438 | public static Cluster vacateZone ( Cluster currentCluster , int dropZoneId ) { Cluster returnCluster = Cluster . cloneCluster ( currentCluster ) ; for ( Integer nodeId : currentCluster . getNodeIdsInZone ( dropZoneId ) ) { for ( Integer partitionId : currentCluster . getNodeById ( nodeId ) . getPartitionIds ( ) ) { int finalZoneId = - 1 ; int finalNodeId = - 1 ; int adjacentPartitionId = partitionId ; do { adjacentPartitionId = ( adjacentPartitionId + 1 ) % currentCluster . getNumberOfPartitions ( ) ; finalNodeId = currentCluster . getNodeForPartitionId ( adjacentPartitionId ) . getId ( ) ; finalZoneId = currentCluster . getZoneForPartitionId ( adjacentPartitionId ) . getId ( ) ; if ( adjacentPartitionId == partitionId ) { logger . error ( "PartitionId " + partitionId + "stays unchanged \n" ) ; } else { logger . info ( "PartitionId " + partitionId + " goes together with partition " + adjacentPartitionId + " on node " + finalNodeId + " in zone " + finalZoneId ) ; returnCluster = UpdateClusterUtils . createUpdatedCluster ( returnCluster , finalNodeId , Lists . newArrayList ( partitionId ) ) ; } } while ( finalZoneId == dropZoneId ) ; } } return returnCluster ; } | Given the current cluster and a zone id that needs to be dropped this method will remove all partitions from the zone that is being dropped and move it to the existing zones . The partitions are moved intelligently so as to avoid any data movement in the existing zones . |
24,439 | public static Cluster dropZone ( Cluster intermediateCluster , int dropZoneId ) { Set < Node > survivingNodes = new HashSet < Node > ( ) ; for ( int nodeId : intermediateCluster . getNodeIds ( ) ) { if ( intermediateCluster . getNodeById ( nodeId ) . getZoneId ( ) != dropZoneId ) { survivingNodes . add ( intermediateCluster . getNodeById ( nodeId ) ) ; } } Set < Zone > zones = new HashSet < Zone > ( ) ; for ( int zoneId : intermediateCluster . getZoneIds ( ) ) { if ( zoneId == dropZoneId ) { continue ; } List < Integer > proximityList = intermediateCluster . getZoneById ( zoneId ) . getProximityList ( ) ; proximityList . remove ( new Integer ( dropZoneId ) ) ; zones . add ( new Zone ( zoneId , proximityList ) ) ; } return new Cluster ( intermediateCluster . getName ( ) , Utils . asSortedList ( survivingNodes ) , Utils . asSortedList ( zones ) ) ; } | Given a interim cluster with a previously vacated zone constructs a new cluster object with the drop zone completely removed |
24,440 | public static List < Integer > getStolenPrimaryPartitions ( final Cluster currentCluster , final Cluster finalCluster , final int stealNodeId ) { List < Integer > finalList = new ArrayList < Integer > ( finalCluster . getNodeById ( stealNodeId ) . getPartitionIds ( ) ) ; List < Integer > currentList = new ArrayList < Integer > ( ) ; if ( currentCluster . hasNodeWithId ( stealNodeId ) ) { currentList = currentCluster . getNodeById ( stealNodeId ) . getPartitionIds ( ) ; } else { if ( logger . isDebugEnabled ( ) ) { logger . debug ( "Current cluster does not contain stealer node (cluster : [[[" + currentCluster + "]]], node id " + stealNodeId + ")" ) ; } } finalList . removeAll ( currentList ) ; return finalList ; } | For a particular stealer node find all the primary partitions tuples it will steal . |
24,441 | public static List < StoreDefinition > validateRebalanceStore ( List < StoreDefinition > storeDefList ) { List < StoreDefinition > returnList = new ArrayList < StoreDefinition > ( storeDefList . size ( ) ) ; for ( StoreDefinition def : storeDefList ) { if ( ! def . isView ( ) && ! canRebalanceList . contains ( def . getType ( ) ) ) { throw new VoldemortException ( "Rebalance does not support rebalancing of stores of type " + def . getType ( ) + " - " + def . getName ( ) ) ; } else if ( ! def . isView ( ) ) { returnList . add ( def ) ; } else { logger . debug ( "Ignoring view " + def . getName ( ) + " for rebalancing" ) ; } } return returnList ; } | Given a list of store definitions makes sure that rebalance supports all of them . If not it throws an error . |
24,442 | public static void dumpClusters ( Cluster currentCluster , Cluster finalCluster , String outputDirName , String filePrefix ) { dumpClusterToFile ( outputDirName , filePrefix + currentClusterFileName , currentCluster ) ; dumpClusterToFile ( outputDirName , filePrefix + finalClusterFileName , finalCluster ) ; } | Given the initial and final cluster dumps it into the output directory |
24,443 | public static void dumpClusters ( Cluster currentCluster , Cluster finalCluster , String outputDirName ) { dumpClusters ( currentCluster , finalCluster , outputDirName , "" ) ; } | Given the current and final cluster dumps it into the output directory |
24,444 | public static void dumpClusterToFile ( String outputDirName , String fileName , Cluster cluster ) { if ( outputDirName != null ) { File outputDir = new File ( outputDirName ) ; if ( ! outputDir . exists ( ) ) { Utils . mkdirs ( outputDir ) ; } try { FileUtils . writeStringToFile ( new File ( outputDirName , fileName ) , new ClusterMapper ( ) . writeCluster ( cluster ) ) ; } catch ( IOException e ) { logger . error ( "IOException during dumpClusterToFile: " + e ) ; } } } | Prints a cluster xml to a file . |
24,445 | public static void dumpStoreDefsToFile ( String outputDirName , String fileName , List < StoreDefinition > storeDefs ) { if ( outputDirName != null ) { File outputDir = new File ( outputDirName ) ; if ( ! outputDir . exists ( ) ) { Utils . mkdirs ( outputDir ) ; } try { FileUtils . writeStringToFile ( new File ( outputDirName , fileName ) , new StoreDefinitionsMapper ( ) . writeStoreList ( storeDefs ) ) ; } catch ( IOException e ) { logger . error ( "IOException during dumpStoreDefsToFile: " + e ) ; } } } | Prints a stores xml to a file . |
24,446 | public static void dumpAnalysisToFile ( String outputDirName , String baseFileName , PartitionBalance partitionBalance ) { if ( outputDirName != null ) { File outputDir = new File ( outputDirName ) ; if ( ! outputDir . exists ( ) ) { Utils . mkdirs ( outputDir ) ; } try { FileUtils . writeStringToFile ( new File ( outputDirName , baseFileName + ".analysis" ) , partitionBalance . toString ( ) ) ; } catch ( IOException e ) { logger . error ( "IOException during dumpAnalysisToFile: " + e ) ; } } } | Prints a balance analysis to a file . |
24,447 | public static void dumpPlanToFile ( String outputDirName , RebalancePlan plan ) { if ( outputDirName != null ) { File outputDir = new File ( outputDirName ) ; if ( ! outputDir . exists ( ) ) { Utils . mkdirs ( outputDir ) ; } try { FileUtils . writeStringToFile ( new File ( outputDirName , "plan.out" ) , plan . toString ( ) ) ; } catch ( IOException e ) { logger . error ( "IOException during dumpPlanToFile: " + e ) ; } } } | Prints the plan to a file . |
24,448 | public static List < RebalanceTaskInfo > filterTaskPlanWithStores ( List < RebalanceTaskInfo > existingPlanList , List < StoreDefinition > storeDefs ) { List < RebalanceTaskInfo > plans = Lists . newArrayList ( ) ; List < String > storeNames = StoreDefinitionUtils . getStoreNames ( storeDefs ) ; for ( RebalanceTaskInfo existingPlan : existingPlanList ) { RebalanceTaskInfo info = RebalanceTaskInfo . create ( existingPlan . toJsonString ( ) ) ; HashMap < String , List < Integer > > storeToPartitions = info . getStoreToPartitionIds ( ) ; HashMap < String , List < Integer > > newStoreToPartitions = Maps . newHashMap ( ) ; for ( String storeName : storeNames ) { if ( storeToPartitions . containsKey ( storeName ) ) newStoreToPartitions . put ( storeName , storeToPartitions . get ( storeName ) ) ; } info . setStoreToPartitionList ( newStoreToPartitions ) ; plans . add ( info ) ; } return plans ; } | Given a list of partition plans and a set of stores copies the store names to every individual plan and creates a new list |
24,449 | public static void executorShutDown ( ExecutorService executorService , long timeOutSec ) { try { executorService . shutdown ( ) ; executorService . awaitTermination ( timeOutSec , TimeUnit . SECONDS ) ; } catch ( Exception e ) { logger . warn ( "Error while stoping executor service." , e ) ; } } | Wait to shutdown service |
24,450 | public List < T > resolveConflicts ( List < T > values ) { if ( values . size ( ) > 1 ) return values ; else return Collections . singletonList ( values . get ( 0 ) ) ; } | Arbitrarily resolve the inconsistency by choosing the first object if there is one . |
24,451 | public static < K , V , T > List < Versioned < V > > get ( Store < K , V , T > storageEngine , K key , T transform ) { Map < K , List < Versioned < V > > > result = storageEngine . getAll ( Collections . singleton ( key ) , Collections . singletonMap ( key , transform ) ) ; if ( result . size ( ) > 0 ) return result . get ( key ) ; else return Collections . emptyList ( ) ; } | Implements get by delegating to getAll . |
24,452 | public static < K , V , T > Map < K , List < Versioned < V > > > getAll ( Store < K , V , T > storageEngine , Iterable < K > keys , Map < K , T > transforms ) { Map < K , List < Versioned < V > > > result = newEmptyHashMap ( keys ) ; for ( K key : keys ) { List < Versioned < V > > value = storageEngine . get ( key , transforms != null ? transforms . get ( key ) : null ) ; if ( ! value . isEmpty ( ) ) result . put ( key , value ) ; } return result ; } | Implements getAll by delegating to get . |
24,453 | public static < K , V > HashMap < K , V > newEmptyHashMap ( Iterable < ? > iterable ) { if ( iterable instanceof Collection < ? > ) return Maps . newHashMapWithExpectedSize ( ( ( Collection < ? > ) iterable ) . size ( ) ) ; return Maps . newHashMap ( ) ; } | Returns an empty map with expected size matching the iterable size if it s of type Collection . Otherwise an empty map with the default size is returned . |
24,454 | public static void assertValidMetadata ( ByteArray key , RoutingStrategy routingStrategy , Node currentNode ) { List < Node > nodes = routingStrategy . routeRequest ( key . get ( ) ) ; for ( Node node : nodes ) { if ( node . getId ( ) == currentNode . getId ( ) ) { return ; } } throw new InvalidMetadataException ( "Client accessing key belonging to partitions " + routingStrategy . getPartitionList ( key . get ( ) ) + " not present at " + currentNode ) ; } | Check if the current node is part of routing request based on cluster . xml or throw an exception . |
24,455 | public static void assertValidNode ( MetadataStore metadataStore , Integer nodeId ) { if ( ! metadataStore . getCluster ( ) . hasNodeWithId ( nodeId ) ) { throw new InvalidMetadataException ( "NodeId " + nodeId + " is not or no longer in this cluster" ) ; } } | Check if the nodeId is present in the cluster managed by the metadata store or throw an exception .
24,456 | @ SuppressWarnings ( "unchecked" ) public static < T > Serializer < T > unsafeGetSerializer ( SerializerFactory serializerFactory , SerializerDefinition serializerDefinition ) { return ( Serializer < T > ) serializerFactory . getSerializer ( serializerDefinition ) ; } | This is a temporary measure until we have a type - safe solution for retrieving serializers from a SerializerFactory . It avoids warnings all over the codebase while making it easy to verify who calls it . |
24,457 | public static StoreDefinition getStoreDef ( List < StoreDefinition > list , String name ) { for ( StoreDefinition def : list ) if ( def . getName ( ) . equals ( name ) ) return def ; return null ; } | Get a store definition from the given list of store definitions |
24,458 | public static List < String > getStoreNames ( List < StoreDefinition > list , boolean ignoreViews ) { List < String > storeNameSet = new ArrayList < String > ( ) ; for ( StoreDefinition def : list ) if ( ! def . isView ( ) || ! ignoreViews ) storeNameSet . add ( def . getName ( ) ) ; return storeNameSet ; } | Get the list of store names from a list of store definitions |
24,459 | private void plan ( ) { final TreeMultimap < Integer , Integer > stealerToStolenPrimaryPartitions = TreeMultimap . create ( ) ; if ( outputDir != null ) RebalanceUtils . dumpClusters ( currentCluster , finalCluster , outputDir ) ; for ( Node stealerNode : finalCluster . getNodes ( ) ) { List < Integer > stolenPrimaryPartitions = RebalanceUtils . getStolenPrimaryPartitions ( currentCluster , finalCluster , stealerNode . getId ( ) ) ; if ( stolenPrimaryPartitions . size ( ) > 0 ) { numPrimaryPartitionMoves += stolenPrimaryPartitions . size ( ) ; stealerToStolenPrimaryPartitions . putAll ( stealerNode . getId ( ) , stolenPrimaryPartitions ) ; } } int batches = 0 ; Cluster batchCurrentCluster = Cluster . cloneCluster ( currentCluster ) ; List < StoreDefinition > batchCurrentStoreDefs = this . currentStoreDefs ; List < StoreDefinition > batchFinalStoreDefs = this . finalStoreDefs ; Cluster batchFinalCluster = RebalanceUtils . getInterimCluster ( this . currentCluster , this . finalCluster ) ; while ( ! stealerToStolenPrimaryPartitions . isEmpty ( ) ) { int partitions = 0 ; List < Entry < Integer , Integer > > partitionsMoved = Lists . newArrayList ( ) ; for ( Entry < Integer , Integer > stealerToPartition : stealerToStolenPrimaryPartitions . entries ( ) ) { partitionsMoved . add ( stealerToPartition ) ; batchFinalCluster = UpdateClusterUtils . createUpdatedCluster ( batchFinalCluster , stealerToPartition . getKey ( ) , Lists . newArrayList ( stealerToPartition . getValue ( ) ) ) ; partitions ++ ; if ( partitions == batchSize ) break ; } for ( Iterator < Entry < Integer , Integer > > partitionMoved = partitionsMoved . iterator ( ) ; partitionMoved . hasNext ( ) ; ) { Entry < Integer , Integer > entry = partitionMoved . next ( ) ; stealerToStolenPrimaryPartitions . remove ( entry . getKey ( ) , entry . getValue ( ) ) ; } if ( outputDir != null ) RebalanceUtils . dumpClusters ( batchCurrentCluster , batchFinalCluster , outputDir , "batch-" + Integer . 
toString ( batches ) + "." ) ; final RebalanceBatchPlan RebalanceBatchPlan = new RebalanceBatchPlan ( batchCurrentCluster , batchCurrentStoreDefs , batchFinalCluster , batchFinalStoreDefs ) ; batchPlans . add ( RebalanceBatchPlan ) ; numXZonePartitionStoreMoves += RebalanceBatchPlan . getCrossZonePartitionStoreMoves ( ) ; numPartitionStoreMoves += RebalanceBatchPlan . getPartitionStoreMoves ( ) ; nodeMoveMap . add ( RebalanceBatchPlan . getNodeMoveMap ( ) ) ; zoneMoveMap . add ( RebalanceBatchPlan . getZoneMoveMap ( ) ) ; batches ++ ; batchCurrentCluster = Cluster . cloneCluster ( batchFinalCluster ) ; batchCurrentStoreDefs = batchFinalStoreDefs ; } logger . info ( this ) ; } | Create a plan . The plan consists of batches . Each batch involves the movement of no more than batchSize primary partitions . The movement of a single primary partition may require migration of other n - ary replicas and potentially deletions . Migrating a primary or n - ary partition requires migrating one partition - store for every store hosted at that partition . |
24,460 | private String storageOverhead ( Map < Integer , Integer > finalNodeToOverhead ) { double maxOverhead = Double . MIN_VALUE ; PartitionBalance pb = new PartitionBalance ( currentCluster , currentStoreDefs ) ; StringBuilder sb = new StringBuilder ( ) ; sb . append ( "Per-node store-overhead:" ) . append ( Utils . NEWLINE ) ; DecimalFormat doubleDf = new DecimalFormat ( "####.##" ) ; for ( int nodeId : finalCluster . getNodeIds ( ) ) { Node node = finalCluster . getNodeById ( nodeId ) ; String nodeTag = "Node " + String . format ( "%4d" , nodeId ) + " (" + node . getHost ( ) + ")" ; int initialLoad = 0 ; if ( currentCluster . getNodeIds ( ) . contains ( nodeId ) ) { initialLoad = pb . getNaryPartitionCount ( nodeId ) ; } int toLoad = 0 ; if ( finalNodeToOverhead . containsKey ( nodeId ) ) { toLoad = finalNodeToOverhead . get ( nodeId ) ; } double overhead = ( initialLoad + toLoad ) / ( double ) initialLoad ; if ( initialLoad > 0 && maxOverhead < overhead ) { maxOverhead = overhead ; } String loadTag = String . format ( "%6d" , initialLoad ) + " + " + String . format ( "%6d" , toLoad ) + " -> " + String . format ( "%6d" , initialLoad + toLoad ) + " (" + doubleDf . format ( overhead ) + " X)" ; sb . append ( nodeTag + " : " + loadTag ) . append ( Utils . NEWLINE ) ; } sb . append ( Utils . NEWLINE ) . append ( "**** Max per-node storage overhead: " + doubleDf . format ( maxOverhead ) + " X." ) . append ( Utils . NEWLINE ) ; return ( sb . toString ( ) ) ; } | Determines storage overhead and returns pretty printed summary . |
24,461 | public static Boolean askConfirm ( Boolean confirm , String opDesc ) throws IOException { if ( confirm ) { System . out . println ( "Confirmed " + opDesc + " in command-line." ) ; return true ; } else { System . out . println ( "Are you sure you want to " + opDesc + "? (yes/no)" ) ; BufferedReader buffer = new BufferedReader ( new InputStreamReader ( System . in ) ) ; String text = buffer . readLine ( ) . toLowerCase ( Locale . ENGLISH ) ; boolean go = text . equals ( "yes" ) || text . equals ( "y" ) ; if ( ! go ) { System . out . println ( "Did not confirm; " + opDesc + " aborted." ) ; } return go ; } } | Utility function that pauses and asks for confirmation on dangerous operations . |
24,462 | public static List < String > getValueList ( List < String > valuePairs , String delim ) { List < String > valueList = Lists . newArrayList ( ) ; for ( String valuePair : valuePairs ) { String [ ] value = valuePair . split ( delim , 2 ) ; if ( value . length != 2 ) throw new VoldemortException ( "Invalid argument pair: " + valuePair ) ; valueList . add ( value [ 0 ] ) ; valueList . add ( value [ 1 ] ) ; } return valueList ; } | Utility function that gives list of values from list of value - pair strings . |
24,463 | public static < V > Map < V , V > convertListToMap ( List < V > list ) { Map < V , V > map = new HashMap < V , V > ( ) ; if ( list . size ( ) % 2 != 0 ) throw new VoldemortException ( "Failed to convert list to map." ) ; for ( int i = 0 ; i < list . size ( ) ; i += 2 ) { map . put ( list . get ( i ) , list . get ( i + 1 ) ) ; } return map ; } | Utility function that converts a list to a map . |
24,464 | public static AdminClient getAdminClient ( String url ) { ClientConfig config = new ClientConfig ( ) . setBootstrapUrls ( url ) . setConnectionTimeout ( 5 , TimeUnit . SECONDS ) ; AdminClientConfig adminConfig = new AdminClientConfig ( ) . setAdminSocketTimeoutSec ( 5 ) ; return new AdminClient ( adminConfig , config ) ; } | Utility function that constructs AdminClient . |
24,465 | public static List < Integer > getAllNodeIds ( AdminClient adminClient ) { List < Integer > nodeIds = Lists . newArrayList ( ) ; for ( Integer nodeId : adminClient . getAdminClientCluster ( ) . getNodeIds ( ) ) { nodeIds . add ( nodeId ) ; } return nodeIds ; } | Utility function that fetches node ids . |
24,466 | public static List < String > getAllUserStoreNamesOnNode ( AdminClient adminClient , Integer nodeId ) { List < String > storeNames = Lists . newArrayList ( ) ; List < StoreDefinition > storeDefinitionList = adminClient . metadataMgmtOps . getRemoteStoreDefList ( nodeId ) . getValue ( ) ; for ( StoreDefinition storeDefinition : storeDefinitionList ) { storeNames . add ( storeDefinition . getName ( ) ) ; } return storeNames ; } | Utility function that fetches all stores on a node . |
24,467 | public static void validateUserStoreNamesOnNode ( AdminClient adminClient , Integer nodeId , List < String > storeNames ) { List < StoreDefinition > storeDefList = adminClient . metadataMgmtOps . getRemoteStoreDefList ( nodeId ) . getValue ( ) ; Map < String , Boolean > existingStoreNames = new HashMap < String , Boolean > ( ) ; for ( StoreDefinition storeDef : storeDefList ) { existingStoreNames . put ( storeDef . getName ( ) , true ) ; } for ( String storeName : storeNames ) { if ( ! Boolean . TRUE . equals ( existingStoreNames . get ( storeName ) ) ) { Utils . croak ( "Store " + storeName + " does not exist!" ) ; } } } | Utility function that checks if store names are valid on a node . |
24,468 | public static List < Integer > getAllPartitions ( AdminClient adminClient ) { List < Integer > partIds = Lists . newArrayList ( ) ; partIds = Lists . newArrayList ( ) ; for ( Node node : adminClient . getAdminClientCluster ( ) . getNodes ( ) ) { partIds . addAll ( node . getPartitionIds ( ) ) ; } return partIds ; } | Utility function that fetches partitions . |
24,469 | public static List < QuotaType > getQuotaTypes ( List < String > strQuotaTypes ) { if ( strQuotaTypes . size ( ) < 1 ) { throw new VoldemortException ( "Quota type not specified." ) ; } List < QuotaType > quotaTypes ; if ( strQuotaTypes . size ( ) == 1 && strQuotaTypes . get ( 0 ) . equals ( AdminToolUtils . QUOTATYPE_ALL ) ) { quotaTypes = Arrays . asList ( QuotaType . values ( ) ) ; } else { quotaTypes = new ArrayList < QuotaType > ( ) ; for ( String strQuotaType : strQuotaTypes ) { QuotaType type = QuotaType . valueOf ( strQuotaType ) ; quotaTypes . add ( type ) ; } } return quotaTypes ; } | Utility function that fetches quota types . |
24,470 | public static File createDir ( String dir ) { File directory = null ; if ( dir != null ) { directory = new File ( dir ) ; if ( ! ( directory . exists ( ) || directory . mkdir ( ) ) ) { Utils . croak ( "Can't find or create directory " + dir ) ; } } return directory ; } | Utility function that creates directory . |
24,471 | public static Map < String , StoreDefinition > getSystemStoreDefMap ( ) { Map < String , StoreDefinition > sysStoreDefMap = Maps . newHashMap ( ) ; List < StoreDefinition > storesDefs = SystemStoreConstants . getAllSystemStoreDefs ( ) ; for ( StoreDefinition def : storesDefs ) { sysStoreDefMap . put ( def . getName ( ) , def ) ; } return sysStoreDefMap ; } | Utility function that fetches system store definitions |
24,472 | public static Map < String , StoreDefinition > getUserStoreDefMapOnNode ( AdminClient adminClient , Integer nodeId ) { List < StoreDefinition > storeDefinitionList = adminClient . metadataMgmtOps . getRemoteStoreDefList ( nodeId ) . getValue ( ) ; Map < String , StoreDefinition > storeDefinitionMap = Maps . newHashMap ( ) ; for ( StoreDefinition storeDefinition : storeDefinitionList ) { storeDefinitionMap . put ( storeDefinition . getName ( ) , storeDefinition ) ; } return storeDefinitionMap ; } | Utility function that fetches user defined store definitions |
24,473 | public static RebalanceTaskInfo decodeRebalanceTaskInfoMap ( VAdminProto . RebalanceTaskInfoMap rebalanceTaskInfoMap ) { RebalanceTaskInfo rebalanceTaskInfo = new RebalanceTaskInfo ( rebalanceTaskInfoMap . getStealerId ( ) , rebalanceTaskInfoMap . getDonorId ( ) , decodeStoreToPartitionIds ( rebalanceTaskInfoMap . getPerStorePartitionIdsList ( ) ) , new ClusterMapper ( ) . readCluster ( new StringReader ( rebalanceTaskInfoMap . getInitialCluster ( ) ) ) ) ; return rebalanceTaskInfo ; } | Given a protobuf rebalance - partition info converts it into our rebalance - partition info |
24,474 | public static RebalanceTaskInfoMap encodeRebalanceTaskInfoMap ( RebalanceTaskInfo stealInfo ) { return RebalanceTaskInfoMap . newBuilder ( ) . setStealerId ( stealInfo . getStealerId ( ) ) . setDonorId ( stealInfo . getDonorId ( ) ) . addAllPerStorePartitionIds ( ProtoUtils . encodeStoreToPartitionsTuple ( stealInfo . getStoreToPartitionIds ( ) ) ) . setInitialCluster ( new ClusterMapper ( ) . writeCluster ( stealInfo . getInitialCluster ( ) ) ) . build ( ) ; } | Given a rebalance - task info convert it into the protobuf equivalent |
24,475 | public Versioned < E > getVersionedById ( int id ) { Versioned < VListNode < E > > listNode = getListNode ( id ) ; if ( listNode == null ) throw new IndexOutOfBoundsException ( ) ; return new Versioned < E > ( listNode . getValue ( ) . getValue ( ) , listNode . getVersion ( ) ) ; } | Get the versioned element stored at the given id or throw IndexOutOfBoundsException if none exists .
24,476 | public E setById ( final int id , final E element ) { VListKey < K > key = new VListKey < K > ( _key , id ) ; UpdateElementById < K , E > updateElementAction = new UpdateElementById < K , E > ( key , element ) ; if ( ! _storeClient . applyUpdate ( updateElementAction ) ) throw new ObsoleteVersionException ( "update failed" ) ; return updateElementAction . getResult ( ) ; } | Put the given value to the appropriate id in the stack using the version of the current list node identified by that id . |
24,477 | private void allClustersEqual ( final List < String > clusterUrls ) { Validate . notEmpty ( clusterUrls , "clusterUrls cannot be null" ) ; if ( clusterUrls . size ( ) == 1 ) return ; AdminClient adminClientLhs = adminClientPerCluster . get ( clusterUrls . get ( 0 ) ) ; Cluster clusterLhs = adminClientLhs . getAdminClientCluster ( ) ; for ( int index = 1 ; index < clusterUrls . size ( ) ; index ++ ) { AdminClient adminClientRhs = adminClientPerCluster . get ( clusterUrls . get ( index ) ) ; Cluster clusterRhs = adminClientRhs . getAdminClientCluster ( ) ; if ( ! areTwoClustersEqual ( clusterLhs , clusterRhs ) ) throw new VoldemortException ( "Cluster " + clusterLhs . getName ( ) + " is not the same as " + clusterRhs . getName ( ) ) ; } } | Check if all cluster objects in the list are congruent . |
24,478 | private synchronized JsonSchema getInputPathJsonSchema ( ) throws IOException { if ( inputPathJsonSchema == null ) { inputPathJsonSchema = HadoopUtils . getSchemaFromPath ( getInputPath ( ) ) ; } return inputPathJsonSchema ; } | Get the Json Schema of the input path assuming the path contains just one schema version in all files under that path . |
24,479 | private synchronized Schema getInputPathAvroSchema ( ) throws IOException { if ( inputPathAvroSchema == null ) { inputPathAvroSchema = AvroUtils . getAvroSchemaFromPath ( getInputPath ( ) ) ; } return inputPathAvroSchema ; } | Get the Avro Schema of the input path assuming the path contains just one schema version in all files under that path . |
24,480 | public String getRecordSchema ( ) throws IOException { Schema schema = getInputPathAvroSchema ( ) ; String recSchema = schema . toString ( ) ; return recSchema ; } | Get the schema for the Avro Record from the object container file |
24,481 | public String getKeySchema ( ) throws IOException { Schema schema = getInputPathAvroSchema ( ) ; String keySchema = schema . getField ( keyFieldName ) . schema ( ) . toString ( ) ; return keySchema ; } | Extract schema of the key field |
24,482 | public String getValueSchema ( ) throws IOException { Schema schema = getInputPathAvroSchema ( ) ; String valueSchema = schema . getField ( valueFieldName ) . schema ( ) . toString ( ) ; return valueSchema ; } | Extract schema of the value field |
24,483 | private void verifyOrAddStore ( String clusterURL , String keySchema , String valueSchema ) { String newStoreDefXml = VoldemortUtils . getStoreDefXml ( storeName , props . getInt ( BUILD_REPLICATION_FACTOR , 2 ) , props . getInt ( BUILD_REQUIRED_READS , 1 ) , props . getInt ( BUILD_REQUIRED_WRITES , 1 ) , props . getNullableInt ( BUILD_PREFERRED_READS ) , props . getNullableInt ( BUILD_PREFERRED_WRITES ) , props . getString ( PUSH_FORCE_SCHEMA_KEY , keySchema ) , props . getString ( PUSH_FORCE_SCHEMA_VALUE , valueSchema ) , description , owners ) ; log . info ( "Verifying store against cluster URL: " + clusterURL + "\n" + newStoreDefXml . toString ( ) ) ; StoreDefinition newStoreDef = VoldemortUtils . getStoreDef ( newStoreDefXml ) ; try { adminClientPerCluster . get ( clusterURL ) . storeMgmtOps . verifyOrAddStore ( newStoreDef , "BnP config/data" , enableStoreCreation , this . storeVerificationExecutorService ) ; } catch ( UnreachableStoreException e ) { log . info ( "verifyOrAddStore() failed on some nodes for clusterURL: " + clusterURL + " (this is harmless)." , e ) ; } storeDef = newStoreDef ; } | For each node checks if the store exists and then verifies that the remote schema matches the new one . If the remote store doesn t exist it creates it . |
24,484 | public void syncInternalStateFromFileSystem ( boolean alsoSyncRemoteState ) { for ( Long version : versionToEnabledMap . keySet ( ) ) { File [ ] existingVersionDirs = ReadOnlyUtils . getVersionDirs ( rootDir , version , version ) ; if ( existingVersionDirs . length == 0 ) { removeVersion ( version , alsoSyncRemoteState ) ; } } File [ ] versionDirs = ReadOnlyUtils . getVersionDirs ( rootDir ) ; if ( versionDirs != null ) { for ( File versionDir : versionDirs ) { long versionNumber = ReadOnlyUtils . getVersionId ( versionDir ) ; boolean versionEnabled = isVersionEnabled ( versionDir ) ; versionToEnabledMap . put ( versionNumber , versionEnabled ) ; } } File currentVersionDir = ReadOnlyUtils . getCurrentVersion ( rootDir ) ; if ( currentVersionDir != null ) { currentVersion = ReadOnlyUtils . getVersionId ( currentVersionDir ) ; } else { currentVersion = - 1 ; } logger . info ( "Successfully synced internal state from local file-system: " + this . toString ( ) ) ; } | Compares the StoreVersionManager s internal state with the content on the file - system of the rootDir provided at construction time . |
24,485 | private void persistDisabledVersion ( long version ) throws PersistenceFailureException { File disabledMarker = getDisabledMarkerFile ( version ) ; try { disabledMarker . createNewFile ( ) ; } catch ( IOException e ) { throw new PersistenceFailureException ( "Failed to create the disabled marker at path: " + disabledMarker . getAbsolutePath ( ) + "\nThe store/version " + "will remain disabled only until the next restart." , e ) ; } } | Places a disabled marker file in the directory of the specified version . |
24,486 | private void persistEnabledVersion ( long version ) throws PersistenceFailureException { File disabledMarker = getDisabledMarkerFile ( version ) ; if ( disabledMarker . exists ( ) ) { if ( ! disabledMarker . delete ( ) ) { throw new PersistenceFailureException ( "Failed to create the disabled marker at path: " + disabledMarker . getAbsolutePath ( ) + "\nThe store/version " + "will remain enabled only until the next restart." ) ; } } } | Deletes the disabled marker file in the directory of the specified version . |
24,487 | private File getDisabledMarkerFile ( long version ) throws PersistenceFailureException { File [ ] versionDirArray = ReadOnlyUtils . getVersionDirs ( rootDir , version , version ) ; if ( versionDirArray . length == 0 ) { throw new PersistenceFailureException ( "getDisabledMarkerFile did not find the requested version directory" + " on disk. Version: " + version + ", rootDir: " + rootDir ) ; } File disabledMarkerFile = new File ( versionDirArray [ 0 ] , DISABLED_MARKER_NAME ) ; return disabledMarkerFile ; } | Gets the . disabled file for a given version of this store . That file may or may not exist . |
24,488 | public Double getAvgEventValue ( ) { resetIfNeeded ( ) ; synchronized ( this ) { long eventsLastInterval = numEventsLastInterval - numEventsLastLastInterval ; if ( eventsLastInterval > 0 ) return ( ( totalEventValueLastInterval - totalEventValueLastLastInterval ) * 1.0 ) / eventsLastInterval ; else return 0.0 ; } } | Returns the average event value in the current interval |
24,489 | @ SuppressWarnings ( "unchecked" ) public static void executeCommand ( String [ ] args ) throws IOException { OptionParser parser = getParser ( ) ; List < String > metaKeys = null ; String url = null ; args = AdminToolUtils . copyArrayAddFirst ( args , "--" + OPT_HEAD_META_CHECK ) ; OptionSet options = parser . parse ( args ) ; if ( options . has ( AdminParserUtils . OPT_HELP ) ) { printHelp ( System . out ) ; return ; } AdminParserUtils . checkRequired ( options , OPT_HEAD_META_CHECK ) ; AdminParserUtils . checkRequired ( options , AdminParserUtils . OPT_URL ) ; metaKeys = ( List < String > ) options . valuesOf ( OPT_HEAD_META_CHECK ) ; url = ( String ) options . valueOf ( AdminParserUtils . OPT_URL ) ; if ( metaKeys . size ( ) == 0 || ( metaKeys . size ( ) == 1 && metaKeys . get ( 0 ) . equals ( METAKEY_ALL ) ) ) { metaKeys = Lists . newArrayList ( ) ; metaKeys . add ( MetadataStore . CLUSTER_KEY ) ; metaKeys . add ( MetadataStore . STORES_KEY ) ; metaKeys . add ( MetadataStore . SERVER_STATE_KEY ) ; } AdminClient adminClient = AdminToolUtils . getAdminClient ( url ) ; doMetaCheck ( adminClient , metaKeys ) ; } | Parses command - line and checks if metadata is consistent across all nodes . |
24,490 | @ SuppressWarnings ( "unchecked" ) public static void executeCommand ( String [ ] args ) throws IOException { OptionParser parser = getParser ( ) ; String url = null ; List < Integer > nodeIds = null ; Boolean allNodes = true ; Boolean confirm = false ; OptionSet options = parser . parse ( args ) ; if ( options . has ( AdminParserUtils . OPT_HELP ) ) { printHelp ( System . out ) ; return ; } AdminParserUtils . checkRequired ( options , AdminParserUtils . OPT_URL ) ; AdminParserUtils . checkOptional ( options , AdminParserUtils . OPT_NODE , AdminParserUtils . OPT_ALL_NODES ) ; url = ( String ) options . valueOf ( AdminParserUtils . OPT_URL ) ; if ( options . has ( AdminParserUtils . OPT_NODE ) ) { nodeIds = ( List < Integer > ) options . valuesOf ( AdminParserUtils . OPT_NODE ) ; allNodes = false ; } if ( options . has ( AdminParserUtils . OPT_CONFIRM ) ) { confirm = true ; } System . out . println ( "Remove metadata related to rebalancing" ) ; System . out . println ( "Location:" ) ; System . out . println ( " bootstrap url = " + url ) ; if ( allNodes ) { System . out . println ( " node = all nodes" ) ; } else { System . out . println ( " node = " + Joiner . on ( ", " ) . join ( nodeIds ) ) ; } if ( ! AdminToolUtils . askConfirm ( confirm , "remove metadata related to rebalancing" ) ) { return ; } AdminClient adminClient = AdminToolUtils . getAdminClient ( url ) ; if ( allNodes ) { nodeIds = AdminToolUtils . getAllNodeIds ( adminClient ) ; } AdminToolUtils . assertServerNotInRebalancingState ( adminClient , nodeIds ) ; doMetaClearRebalance ( adminClient , nodeIds ) ; } | Parses command - line and removes metadata related to rebalancing . |
24,491 | public static void doMetaClearRebalance ( AdminClient adminClient , List < Integer > nodeIds ) { AdminToolUtils . assertServerNotInOfflineState ( adminClient , nodeIds ) ; System . out . println ( "Setting " + MetadataStore . SERVER_STATE_KEY + " to " + MetadataStore . VoldemortState . NORMAL_SERVER ) ; doMetaSet ( adminClient , nodeIds , MetadataStore . SERVER_STATE_KEY , MetadataStore . VoldemortState . NORMAL_SERVER . toString ( ) ) ; RebalancerState state = RebalancerState . create ( "[]" ) ; System . out . println ( "Cleaning up " + MetadataStore . REBALANCING_STEAL_INFO + " to " + state . toJsonString ( ) ) ; doMetaSet ( adminClient , nodeIds , MetadataStore . REBALANCING_STEAL_INFO , state . toJsonString ( ) ) ; System . out . println ( "Cleaning up " + MetadataStore . REBALANCING_SOURCE_CLUSTER_XML + " to empty string" ) ; doMetaSet ( adminClient , nodeIds , MetadataStore . REBALANCING_SOURCE_CLUSTER_XML , "" ) ; } | Removes metadata related to rebalancing . |
24,492 | @ SuppressWarnings ( "unchecked" ) public static void executeCommand ( String [ ] args ) throws IOException { OptionParser parser = getParser ( ) ; List < String > metaKeys = null ; String url = null ; String dir = null ; List < Integer > nodeIds = null ; Boolean allNodes = true ; Boolean verbose = false ; args = AdminToolUtils . copyArrayAddFirst ( args , "--" + OPT_HEAD_META_GET ) ; OptionSet options = parser . parse ( args ) ; if ( options . has ( AdminParserUtils . OPT_HELP ) ) { printHelp ( System . out ) ; return ; } AdminParserUtils . checkRequired ( options , OPT_HEAD_META_GET ) ; AdminParserUtils . checkRequired ( options , AdminParserUtils . OPT_URL ) ; AdminParserUtils . checkOptional ( options , AdminParserUtils . OPT_NODE , AdminParserUtils . OPT_ALL_NODES ) ; metaKeys = ( List < String > ) options . valuesOf ( OPT_HEAD_META_GET ) ; url = ( String ) options . valueOf ( AdminParserUtils . OPT_URL ) ; if ( options . has ( AdminParserUtils . OPT_DIR ) ) { dir = ( String ) options . valueOf ( AdminParserUtils . OPT_DIR ) ; } if ( options . has ( AdminParserUtils . OPT_NODE ) ) { nodeIds = ( List < Integer > ) options . valuesOf ( AdminParserUtils . OPT_NODE ) ; allNodes = false ; } if ( options . has ( OPT_VERBOSE ) ) { verbose = true ; } File directory = AdminToolUtils . createDir ( dir ) ; AdminClient adminClient = AdminToolUtils . getAdminClient ( url ) ; if ( allNodes ) { nodeIds = AdminToolUtils . getAllNodeIds ( adminClient ) ; } if ( metaKeys . size ( ) == 1 && metaKeys . get ( 0 ) . equals ( METAKEY_ALL ) ) { metaKeys = Lists . newArrayList ( ) ; for ( Object key : MetadataStore . METADATA_KEYS ) { metaKeys . add ( ( String ) key ) ; } } doMetaGet ( adminClient , nodeIds , metaKeys , directory , verbose ) ; } | Parses command - line and gets metadata . |
24,493 | @ SuppressWarnings ( "unchecked" ) public static void executeCommand ( String [ ] args ) throws IOException { OptionParser parser = getParser ( ) ; List < String > metaKeys = null ; String url = null ; List < Integer > nodeIds = null ; Boolean allNodes = true ; List < String > storeNames = null ; args = AdminToolUtils . copyArrayAddFirst ( args , "--" + OPT_HEAD_META_GET_RO ) ; OptionSet options = parser . parse ( args ) ; if ( options . has ( AdminParserUtils . OPT_HELP ) ) { printHelp ( System . out ) ; return ; } AdminParserUtils . checkRequired ( options , OPT_HEAD_META_GET_RO ) ; AdminParserUtils . checkRequired ( options , AdminParserUtils . OPT_URL ) ; AdminParserUtils . checkOptional ( options , AdminParserUtils . OPT_NODE , AdminParserUtils . OPT_ALL_NODES ) ; AdminParserUtils . checkRequired ( options , AdminParserUtils . OPT_STORE ) ; metaKeys = ( List < String > ) options . valuesOf ( OPT_HEAD_META_GET_RO ) ; url = ( String ) options . valueOf ( AdminParserUtils . OPT_URL ) ; if ( options . has ( AdminParserUtils . OPT_NODE ) ) { nodeIds = ( List < Integer > ) options . valuesOf ( AdminParserUtils . OPT_NODE ) ; allNodes = false ; } storeNames = ( List < String > ) options . valuesOf ( AdminParserUtils . OPT_STORE ) ; AdminClient adminClient = AdminToolUtils . getAdminClient ( url ) ; if ( allNodes ) { nodeIds = AdminToolUtils . getAllNodeIds ( adminClient ) ; } if ( metaKeys . size ( ) == 1 && metaKeys . get ( 0 ) . equals ( METAKEY_ALL ) ) { metaKeys = Lists . newArrayList ( ) ; metaKeys . add ( KEY_MAX_VERSION ) ; metaKeys . add ( KEY_CURRENT_VERSION ) ; metaKeys . add ( KEY_STORAGE_FORMAT ) ; } doMetaGetRO ( adminClient , nodeIds , storeNames , metaKeys ) ; } | Parses command - line and gets read - only metadata . |
24,494 | public static void doMetaGetRO ( AdminClient adminClient , Collection < Integer > nodeIds , List < String > storeNames , List < String > metaKeys ) throws IOException { for ( String key : metaKeys ) { System . out . println ( "Metadata: " + key ) ; if ( ! key . equals ( KEY_MAX_VERSION ) && ! key . equals ( KEY_CURRENT_VERSION ) && ! key . equals ( KEY_STORAGE_FORMAT ) ) { System . out . println ( " Invalid read-only metadata key: " + key ) ; } else { for ( Integer nodeId : nodeIds ) { String hostName = adminClient . getAdminClientCluster ( ) . getNodeById ( nodeId ) . getHost ( ) ; System . out . println ( " Node: " + hostName + ":" + nodeId ) ; if ( key . equals ( KEY_MAX_VERSION ) ) { Map < String , Long > mapStoreToROVersion = adminClient . readonlyOps . getROMaxVersion ( nodeId , storeNames ) ; for ( String storeName : mapStoreToROVersion . keySet ( ) ) { System . out . println ( " " + storeName + ":" + mapStoreToROVersion . get ( storeName ) ) ; } } else if ( key . equals ( KEY_CURRENT_VERSION ) ) { Map < String , Long > mapStoreToROVersion = adminClient . readonlyOps . getROCurrentVersion ( nodeId , storeNames ) ; for ( String storeName : mapStoreToROVersion . keySet ( ) ) { System . out . println ( " " + storeName + ":" + mapStoreToROVersion . get ( storeName ) ) ; } } else if ( key . equals ( KEY_STORAGE_FORMAT ) ) { Map < String , String > mapStoreToROFormat = adminClient . readonlyOps . getROStorageFormat ( nodeId , storeNames ) ; for ( String storeName : mapStoreToROFormat . keySet ( ) ) { System . out . println ( " " + storeName + ":" + mapStoreToROFormat . get ( storeName ) ) ; } } } } System . out . println ( ) ; } } | Gets read - only metadata . |
24,495 | public static void doMetaUpdateVersionsOnStores ( AdminClient adminClient , List < StoreDefinition > oldStoreDefs , List < StoreDefinition > newStoreDefs ) { Set < String > storeNamesUnion = new HashSet < String > ( ) ; Map < String , StoreDefinition > oldStoreDefinitionMap = new HashMap < String , StoreDefinition > ( ) ; Map < String , StoreDefinition > newStoreDefinitionMap = new HashMap < String , StoreDefinition > ( ) ; List < String > storesChanged = new ArrayList < String > ( ) ; for ( StoreDefinition storeDef : oldStoreDefs ) { String storeName = storeDef . getName ( ) ; storeNamesUnion . add ( storeName ) ; oldStoreDefinitionMap . put ( storeName , storeDef ) ; } for ( StoreDefinition storeDef : newStoreDefs ) { String storeName = storeDef . getName ( ) ; storeNamesUnion . add ( storeName ) ; newStoreDefinitionMap . put ( storeName , storeDef ) ; } for ( String storeName : storeNamesUnion ) { StoreDefinition oldStoreDef = oldStoreDefinitionMap . get ( storeName ) ; StoreDefinition newStoreDef = newStoreDefinitionMap . get ( storeName ) ; if ( oldStoreDef == null && newStoreDef != null || oldStoreDef != null && newStoreDef == null || oldStoreDef != null && newStoreDef != null && ! oldStoreDef . equals ( newStoreDef ) ) { storesChanged . add ( storeName ) ; } } System . out . println ( "Updating metadata version for the following stores: " + storesChanged ) ; try { adminClient . metadataMgmtOps . updateMetadataversion ( adminClient . getAdminClientCluster ( ) . getNodeIds ( ) , storesChanged ) ; } catch ( Exception e ) { System . err . println ( "Error while updating metadata version for the specified store." ) ; } } | Updates metadata versions on stores . |
24,496 | public static void executeCommand ( String [ ] args ) throws IOException { OptionParser parser = getParser ( ) ; String url = null ; Boolean confirm = false ; OptionSet options = parser . parse ( args ) ; if ( options . has ( AdminParserUtils . OPT_HELP ) ) { printHelp ( System . out ) ; return ; } AdminParserUtils . checkRequired ( options , AdminParserUtils . OPT_URL ) ; url = ( String ) options . valueOf ( AdminParserUtils . OPT_URL ) ; if ( options . has ( AdminParserUtils . OPT_CONFIRM ) ) { confirm = true ; } System . out . println ( "Synchronize metadata versions across all nodes" ) ; System . out . println ( "Location:" ) ; System . out . println ( " bootstrap url = " + url ) ; System . out . println ( " node = all nodes" ) ; AdminClient adminClient = AdminToolUtils . getAdminClient ( url ) ; AdminToolUtils . assertServerNotInRebalancingState ( adminClient ) ; Versioned < Properties > versionedProps = mergeAllVersions ( adminClient ) ; printVersions ( versionedProps ) ; if ( ! AdminToolUtils . askConfirm ( confirm , "do you want to synchronize metadata versions to all node" ) ) return ; adminClient . metadataMgmtOps . setMetadataVersion ( versionedProps ) ; } | Parses command - line and synchronizes metadata versions across all nodes . |
24,497 | public static void executeCommand ( String [ ] args ) throws IOException { OptionParser parser = getParser ( ) ; String url = null ; OptionSet options = parser . parse ( args ) ; if ( options . has ( AdminParserUtils . OPT_HELP ) ) { printHelp ( System . out ) ; return ; } AdminParserUtils . checkRequired ( options , AdminParserUtils . OPT_URL ) ; url = ( String ) options . valueOf ( AdminParserUtils . OPT_URL ) ; AdminClient adminClient = AdminToolUtils . getAdminClient ( url ) ; doMetaCheckVersion ( adminClient ) ; } | Parses command - line and verifies metadata versions on all the cluster nodes |
/**
 * Given the key, figures out which partition on the local node hosts it.
 *
 * @param key raw key bytes
 * @return the id of the local partition hosting the key; never null
 */
private Integer getKeyPartitionId(byte[] key) {
    Integer keyPartitionId = storeInstance.getNodesPartitionIdForKey(nodeId, key);
    // Fail fast if the key maps to no partition on this node.
    // NOTE(review): presumably Utils.notNull throws on null — confirm its contract.
    Utils.notNull(keyPartitionId);
    return keyPartitionId;
}
24,499 | protected boolean isItemAccepted ( byte [ ] key ) { boolean entryAccepted = false ; if ( ! fetchOrphaned ) { if ( isKeyNeeded ( key ) ) { entryAccepted = true ; } } else { if ( ! StoreRoutingPlan . checkKeyBelongsToNode ( key , nodeId , initialCluster , storeDef ) ) { entryAccepted = true ; } } return entryAccepted ; } | Determines if entry is accepted . For normal usage this means confirming that the key is needed . For orphan usage this simply means confirming the key belongs to the node . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.