idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
24,200
/**
 * Updates the definitions of the stores named in the supplied stores.xml
 * payload. Unlike put(STORES_KEY, ...), this does not delete state for
 * stores absent from the payload; it only rewrites the stores present.
 *
 * @param valueBytes versioned, UTF-8 serialized stores.xml content
 * @throws VoldemortException if any named store does not already exist
 */
@SuppressWarnings("unchecked")
public void updateStoreDefinitions(Versioned<byte[]> valueBytes) {
    writeLock.lock();
    try {
        // Decode the raw bytes into the typed list of store definitions.
        Versioned<String> value = new Versioned<String>(ByteUtils.getString(valueBytes.getValue(), "UTF-8"),
                                                        valueBytes.getVersion());
        Versioned<Object> valueObject = convertStringToObject(STORES_KEY, value);
        StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
        List<StoreDefinition> storeDefinitions = (List<StoreDefinition>) valueObject.getValue();
        // Reject invalid schemas and changes that would break existing readers.
        StoreDefinitionUtils.validateSchemasAsNeeded(storeDefinitions);
        StoreDefinitionUtils.validateNewStoreDefsAreNonBreaking(getStoreDefList(), storeDefinitions);
        for (StoreDefinition storeDef : storeDefinitions) {
            // Only existing stores may be updated here; new stores go through
            // addStoreDefinition instead.
            if (!this.storeNames.contains(storeDef.getName())) {
                throw new VoldemortException("Cannot update a store which does not exist !");
            }
            String storeDefStr = mapper.writeStore(storeDef);
            Versioned<String> versionedValueStr = new Versioned<String>(storeDefStr, value.getVersion());
            // Persist the individual definition and refresh its cache entry.
            this.storeDefinitionsStorageEngine.put(storeDef.getName(), versionedValueStr, "");
            this.metadataCache.put(storeDef.getName(),
                                   new Versioned<Object>(storeDefStr, value.getVersion()));
        }
        // Rebuild the aggregate stores.xml view and re-derive routing.
        initStoreDefinitions(value.getVersion());
        updateRoutingStrategies(getCluster(), getStoreDefList());
    } finally {
        writeLock.unlock();
    }
}
Function to update store definitions. Unlike the put method, this function does not delete any existing state; it only updates the state of the stores specified in the given stores.xml.
24,201
/**
 * A write-through put to the inner store: decodes the raw key and value
 * into their string forms, converts the value to its typed representation,
 * and delegates to the object-level put.
 */
public void put(ByteArray keyBytes, Versioned<byte[]> valueBytes, byte[] transforms)
        throws VoldemortException {
    writeLock.lock();
    try {
        final String metadataKey = ByteUtils.getString(keyBytes.get(), "UTF-8");
        final String rawValue = ByteUtils.getString(valueBytes.getValue(), "UTF-8");
        Versioned<String> versionedString = new Versioned<String>(rawValue, valueBytes.getVersion());
        this.put(metadataKey, convertStringToObject(metadataKey, versionedString));
    } finally {
        writeLock.unlock();
    }
}
A write-through put to the inner store.
24,202
/** Builds a map from store name to its definition from the given list. */
private HashMap<String, StoreDefinition> makeStoreDefinitionMap(List<StoreDefinition> storeDefs) {
    HashMap<String, StoreDefinition> byName = new HashMap<String, StoreDefinition>();
    for (StoreDefinition definition : storeDefs) {
        byName.put(definition.getName(), definition);
    }
    return byName;
}
Returns the list of store defs as a map
24,203
/**
 * Re-derives the routing strategy for every store after a cluster or store
 * definition change, caches the strategies under ROUTING_STRATEGY_KEY, and
 * notifies every registered MetadataStoreListener.
 *
 * @param cluster   current cluster topology
 * @param storeDefs current store definitions
 */
private void updateRoutingStrategies(Cluster cluster, List<StoreDefinition> storeDefs) {
    writeLock.lock();
    try {
        // Reuse the cached entry's clock, if any, so the new entry supersedes it.
        VectorClock clock = new VectorClock();
        if (metadataCache.containsKey(ROUTING_STRATEGY_KEY))
            clock = (VectorClock) metadataCache.get(ROUTING_STRATEGY_KEY).getVersion();
        logger.info("Updating routing strategy for all stores");
        HashMap<String, StoreDefinition> storeDefMap = makeStoreDefinitionMap(storeDefs);
        HashMap<String, RoutingStrategy> routingStrategyMap = createRoutingStrategyMap(cluster, storeDefMap);
        // Cache the refreshed strategies under an incremented clock.
        this.metadataCache.put(ROUTING_STRATEGY_KEY,
                               new Versioned<Object>(routingStrategyMap,
                                                     clock.incremented(getNodeId(),
                                                                       System.currentTimeMillis())));
        // Propagate the change to per-store listeners.
        for (String storeName : storeNameTolisteners.keySet()) {
            RoutingStrategy updatedRoutingStrategy = routingStrategyMap.get(storeName);
            if (updatedRoutingStrategy != null) {
                try {
                    for (MetadataStoreListener listener : storeNameTolisteners.get(storeName)) {
                        listener.updateRoutingStrategy(updatedRoutingStrategy);
                        listener.updateStoreDefinition(storeDefMap.get(storeName));
                    }
                } catch (Exception e) {
                    // A misbehaving listener must not abort the metadata update.
                    if (logger.isEnabledFor(Level.WARN))
                        logger.warn(e, e);
                }
            }
        }
    } finally {
        writeLock.unlock();
    }
}
Changes to cluster OR store definition metadata results in routing strategies changing . These changes need to be propagated to all the listeners .
24,204
/**
 * Adds the given steal information to the rebalancer state. If the server
 * is currently in NORMAL state it is first moved to
 * REBALANCING_MASTER_SERVER.
 *
 * @param stealInfo rebalance task to record
 * @throws VoldemortException if a plan for the same donor node already exists
 */
public void addRebalancingState(final RebalanceTaskInfo stealInfo) {
    writeLock.lock();
    try {
        // Transition NORMAL -> REBALANCING_MASTER_SERVER if needed.
        if (ByteUtils.getString(get(SERVER_STATE_KEY, null).get(0).getValue(), "UTF-8")
                     .compareTo(VoldemortState.NORMAL_SERVER.toString()) == 0) {
            put(SERVER_STATE_KEY, VoldemortState.REBALANCING_MASTER_SERVER);
            initCache(SERVER_STATE_KEY);
        }
        RebalancerState rebalancerState = getRebalancerState();
        // update() returns false when a plan for this donor already exists.
        if (!rebalancerState.update(stealInfo)) {
            throw new VoldemortException("Could not add steal information " + stealInfo
                                         + " since a plan for the same donor node "
                                         + stealInfo.getDonorId() + " ( "
                                         + rebalancerState.find(stealInfo.getDonorId())
                                         + " ) already exists");
        }
        // Persist and re-cache the updated state.
        put(MetadataStore.REBALANCING_STEAL_INFO, rebalancerState);
        initCache(REBALANCING_STEAL_INFO);
    } finally {
        writeLock.unlock();
    }
}
Add the steal information to the rebalancer state
24,205
/**
 * Removes the given partition-steal information from the rebalancer state.
 * When the state becomes empty all rebalancing metadata is cleaned up;
 * otherwise the shrunken state is persisted back.
 */
public void deleteRebalancingState(RebalanceTaskInfo stealInfo) {
    writeLock.lock();
    try {
        RebalancerState currentState = getRebalancerState();
        boolean removed = currentState.remove(stealInfo);
        if (!removed) {
            throw new IllegalArgumentException("Couldn't find " + stealInfo + " in "
                                               + currentState + " while deleting");
        }
        if (currentState.isEmpty()) {
            logger.debug("Cleaning all rebalancing state");
            cleanAllRebalancingState();
        } else {
            put(REBALANCING_STEAL_INFO, currentState);
            initCache(REBALANCING_STEAL_INFO);
        }
    } finally {
        writeLock.unlock();
    }
}
Delete the partition steal information from the rebalancer state
24,206
/**
 * Toggles the server between OFFLINE_SERVER and NORMAL_SERVER. Going
 * offline also disables slop streaming, partition streaming and read-only
 * fetches; returning to normal re-enables them and re-runs init.
 *
 * @param setToOffline true to go offline, false to return to normal
 * @throws VoldemortException when the requested transition is not legal
 *         from the current state
 */
public void setOfflineState(boolean setToOffline) {
    writeLock.lock();
    try {
        String currentState = ByteUtils.getString(get(SERVER_STATE_KEY, null).get(0).getValue(), "UTF-8");
        if (setToOffline) {
            // NORMAL -> OFFLINE is the only legal way into offline mode.
            if (currentState.equals(VoldemortState.NORMAL_SERVER.toString())) {
                put(SERVER_STATE_KEY, VoldemortState.OFFLINE_SERVER);
                initCache(SERVER_STATE_KEY);
                // Disable all background data movement while offline.
                put(SLOP_STREAMING_ENABLED_KEY, false);
                initCache(SLOP_STREAMING_ENABLED_KEY);
                put(PARTITION_STREAMING_ENABLED_KEY, false);
                initCache(PARTITION_STREAMING_ENABLED_KEY);
                put(READONLY_FETCH_ENABLED_KEY, false);
                initCache(READONLY_FETCH_ENABLED_KEY);
            } else if (currentState.equals(VoldemortState.OFFLINE_SERVER.toString())) {
                // Idempotent: already offline.
                logger.warn("Already in OFFLINE_SERVER state.");
                return;
            } else {
                logger.error("Cannot enter OFFLINE_SERVER state from " + currentState);
                throw new VoldemortException("Cannot enter OFFLINE_SERVER state from " + currentState);
            }
        } else {
            if (currentState.equals(VoldemortState.NORMAL_SERVER.toString())) {
                // Idempotent: already normal.
                logger.warn("Already in NORMAL_SERVER state.");
                return;
            } else if (currentState.equals(VoldemortState.OFFLINE_SERVER.toString())) {
                // OFFLINE -> NORMAL: re-enable streaming/fetching, then reinitialize.
                put(SERVER_STATE_KEY, VoldemortState.NORMAL_SERVER);
                initCache(SERVER_STATE_KEY);
                put(SLOP_STREAMING_ENABLED_KEY, true);
                initCache(SLOP_STREAMING_ENABLED_KEY);
                put(PARTITION_STREAMING_ENABLED_KEY, true);
                initCache(PARTITION_STREAMING_ENABLED_KEY);
                put(READONLY_FETCH_ENABLED_KEY, true);
                initCache(READONLY_FETCH_ENABLED_KEY);
                init();
                initNodeId(getNodeIdNoLock());
            } else {
                logger.error("Cannot enter NORMAL_SERVER state from " + currentState);
                throw new VoldemortException("Cannot enter NORMAL_SERVER state from " + currentState);
            }
        }
    } finally {
        writeLock.unlock();
    }
}
Changes the server state between OFFLINE_SERVER and NORMAL_SERVER.
24,207
/**
 * Adds a brand-new store to the metadata store: validates its schema,
 * persists its XML definition, caches it, rebuilds the aggregate stores.xml
 * view and refreshes routing strategies.
 *
 * @param storeDef definition of the store to add
 * @throws VoldemortException if a store with the same name already exists
 */
public void addStoreDefinition(StoreDefinition storeDef) {
    writeLock.lock();
    try {
        if (this.storeNames.contains(storeDef.getName())) {
            throw new VoldemortException("Store already exists !");
        }
        StoreDefinitionUtils.validateSchemaAsNeeded(storeDef);
        StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
        String storeDefStr = mapper.writeStore(storeDef);
        // No explicit version: the single-arg Versioned constructor is used
        // for a brand-new entry.
        Versioned<String> versionedValueStr = new Versioned<String>(storeDefStr);
        this.storeDefinitionsStorageEngine.put(storeDef.getName(), versionedValueStr, null);
        this.metadataCache.put(storeDef.getName(), new Versioned<Object>(storeDefStr));
        // Rebuild the aggregate stores.xml and re-derive routing.
        initStoreDefinitions(null);
        updateRoutingStrategies(getCluster(), getStoreDefList());
    } finally {
        writeLock.unlock();
    }
}
Function to add a new store to the metadata store: the definition is validated, persisted, cached, and routing strategies are refreshed.
24,208
/**
 * Removes the named store from the metadata store: deletes its persisted
 * definition, evicts it from the metadata cache, and rebuilds the
 * aggregate store metadata.
 */
public void deleteStoreDefinition(String storeName) {
    writeLock.lock();
    try {
        boolean known = this.storeNames.contains(storeName);
        if (!known) {
            throw new VoldemortException("Requested store to be deleted does not exist !");
        }
        this.storeDefinitionsStorageEngine.delete(storeName, null);
        this.metadataCache.remove(storeName);
        initStoreDefinitions(null);
    } finally {
        writeLock.unlock();
    }
}
Function to delete the specified store from the metadata store: its persisted definition and cache entry are removed and the aggregate store metadata is rebuilt.
24,209
/**
 * Checks whether the given store name is managed by this MetadataStore.
 * Used by the Admin service to validate a get-metadata request before
 * serving it.
 *
 * @param name store name to look up
 * @return true iff the store exists
 */
public boolean isValidStore(String name) {
    readLock.lock();
    try {
        // Return the membership test directly instead of the original
        // "if (contains) return true; return false;" pattern.
        return this.storeNames.contains(name);
    } finally {
        readLock.unlock();
    }
}
Utility function to validate if the given store name exists in the store name list managed by MetadataStore . This is used by the Admin service for validation before serving a get - metadata request .
24,210
/**
 * Initializes the metadataCache for this MetadataStore: loads the cluster
 * and store definitions, seeds the system stores and routing strategies,
 * and installs defaults for operational flags and rebalancing keys.
 */
private void init() {
    logger.info("metadata init().");
    writeLock.lock();
    try {
        initCache(CLUSTER_KEY);
        // Store definitions come from the dedicated storage engine when one
        // is configured; otherwise fall back to the legacy STORES_KEY entry.
        if (this.storeDefinitionsStorageEngine != null) {
            initStoreDefinitions(null);
        } else {
            initCache(STORES_KEY);
        }
        initSystemCache();
        initSystemRoutingStrategies(getCluster());
        // Operational flags default to enabled.
        initCache(SLOP_STREAMING_ENABLED_KEY, true);
        initCache(PARTITION_STREAMING_ENABLED_KEY, true);
        initCache(READONLY_FETCH_ENABLED_KEY, true);
        initCache(QUOTA_ENFORCEMENT_ENABLED_KEY, true);
        // Rebalancing state starts out empty, server starts as NORMAL.
        initCache(REBALANCING_STEAL_INFO, new RebalancerState(new ArrayList<RebalanceTaskInfo>()));
        initCache(SERVER_STATE_KEY, VoldemortState.NORMAL_SERVER.toString());
        initCache(REBALANCING_SOURCE_CLUSTER_XML, null);
        initCache(REBALANCING_SOURCE_STORES_XML, null);
    } finally {
        writeLock.unlock();
    }
}
Initializes the metadataCache for MetadataStore
24,211
/**
 * Rebuilds the in-memory store metadata from the store definitions storage
 * engine: refreshes the store-name list, caches each store's XML
 * definition, and regenerates the aggregate stores.xml entry under
 * STORES_KEY.
 *
 * @param storesXmlVersion version to stamp on the aggregate stores.xml
 *        entry; when null, the maximum version observed across individual
 *        store entries is used instead
 * @throws VoldemortException if no store definitions storage engine is configured
 */
private void initStoreDefinitions(Version storesXmlVersion) {
    if (this.storeDefinitionsStorageEngine == null) {
        throw new VoldemortException("The store definitions directory is empty");
    }

    Version finalStoresXmlVersion = null;
    if (storesXmlVersion != null) {
        finalStoresXmlVersion = storesXmlVersion;
    }
    this.storeNames.clear();

    // Read all store definitions, tracking the maximum version seen.
    Map<String, Versioned<String>> storeNameToDefMap = new HashMap<String, Versioned<String>>();
    Version maxVersion = null;
    ClosableIterator<Pair<String, Versioned<String>>> storesIterator = this.storeDefinitionsStorageEngine.entries();
    try {
        while (storesIterator.hasNext()) {
            Pair<String, Versioned<String>> storeDetail = storesIterator.next();
            String storeName = storeDetail.getFirst();
            Versioned<String> versionedStoreDef = storeDetail.getSecond();
            storeNameToDefMap.put(storeName, versionedStoreDef);
            Version curVersion = versionedStoreDef.getVersion();
            if (maxVersion == null) {
                maxVersion = curVersion;
            } else if (maxVersion.compare(curVersion) == Occurred.BEFORE) {
                maxVersion = curVersion;
            }
        }
    } finally {
        // The original leaked this iterator; ClosableIterator must be closed.
        storesIterator.close();
    }

    if (finalStoresXmlVersion == null) {
        finalStoresXmlVersion = maxVersion;
    }

    // Populate the store-name list and per-store cache entries.
    for (Entry<String, Versioned<String>> storeEntry : storeNameToDefMap.entrySet()) {
        String storeName = storeEntry.getKey();
        Versioned<String> versionedStoreDef = storeEntry.getValue();
        this.storeNames.add(storeName);
        this.metadataCache.put(storeName,
                               new Versioned<Object>(versionedStoreDef.getValue(),
                                                     versionedStoreDef.getVersion()));
    }
    Collections.sort(this.storeNames);

    // Build the aggregate stores.xml with a StringBuilder instead of the
    // original repeated String concatenation (O(n^2) in total XML size).
    StringBuilder allStoreDefinitions = new StringBuilder("<stores>");
    for (String storeName : this.storeNames) {
        allStoreDefinitions.append(storeNameToDefMap.get(storeName).getValue());
    }
    allStoreDefinitions.append("</stores>");

    metadataCache.put(STORES_KEY,
                      convertStringToObject(STORES_KEY,
                                            new Versioned<String>(allStoreDefinitions.toString(),
                                                                  finalStoresXmlVersion)));
}
Function to go through all the store definitions contained in the STORES directory and rebuild the cached store metadata (store names, per-store cache entries and the aggregate stores.xml entry).
24,212
/**
 * Drops all metadata for the given stores: cache entry, persisted
 * definition and store-name registration. Needed when a put on stores.xml
 * replaces the existing state wholesale.
 */
private void resetStoreDefinitions(Set<String> storeNamesToDelete) {
    for (String name : storeNamesToDelete) {
        this.metadataCache.remove(name);
        this.storeDefinitionsStorageEngine.delete(name, null);
        this.storeNames.remove(name);
    }
}
Function to clear all the metadata related to the given store definitions . This is needed when a put on stores . xml is called thus replacing the existing state .
24,213
/** Seeds the metadata cache with the built-in system store definitions. */
private synchronized void initSystemCache() {
    StringReader schemaReader = new StringReader(SystemStoreConstants.SYSTEM_STORE_SCHEMA);
    List<StoreDefinition> systemStoreDefs = storeMapper.readStoreList(schemaReader);
    metadataCache.put(SYSTEM_STORES_KEY, new Versioned<Object>(systemStoreDefs));
}
Initialize the metadata cache with system store list
24,214
/**
 * Performs a GET with the given composite request, retrying after a
 * re-bootstrap whenever the server reports stale metadata.
 *
 * @param requestWrapper composite request carrying key and routing timeout
 * @return the versioned values found for the request's key
 * @throws VoldemortException when all metadata refresh attempts fail
 */
public List<Versioned<V>> getWithCustomTimeout(CompositeVoldemortRequest<K, V> requestWrapper) {
    validateTimeout(requestWrapper.getRoutingTimeoutInMs());
    for (int attempts = 0; attempts < this.metadataRefreshAttempts; attempts++) {
        try {
            long startTimeInMs = System.currentTimeMillis();
            String keyHexString = "";
            if (logger.isDebugEnabled()) {
                ByteArray key = (ByteArray) requestWrapper.getKey();
                keyHexString = RestUtils.getKeyHexString(key);
                debugLogStart("GET", requestWrapper.getRequestOriginTimeInMs(), startTimeInMs, keyHexString);
            }
            List<Versioned<V>> items = store.get(requestWrapper);
            if (logger.isDebugEnabled()) {
                // Total vector-clock entries across all returned versions,
                // reported in the debug trace.
                int vcEntrySize = 0;
                for (Versioned<V> vc : items) {
                    vcEntrySize += ((VectorClock) vc.getVersion()).getVersionMap().size();
                }
                debugLogEnd("GET", requestWrapper.getRequestOriginTimeInMs(), startTimeInMs,
                            System.currentTimeMillis(), keyHexString, vcEntrySize);
            }
            return items;
        } catch (InvalidMetadataException e) {
            // Stale routing info: re-bootstrap and retry.
            logger.info("Received invalid metadata exception during get [ " + e.getMessage()
                        + " ] on store '" + storeName + "'. Rebootstrapping");
            bootStrap();
        }
    }
    throw new VoldemortException(this.metadataRefreshAttempts + " metadata refresh attempts failed.");
}
Performs a get operation with the specified composite request object
24,215
/**
 * Performs a PUT with the given composite request. Internally issues a GET
 * to obtain (and conflict-resolve) the current version, then a versioned
 * PUT with whatever routing time budget remains.
 *
 * @param requestWrapper composite request carrying key, raw value and timeout
 * @return the version that was written
 * @throws StoreTimeoutException if the nested GET used up the entire budget
 */
public Version putWithCustomTimeout(CompositeVoldemortRequest<K, V> requestWrapper) {
    validateTimeout(requestWrapper.getRoutingTimeoutInMs());
    List<Versioned<V>> versionedValues;
    long startTime = System.currentTimeMillis();
    String keyHexString = "";
    if (logger.isDebugEnabled()) {
        ByteArray key = (ByteArray) requestWrapper.getKey();
        keyHexString = RestUtils.getKeyHexString(key);
        logger.debug("PUT requested for key: " + keyHexString + " , for store: "
                     + this.storeName + " at time(in ms): " + startTime
                     + " . Nested GET and PUT VERSION requests to follow ---");
    }
    // Fetch the existing value so the new value can supersede its version.
    requestWrapper.setResolveConflicts(true);
    versionedValues = getWithCustomTimeout(requestWrapper);
    Versioned<V> versioned = getItemOrThrow(requestWrapper.getKey(), null, versionedValues);
    long endTime = System.currentTimeMillis();
    // Absent key: start from an empty clock; present: replace the payload.
    if (versioned == null)
        versioned = Versioned.value(requestWrapper.getRawValue(), new VectorClock());
    else
        versioned.setObject(requestWrapper.getRawValue());
    // Charge the time consumed by the nested GET against the routing budget.
    long timeLeft = requestWrapper.getRoutingTimeoutInMs() - (endTime - startTime);
    if (timeLeft <= 0) {
        throw new StoreTimeoutException("PUT request timed out");
    }
    CompositeVersionedPutVoldemortRequest<K, V> putVersionedRequestObject =
            new CompositeVersionedPutVoldemortRequest<K, V>(requestWrapper.getKey(), versioned, timeLeft);
    putVersionedRequestObject.setRequestOriginTimeInMs(requestWrapper.getRequestOriginTimeInMs());
    Version result = putVersionedWithCustomTimeout(putVersionedRequestObject);
    long endTimeInMs = System.currentTimeMillis();
    if (logger.isDebugEnabled()) {
        logger.debug("PUT response received for key: " + keyHexString + " , for store: "
                     + this.storeName + " at time(in ms): " + endTimeInMs);
    }
    return result;
}
Performs a put operation with the specified composite request object
24,216
/**
 * Performs a versioned PUT with the given composite request, retrying after
 * a re-bootstrap whenever the server reports stale metadata.
 *
 * @param requestWrapper composite request carrying key, versioned value and timeout
 * @return the version of the value that was written
 * @throws ObsoleteVersionException if the supplied version is obsolete
 * @throws VoldemortException when all metadata refresh attempts fail
 */
public Version putVersionedWithCustomTimeout(CompositeVoldemortRequest<K, V> requestWrapper)
        throws ObsoleteVersionException {
    validateTimeout(requestWrapper.getRoutingTimeoutInMs());
    for (int attempts = 0; attempts < this.metadataRefreshAttempts; attempts++) {
        try {
            String keyHexString = "";
            long startTimeInMs = System.currentTimeMillis();
            if (logger.isDebugEnabled()) {
                ByteArray key = (ByteArray) requestWrapper.getKey();
                keyHexString = RestUtils.getKeyHexString(key);
                debugLogStart("PUT_VERSION", requestWrapper.getRequestOriginTimeInMs(), startTimeInMs, keyHexString);
            }
            store.put(requestWrapper);
            if (logger.isDebugEnabled()) {
                debugLogEnd("PUT_VERSION", requestWrapper.getRequestOriginTimeInMs(), startTimeInMs,
                            System.currentTimeMillis(), keyHexString, 0);
            }
            return requestWrapper.getValue().getVersion();
        } catch (InvalidMetadataException e) {
            // Stale routing info: re-bootstrap and retry.
            logger.info("Received invalid metadata exception during put [ " + e.getMessage()
                        + " ] on store '" + storeName + "'. Rebootstrapping");
            bootStrap();
        }
    }
    throw new VoldemortException(this.metadataRefreshAttempts + " metadata refresh attempts failed.");
}
Performs a Versioned put operation with the specified composite request object
24,217
/**
 * Performs a GET_ALL with the given composite request, retrying after a
 * re-bootstrap whenever the server reports stale metadata. Uses the same
 * bounded for-loop retry shape as the sibling *WithCustomTimeout methods
 * (the original checked the attempt limit inside an unbounded loop).
 *
 * @param requestWrapper composite request carrying the iterable keys and timeout
 * @return map from key to its versioned values
 * @throws VoldemortException when all metadata refresh attempts fail
 */
public Map<K, List<Versioned<V>>> getAllWithCustomTimeout(CompositeVoldemortRequest<K, V> requestWrapper) {
    validateTimeout(requestWrapper.getRoutingTimeoutInMs());
    for (int attempts = 0; attempts < this.metadataRefreshAttempts; attempts++) {
        try {
            // Renamed from KeysHexString: locals are lowerCamelCase.
            String keysHexString = "";
            long startTimeInMs = System.currentTimeMillis();
            if (logger.isDebugEnabled()) {
                Iterable<ByteArray> keys = (Iterable<ByteArray>) requestWrapper.getIterableKeys();
                keysHexString = getKeysHexString(keys);
                debugLogStart("GET_ALL", requestWrapper.getRequestOriginTimeInMs(), startTimeInMs, keysHexString);
            }
            Map<K, List<Versioned<V>>> items = store.getAll(requestWrapper);
            if (logger.isDebugEnabled()) {
                // Total vector-clock entries across all returned versions.
                int vcEntrySize = 0;
                for (List<Versioned<V>> item : items.values()) {
                    for (Versioned<V> vc : item) {
                        vcEntrySize += ((VectorClock) vc.getVersion()).getVersionMap().size();
                    }
                }
                debugLogEnd("GET_ALL", requestWrapper.getRequestOriginTimeInMs(), startTimeInMs,
                            System.currentTimeMillis(), keysHexString, vcEntrySize);
            }
            return items;
        } catch (InvalidMetadataException e) {
            // Stale routing info: re-bootstrap and retry.
            logger.info("Received invalid metadata exception during getAll [ " + e.getMessage()
                        + " ] on store '" + storeName + "'. Rebootstrapping");
            bootStrap();
        }
    }
    throw new VoldemortException(this.metadataRefreshAttempts + " metadata refresh attempts failed.");
}
Performs a get all operation with the specified composite request object
24,218
/**
 * Performs a DELETE with the given composite request. If the request
 * carries no version, a conflict-resolving GET is issued first to obtain
 * the current version, and the remaining routing time budget is passed on
 * to the version-qualified delete.
 *
 * @param deleteRequestObject composite request carrying key, optional version and timeout
 * @return true if the delete succeeded on the underlying store; false when
 *         no version was supplied and the key does not exist
 * @throws StoreTimeoutException if the nested GET consumed the entire budget
 */
public boolean deleteWithCustomTimeout(CompositeVoldemortRequest<K, V> deleteRequestObject) {
    List<Versioned<V>> versionedValues;
    validateTimeout(deleteRequestObject.getRoutingTimeoutInMs());
    // Simplified from the original "== null ? false : true".
    boolean hasVersion = deleteRequestObject.getVersion() != null;
    String keyHexString = "";
    if (!hasVersion) {
        long startTimeInMs = System.currentTimeMillis();
        if (logger.isDebugEnabled()) {
            ByteArray key = (ByteArray) deleteRequestObject.getKey();
            keyHexString = RestUtils.getKeyHexString(key);
            logger.debug("DELETE without version requested for key: " + keyHexString
                         + " , for store: " + this.storeName + " at time(in ms): "
                         + startTimeInMs + " . Nested GET and DELETE requests to follow ---");
        }
        // Resolve the current version via a conflict-resolving GET.
        deleteRequestObject.setResolveConflicts(true);
        versionedValues = getWithCustomTimeout(deleteRequestObject);
        Versioned<V> versioned = getItemOrThrow(deleteRequestObject.getKey(), null, versionedValues);
        if (versioned == null) {
            // Nothing to delete.
            return false;
        }
        long timeLeft = deleteRequestObject.getRoutingTimeoutInMs()
                        - (System.currentTimeMillis() - startTimeInMs);
        if (timeLeft < 0) {
            throw new StoreTimeoutException("DELETE request timed out");
        }
        deleteRequestObject.setVersion(versioned.getVersion());
        deleteRequestObject.setRoutingTimeoutInMs(timeLeft);
    }
    // Renamed from deleteVersionStartTimeInNs: the value is in milliseconds.
    long deleteVersionStartTimeInMs = System.currentTimeMillis();
    if (logger.isDebugEnabled()) {
        ByteArray key = (ByteArray) deleteRequestObject.getKey();
        keyHexString = RestUtils.getKeyHexString(key);
        debugLogStart("DELETE", deleteRequestObject.getRequestOriginTimeInMs(),
                      deleteVersionStartTimeInMs, keyHexString);
    }
    boolean result = store.delete(deleteRequestObject);
    if (logger.isDebugEnabled()) {
        debugLogEnd("DELETE", deleteRequestObject.getRequestOriginTimeInMs(),
                    deleteVersionStartTimeInMs, System.currentTimeMillis(), keyHexString, 0);
    }
    if (!hasVersion && logger.isDebugEnabled()) {
        logger.debug("DELETE without version response received for key: " + keyHexString
                     + ", for store: " + this.storeName + " at time(in ms): "
                     + System.currentTimeMillis());
    }
    return result;
}
Performs a delete operation with the specified composite request object
24,219
/**
 * Logs the latency between the origin timestamp carried in the HTTP request
 * and the moment the fat client starts processing it.
 */
private void debugLogStart(String operationType,
                           Long originTimeInMS,
                           Long requestReceivedTimeInMs,
                           String keyString) {
    final long restToFatClientMs = requestReceivedTimeInMs - originTimeInMS;
    logger.debug("Received a new request. Operation Type: " + operationType
                 + " , key(s): " + keyString + " , Store: " + this.storeName
                 + " , Origin time (in ms): " + originTimeInMS
                 + " . Request received at time(in ms): " + requestReceivedTimeInMs
                 + " , Duration from RESTClient to CoordinatorFatClient(in ms): "
                 + restToFatClientMs);
}
Traces the duration between origin time in the http Request and time just before being processed by the fat client
24,220
/**
 * Logs the time taken by the fat client inside the Coordinator to process
 * a single request, along with the size of the returned vector clocks.
 */
private void debugLogEnd(String operationType,
                         Long OriginTimeInMs,
                         Long RequestStartTimeInMs,
                         Long ResponseReceivedTimeInMs,
                         String keyString,
                         int numVectorClockEntries) {
    final long fatClientRoundTripMs = ResponseReceivedTimeInMs - RequestStartTimeInMs;
    logger.debug("Received a response from voldemort server for Operation Type: "
                 + operationType + " , For key(s): " + keyString + " , Store: "
                 + this.storeName + " , Origin time of request (in ms): " + OriginTimeInMs
                 + " , Response received at time (in ms): " + ResponseReceivedTimeInMs
                 + " . Request sent at(in ms): " + RequestStartTimeInMs
                 + " , Num vector clock entries: " + numVectorClockEntries
                 + " , Duration from CoordinatorFatClient back to CoordinatorFatClient(in ms): "
                 + fatClientRoundTripMs);
}
Traces the time taken just by the fat client inside Coordinator to process this request
24,221
/** Returns a copy of the node carrying the given partitions list. */
public static Node updateNode(Node node, List<Integer> partitionsList) {
    // Node is treated as immutable here: build a fresh instance with the
    // same identity/ports/zone and the replacement partition list.
    return new Node(node.getId(),
                    node.getHost(),
                    node.getHttpPort(),
                    node.getSocketPort(),
                    node.getAdminPort(),
                    node.getZoneId(),
                    partitionsList);
}
Creates a copy of the node carrying the new partitions list.
24,222
/** Returns a copy of the node with the single donated partition added. */
public static Node addPartitionToNode(final Node node, Integer donatedPartition) {
    return UpdateClusterUtils.addPartitionsToNode(node, Sets.newHashSet(donatedPartition));
}
Add a partition to the node provided
24,223
/** Returns a copy of the node with the single donated partition removed. */
public static Node removePartitionFromNode(final Node node, Integer donatedPartition) {
    return UpdateClusterUtils.removePartitionsFromNode(node, Sets.newHashSet(donatedPartition));
}
Remove a partition from the node provided
24,224
/** Returns a copy of the node with the donated partitions merged in, sorted. */
public static Node addPartitionsToNode(final Node node, final Set<Integer> donatedPartitions) {
    // Copy first so the node's own partition list is never mutated.
    List<Integer> merged = new ArrayList<Integer>(node.getPartitionIds());
    merged.addAll(donatedPartitions);
    Collections.sort(merged);
    return updateNode(node, merged);
}
Add the set of partitions to the node provided
24,225
/** Returns a copy of the node with the donated partitions removed. */
public static Node removePartitionsFromNode(final Node node, final Set<Integer> donatedPartitions) {
    // Copy first so the node's own partition list is never mutated.
    List<Integer> remaining = new ArrayList<Integer>(node.getPartitionIds());
    remaining.removeAll(donatedPartitions);
    return updateNode(node, remaining);
}
Remove the set of partitions from the node provided
24,226
/**
 * Returns a copy of the cluster in which each donated partition has been
 * moved from its current owner (the donor) to the stealer node.
 *
 * @param currentCluster    starting topology (not modified)
 * @param stealerNodeId     node that receives the partitions
 * @param donatedPartitions partition IDs to move
 */
public static Cluster createUpdatedCluster(Cluster currentCluster,
                                           int stealerNodeId,
                                           List<Integer> donatedPartitions) {
    Cluster updatedCluster = Cluster.cloneCluster(currentCluster);
    for (int donatedPartition : donatedPartitions) {
        // Resolve owners against the evolving cluster so successive moves compose.
        Node donorNode = updatedCluster.getNodeForPartitionId(donatedPartition);
        Node stealerNode = updatedCluster.getNodeById(stealerNodeId);
        if (donorNode == stealerNode) {
            // The stealer already owns this partition; nothing to move.
            continue;
        }
        donorNode = removePartitionFromNode(donorNode, donatedPartition);
        stealerNode = addPartitionToNode(stealerNode, donatedPartition);
        updatedCluster = updateCluster(updatedCluster, Lists.newArrayList(donorNode, stealerNode));
    }
    return updatedCluster;
}
Updates the existing cluster such that the mentioned partitions are removed from the donor node and added to the stealer node.
24,227
/**
 * Command-line entry point: walks the directories named on the command
 * line and prints the absolute path of every file found.
 */
public static void main(String[] args) {
    DirectoryIterator walker = new DirectoryIterator(args);
    while (walker.hasNext()) {
        System.out.println(walker.next().getAbsolutePath());
    }
}
Command line method to walk the directories provided on the command line and print out their contents
24,228
/**
 * Verifies that the cluster topology is congruent with this store
 * definition with respect to zones: the zone ID sets must match, and each
 * zone (or the whole cluster, when single-zone) must have at least as many
 * nodes as the configured replication factor. System stores are exempt.
 *
 * @throws VoldemortException on any incongruence
 */
private void verifyClusterStoreDefinition() {
    if (SystemStoreConstants.isSystemStore(storeDefinition.getName())) {
        // System stores are predefined; skip validation.
        return;
    }
    Set<Integer> clusterZoneIds = cluster.getZoneIds();
    if (clusterZoneIds.size() > 1) {
        // Zoned cluster: validate the per-zone replication factors.
        Map<Integer, Integer> zoneRepFactor = storeDefinition.getZoneReplicationFactor();
        Set<Integer> storeDefZoneIds = zoneRepFactor.keySet();
        if (!clusterZoneIds.equals(storeDefZoneIds)) {
            throw new VoldemortException("Zone IDs in cluster (" + clusterZoneIds
                                         + ") are incongruent with zone IDs in store defs ("
                                         + storeDefZoneIds + ")");
        }
        for (int zoneId : clusterZoneIds) {
            if (zoneRepFactor.get(zoneId) > cluster.getNumberOfNodesInZone(zoneId)) {
                throw new VoldemortException("Not enough nodes ("
                                             + cluster.getNumberOfNodesInZone(zoneId)
                                             + ") in zone with id " + zoneId
                                             + " for replication factor of "
                                             + zoneRepFactor.get(zoneId) + ".");
            }
        }
    } else {
        if (storeDefinition.getReplicationFactor() > cluster.getNumberOfNodes()) {
            // Include the offending definitions in the exception message
            // instead of dumping them to stderr (the original had leftover
            // System.err.println debug prints here).
            throw new VoldemortException("Not enough nodes (" + cluster.getNumberOfNodes()
                                         + ") for replication factor of "
                                         + storeDefinition.getReplicationFactor() + "."
                                         + " Store definition: " + storeDefinition
                                         + " , Cluster: " + cluster);
        }
    }
}
Verify that cluster is congruent to store def wrt zones .
24,229
/**
 * Finds the partition ID, among those replicating the key, that is hosted
 * on the given node. Returns null when the node holds no replica of the key.
 */
public Integer getNodesPartitionIdForKey(int nodeId, final byte[] key) {
    for (Integer partitionId : getReplicatingPartitionList(key)) {
        boolean hostedOnNode = getNodeIdForPartitionId(partitionId) == nodeId;
        if (hostedOnNode) {
            return partitionId;
        }
    }
    return null;
}
Determines the partition ID that replicates the key on the given node .
24,230
/**
 * Maps each partition ID to its hosting node ID. The input is expected to
 * be a replicating partition list, so the resulting node IDs must all be
 * distinct.
 *
 * @throws VoldemortException when two partitions map to the same node
 */
private List<Integer> getNodeIdListForPartitionIdList(List<Integer> partitionIds)
        throws VoldemortException {
    List<Integer> nodeIds = new ArrayList<Integer>(partitionIds.size());
    for (Integer partitionId : partitionIds) {
        int nodeId = getNodeIdForPartitionId(partitionId);
        // Guard clause instead of if/else: duplicates indicate a bad input list.
        if (nodeIds.contains(nodeId)) {
            throw new VoldemortException("Node ID " + nodeId + " already in list of Node IDs.");
        }
        nodeIds.add(nodeId);
    }
    return nodeIds;
}
Converts from partitionId to nodeId . The list of partition IDs partitionIds is expected to be a replicating partition list i . e . the mapping from partition ID to node ID should be one to one .
24,231
/**
 * Determines whether the key replicates to the given node, i.e. whether any
 * of the key's replicating partitions is hosted on that node.
 *
 * @param key    raw key bytes
 * @param nodeId node to test
 * @return true iff the node hosts at least one replica of the key
 */
public boolean checkKeyBelongsToNode(byte[] key, int nodeId) {
    List<Integer> nodePartitions = cluster.getNodeById(nodeId).getPartitionIds();
    List<Integer> replicatingPartitions = getReplicatingPartitionList(key);
    replicatingPartitions.retainAll(nodePartitions);
    // isEmpty() instead of size() > 0.
    return !replicatingPartitions.isEmpty();
}
Determines if the key replicates to the given node
24,232
/**
 * Given a key and the set of (stealer node ID, partition mapping) tuples,
 * returns the IDs of the stealer nodes that will steal this key.
 *
 * @param key raw key bytes
 * @param stealerNodeToMappingTuples pairs of stealer node ID and its
 *        replica-type to partition-list mapping
 * @param cluster  cluster topology used for routing
 * @param storeDef store definition whose routing strategy applies
 */
public static List<Integer> checkKeyBelongsToPartition(byte[] key,
                                                       Set<Pair<Integer, HashMap<Integer, List<Integer>>>> stealerNodeToMappingTuples,
                                                       Cluster cluster,
                                                       StoreDefinition storeDef) {
    // Partitions this key hashes to under the store's routing strategy.
    List<Integer> keyPartitions = new RoutingStrategyFactory().updateRoutingStrategy(storeDef, cluster)
                                                              .getPartitionList(key);
    List<Integer> nodesToPush = Lists.newArrayList();
    for (Pair<Integer, HashMap<Integer, List<Integer>>> stealNodeToMap : stealerNodeToMappingTuples) {
        List<Integer> nodePartitions = cluster.getNodeById(stealNodeToMap.getFirst()).getPartitionIds();
        if (StoreRoutingPlan.checkKeyBelongsToPartition(keyPartitions,
                                                        nodePartitions,
                                                        stealNodeToMap.getSecond())) {
            nodesToPush.add(stealNodeToMap.getFirst());
        }
    }
    return nodesToPush;
}
Given a key and a list of steal infos give back a list of stealer node ids which will steal this .
24,233
/**
 * Sleeps if necessary to keep the caller at or below maxRatePerSecond.
 * Records the observed events against the rate sensor; when the quota is
 * violated, sleeps just long enough to compensate for the excess rate.
 *
 * @param eventsSeen number of events processed since the last call
 */
public synchronized void maybeThrottle(int eventsSeen) {
    if (maxRatePerSecond > 0) {
        long now = time.milliseconds();
        try {
            rateSensor.record(eventsSeen, now);
        } catch (QuotaViolationException e) {
            double currentRate = e.getValue();
            if (currentRate > this.maxRatePerSecond) {
                double excessRate = currentRate - this.maxRatePerSecond;
                long sleepTimeMs = Math.round(excessRate / this.maxRatePerSecond
                                              * voldemort.utils.Time.MS_PER_SECOND);
                if (logger.isDebugEnabled()) {
                    // Fixed typo in the log message: "maybeThrotte" -> "maybeThrottle".
                    logger.debug("Throttler quota exceeded:\n" + "eventsSeen \t= " + eventsSeen
                                 + " in this call of maybeThrottle(),\n" + "currentRate \t= "
                                 + currentRate + " events/sec,\n" + "maxRatePerSecond \t= "
                                 + this.maxRatePerSecond + " events/sec,\n" + "excessRate \t= "
                                 + excessRate + " events/sec,\n" + "sleeping for \t" + sleepTimeMs
                                 + " ms to compensate.\n" + "rateConfig.timeWindowMs() = "
                                 + rateConfig.timeWindowMs());
                }
                if (sleepTimeMs > rateConfig.timeWindowMs()) {
                    logger.warn("Throttler sleep time (" + sleepTimeMs + " ms) exceeds "
                                + "window size (" + rateConfig.timeWindowMs()
                                + " ms). This will likely "
                                + "result in not being able to honor the rate limit accurately.");
                }
                time.sleep(sleepTimeMs);
            } else if (logger.isDebugEnabled()) {
                logger.debug("Weird. Got QuotaValidationException but measured rate not over rateLimit: "
                             + "currentRate = " + currentRate + " , rateLimit = "
                             + this.maxRatePerSecond);
            }
        }
    }
}
Sleeps if necessary to slow down the caller .
24,234
/**
 * Compute the full repair set for a batch of (node, key, versioned value)
 * tuples: group the tuples by key and delegate each key's group to
 * singleKeyGetRepairs, concatenating the results.
 *
 * @param nodeValues values observed per node, possibly spanning many keys
 * @return the writes needed to bring stale nodes up to date; empty when
 *         fewer than two values were observed
 */
public List<NodeValue<K, V>> getRepairs(List<NodeValue<K, V>> nodeValues) {
    int size = nodeValues.size();
    // With zero or one observation there is nothing to reconcile
    if(size <= 1)
        return Collections.emptyList();
    // Bucket the observations by key
    Map<K, List<NodeValue<K, V>>> keyToNodeValues = Maps.newHashMap();
    for(NodeValue<K, V> nodeValue: nodeValues) {
        List<NodeValue<K, V>> keyNodeValues = keyToNodeValues.get(nodeValue.getKey());
        if(keyNodeValues == null) {
            // Capacity 5 is a heuristic for typical replication factors
            keyNodeValues = Lists.newArrayListWithCapacity(5);
            keyToNodeValues.put(nodeValue.getKey(), keyNodeValues);
        }
        keyNodeValues.add(nodeValue);
    }
    List<NodeValue<K, V>> result = Lists.newArrayList();
    for(List<NodeValue<K, V>> keyNodeValues: keyToNodeValues.values())
        result.addAll(singleKeyGetRepairs(keyNodeValues));
    return result;
}
Compute the repair set from the given values and nodes
24,235
/**
 * Reduce a list of versioned values to the causally concurrent frontier:
 * drop every value whose version is dominated by another value's version,
 * keeping all mutually concurrent versions.
 *
 * @param values candidate versions, in any order
 * @return the surviving (non-obsolete) versions
 */
public static List<Versioned<byte[]>> resolveVersions(List<Versioned<byte[]>> values) {
    List<Versioned<byte[]>> resolvedVersions = new ArrayList<Versioned<byte[]>>(values.size());
    for(Versioned<byte[]> value: values) {
        Iterator<Versioned<byte[]>> iter = resolvedVersions.iterator();
        boolean obsolete = false;
        while(iter.hasNext()) {
            Versioned<byte[]> curr = iter.next();
            Occurred occurred = value.getVersion().compare(curr.getVersion());
            if(occurred == Occurred.BEFORE) {
                // An already-kept version dominates this one; discard it
                obsolete = true;
                break;
            } else if(occurred == Occurred.AFTER) {
                // This value dominates a previously-kept version; evict it
                iter.remove();
            }
        }
        if(!obsolete) {
            resolvedVersions.add(value);
        }
    }
    return resolvedVersions;
}
Given a set of versions constructs a resolved list of versions based on the compare function above
24,236
/**
 * Build a VectorClock containing one entry per server id, each entry
 * initialized to the same clock value, stamped with the given timestamp.
 *
 * @param serverIds ids of the servers to include
 * @param clockValue initial clock value for every entry
 * @param timestamp wall-clock timestamp for the clock
 * @return the assembled vector clock
 */
public static VectorClock makeClock(Set<Integer> serverIds, long clockValue, long timestamp) {
    List<ClockEntry> entries = new ArrayList<ClockEntry>(serverIds.size());
    for(Integer serverId: serverIds) {
        entries.add(new ClockEntry(serverId.shortValue(), clockValue));
    }
    return new VectorClock(entries, timestamp);
}
Generates a vector clock with the provided values
24,237
/**
 * Swap the given read-only store to serve from the supplied version
 * directory, returning the directory path that was active before the swap
 * (so the caller can roll back if needed).
 *
 * @param storeName name of the read-only store
 * @param directory readable version directory to swap in
 * @return the previously active version directory path
 * @throws VoldemortException if the directory is not readable
 */
private String swapStore(String storeName, String directory) throws VoldemortException {
    ReadOnlyStorageEngine store = getReadOnlyStorageEngine(metadataStore, storeRepository, storeName);
    if(!Utils.isReadableDir(directory))
        throw new VoldemortException("Store directory '" + directory + "' is not a readable directory.");
    // Capture the outgoing version before swapping so it can be returned
    String currentDirPath = store.getCurrentDirPath();
    logger.info("Swapping RO store '" + storeName + "' to version directory '" + directory + "'");
    store.swapFiles(directory);
    logger.info("Swapping swapped RO store '" + storeName + "' to version directory '" + directory + "'");
    return currentDirPath;
}
Given a read - only store name and a directory swaps it in while returning the directory path being swapped out
24,238
/**
 * Non-blocking check for whether the buffer holds a complete request: read
 * the 4-byte size prefix and advance past that many bytes. A size of -1 is
 * treated as complete. Any exception (typically an out-of-bounds position
 * caused by a partial read) means the request is incomplete.
 *
 * Note: on success this advances the buffer's position past the request
 * payload as a side effect; the caller is expected to manage the position.
 *
 * @param buffer buffer containing the (possibly partial) request bytes
 * @return true if a full request is present in the buffer
 */
public boolean isCompleteRequest(ByteBuffer buffer) {
    DataInputStream inputStream = new DataInputStream(new ByteBufferBackedInputStream(buffer));
    try {
        int dataSize = inputStream.readInt();
        if(logger.isTraceEnabled())
            logger.trace("In isCompleteRequest, dataSize: " + dataSize + ", buffer position: "
                         + buffer.position());
        if(dataSize == -1)
            return true;
        // Skip over the payload; throws if fewer than dataSize bytes remain
        buffer.position(buffer.position() + dataSize);
        return true;
    } catch(Exception e) {
        if(logger.isTraceEnabled())
            logger.trace("In isCompleteRequest, probable partial read occurred: " + e);
        return false;
    }
}
This method is used by non-blocking code to determine whether the given buffer represents a complete request. Because the non-blocking code cannot, by definition, simply block and wait for more data, it is possible to get partial reads, and this method identifies that case.
24,239
/**
 * Decrement the reference count and unregister the managed MBean once the
 * last caller releases it. Synchronized so the count and the registration
 * flag change atomically.
 *
 * Fix: dropped the redundant {@code == true} comparison on the boolean flag.
 */
public synchronized void unregisterJmxIfRequired() {
    referenceCount--;
    // Only the final release actually unregisters the bean
    if(isRegistered && referenceCount <= 0) {
        JmxUtils.unregisterMbean(this.jmxObjectName);
        isRegistered = false;
    }
}
Last caller of this method will unregister the Mbean . All callers decrement the counter .
24,240
/**
 * Render the given byte array as a string of 1s and 0s, eight characters
 * per byte, most significant bit first.
 *
 * @param bytes bytes to render
 * @return the binary string representation
 */
public static String toBinaryString(byte[] bytes) {
    StringBuilder result = new StringBuilder(bytes.length * 8);
    for(byte b: bytes) {
        String bits = Integer.toBinaryString(b & 0xFF);
        // Left-pad with zeros so every byte renders as exactly 8 characters
        for(int pad = 8 - bits.length(); pad > 0; pad--) {
            result.append('0');
        }
        result.append(bits);
    }
    return result.toString();
}
Translate the given byte array into a string of 1s and 0s
24,241
/**
 * Copy bytes [from, to) of the given array into a newly allocated array.
 * A reversed range (to < from) yields an empty array.
 *
 * @param array source array
 * @param from start index (inclusive)
 * @param to end index (exclusive)
 * @return a fresh array holding the requested range
 */
public static byte[] copy(byte[] array, int from, int to) {
    int length = to - from;
    // A reversed range is treated as empty rather than an error
    if(length < 0) {
        return new byte[0];
    }
    byte[] result = new byte[length];
    System.arraycopy(array, from, result, 0, length);
    return result;
}
Copy the specified bytes into a new array
24,242
/**
 * Read a big-endian 4-byte int from the array starting at the given offset.
 *
 * @param bytes source array
 * @param offset index of the most significant byte
 * @return the decoded int
 */
public static int readInt(byte[] bytes, int offset) {
    int result = 0;
    // Accumulate big-endian: most significant byte first
    for(int i = 0; i < 4; i++) {
        result = (result << 8) | (bytes[offset + i] & 0xff);
    }
    return result;
}
Read an int from the byte array starting at the given offset
24,243
/**
 * Read a big-endian 4-byte unsigned integer from the array starting at the
 * given offset, widened into a non-negative long.
 *
 * @param bytes source array
 * @param offset index of the most significant byte
 * @return the decoded value in [0, 2^32)
 */
public static long readUnsignedInt(byte[] bytes, int offset) {
    long result = 0;
    // Accumulate big-endian; the long masks keep the value unsigned
    for(int i = 0; i < 4; i++) {
        result = (result << 8) | (bytes[offset + i] & 0xffL);
    }
    return result;
}
Read an unsigned integer from the given byte array
24,244
/**
 * Read numBytes big-endian bytes starting at offset into a long.
 *
 * @param bytes source array
 * @param offset index of the most significant byte
 * @param numBytes number of bytes to consume (at most 8)
 * @return the decoded value
 */
public static long readBytes(byte[] bytes, int offset, int numBytes) {
    long result = 0;
    // Big-endian: the first byte is the most significant
    for(int i = 0; i < numBytes; i++) {
        result = (result << 8) | (bytes[offset + i] & 0xFFL);
    }
    return result;
}
Read the given number of bytes into a long
24,245
/**
 * Write a short as two big-endian bytes starting at the given offset.
 *
 * @param bytes destination array
 * @param value value to encode
 * @param offset index receiving the most significant byte
 */
public static void writeShort(byte[] bytes, short value, int offset) {
    // The cast to byte keeps only the low 8 bits of each expression
    bytes[offset] = (byte) (value >>> 8);
    bytes[offset + 1] = (byte) value;
}
Write a short to the byte array starting at the given offset
24,246
/**
 * Write the low 16 bits of an int as two big-endian bytes starting at the
 * given offset; higher bits of the value are ignored.
 *
 * @param bytes destination array
 * @param value unsigned-short value to encode (only bits 0-15 are used)
 * @param offset index receiving the most significant byte
 */
public static void writeUnsignedShort(byte[] bytes, int value, int offset) {
    // The cast to byte keeps only the low 8 bits of each expression
    bytes[offset] = (byte) (value >>> 8);
    bytes[offset + 1] = (byte) value;
}
Write an unsigned short to the byte array starting at the given offset
24,247
/**
 * Write an int as four big-endian bytes starting at the given offset.
 *
 * @param bytes destination array
 * @param value value to encode
 * @param offset index receiving the most significant byte
 */
public static void writeInt(byte[] bytes, int value, int offset) {
    // Emit least significant byte at the highest index, shifting down each step
    for(int i = 3; i >= 0; i--) {
        bytes[offset + i] = (byte) value;
        value >>>= 8;
    }
}
Write an int to the byte array starting at the given offset
24,248
/**
 * Write the low-order numBytes bytes of a long into the array starting at
 * offset, big-endian (most significant byte first).
 *
 * @param bytes destination array
 * @param value value to encode
 * @param offset index receiving the most significant byte
 * @param numBytes number of bytes to emit
 */
public static void writeBytes(byte[] bytes, long value, int offset, int numBytes) {
    // Least significant byte lands at the highest index; shift down each step
    for(int i = offset + numBytes - 1; i >= offset; i--) {
        bytes[i] = (byte) value;
        value >>>= 8;
    }
}
Write the given number of bytes out to the array
24,249
/**
 * The minimum number of bytes needed to hold the magnitude of the given
 * number: at least 1 (for zero), at most 8.
 *
 * Fixes two overflow bugs in the previous version: (1) values needing all
 * eight bytes (>= 2^56) were compared against {@code 1L << 64}, which wraps
 * to 1 and caused a spurious IllegalStateException; (2) Long.MIN_VALUE
 * survives negation unchanged and was reported as fitting in one byte.
 *
 * @param number the value to size (sign is ignored)
 * @return number of bytes required, in [1, 8]
 */
public static byte numberOfBytesRequired(long number) {
    if(number < 0)
        number = -number;
    // Long.MIN_VALUE negates to itself; it needs the full 8 bytes
    if(number < 0)
        return 8;
    int bits = 64 - Long.numberOfLeadingZeros(number);
    // Round the bit length up to whole bytes; zero still occupies one byte
    return (byte) Math.max(1, (bits + 7) / 8);
}
The number of bytes required to hold the given number
24,250
/**
 * Fill the entire buffer from the stream, blocking until exactly
 * buffer.length bytes have been consumed.
 *
 * @param stream source stream
 * @param buffer destination buffer, filled completely on success
 * @throws EOFException if the stream ends before the buffer is full
 * @throws IOException on any other read failure
 */
public static void read(InputStream stream, byte[] buffer) throws IOException {
    int filled = 0;
    while(filled < buffer.length) {
        int count = stream.read(buffer, filled, buffer.length - filled);
        // read returns -1 at end of stream; anything else is progress
        if(count < 0)
            throw new EOFException("Attempt to read " + buffer.length + " bytes failed due to EOF.");
        filled += count;
    }
}
Read exactly buffer . length bytes from the stream into the buffer
24,251
/**
 * Encode the string to bytes using the named charset.
 *
 * @param string text to encode
 * @param encoding charset name, e.g. "UTF-8"
 * @return the encoded bytes
 * @throws IllegalArgumentException if the charset name is unknown
 */
public static byte[] getBytes(String string, String encoding) {
    byte[] encoded;
    try {
        encoded = string.getBytes(encoding);
    } catch(UnsupportedEncodingException e) {
        // Surface the bad charset name as an unchecked argument error
        throw new IllegalArgumentException(encoding + " is not a known encoding name.", e);
    }
    return encoded;
}
Translate the string to bytes using the given encoding
24,252
/**
 * Decode the bytes into a string using the named charset.
 *
 * @param bytes bytes to decode
 * @param encoding charset name, e.g. "UTF-8"
 * @return the decoded string
 * @throws IllegalArgumentException if the charset name is unknown
 */
public static String getString(byte[] bytes, String encoding) {
    String decoded;
    try {
        decoded = new String(bytes, encoding);
    } catch(UnsupportedEncodingException e) {
        // Surface the bad charset name as an unchecked argument error
        throw new IllegalArgumentException(encoding + " is not a known encoding name.", e);
    }
    return decoded;
}
Create a string from bytes using the given encoding
24,253
/**
 * Record the detailed statistics of one completed request (PUT / GET /
 * GET_ALL): latency, empty-response count, payload sizes and the
 * aggregated GET_ALL key count.
 *
 * @param timeNS request duration in nanoseconds (recorded in milliseconds)
 * @param numEmptyResponses number of keys that returned no value
 * @param valueBytes total value payload size in bytes
 * @param keyBytes total key payload size in bytes
 * @param getAllAggregatedCount number of keys fetched by a GET_ALL
 */
public void addRequest(long timeNS,
                       long numEmptyResponses,
                       long valueBytes,
                       long keyBytes,
                       long getAllAggregatedCount) {
    // Self-timing is only paid for when tracing is on
    long startTimeNs = 0;
    if(logger.isTraceEnabled()) {
        startTimeNs = System.nanoTime();
    }
    long currentTime = time.milliseconds();
    // Latency is converted from ns to ms before recording
    timeSensor.record((double) timeNS / voldemort.utils.Time.NS_PER_MS, currentTime);
    emptyResponseKeysSensor.record(numEmptyResponses, currentTime);
    valueBytesSensor.record(valueBytes, currentTime);
    keyBytesSensor.record(keyBytes, currentTime);
    getAllKeysCountSensor.record(getAllAggregatedCount, currentTime);
    if(logger.isTraceEnabled()) {
        logger.trace("addRequest took " + (System.nanoTime() - startTimeNs) + " ns.");
    }
}
Detailed request to track additional data about PUT GET and GET_ALL
24,254
/**
 * Enqueue an event for the state machine; events are processed in the
 * order they arrive.
 *
 * @param event event to enqueue, must be non-null
 * @throws IllegalStateException if the event is null
 */
public void addEvent(Event event) {
    if(event == null) {
        throw new IllegalStateException("event must be non-null");
    }
    if(logger.isTraceEnabled()) {
        logger.trace("Adding event " + event);
    }
    eventQueue.add(event);
}
Add an event to the queue . It will be processed in the order received .
24,255
/**
 * Drive the state machine: poll events off the queue in arrival order and
 * dispatch each to its registered action until a COMPLETED or ERROR event
 * ends the run. The machine is always marked finished on exit.
 *
 * @throws InsufficientOperationalNodesException if interrupted while waiting
 * @throws VoldemortException if the poll times out (null event)
 * @throws IllegalStateException if an event has no registered action
 */
public void execute() {
    try {
        while(true) {
            Event event = null;
            try {
                // Bounded wait so a stalled pipeline surfaces as an error
                event = eventQueue.poll(timeout, unit);
            } catch(InterruptedException e) {
                throw new InsufficientOperationalNodesException(operation.getSimpleName()
                                                                + " operation interrupted!", e);
            }
            if(event == null)
                throw new VoldemortException(operation.getSimpleName() + " returned a null event");
            if(event.equals(Event.ERROR)) {
                if(logger.isTraceEnabled())
                    logger.trace(operation.getSimpleName() + " request, events complete due to error");
                break;
            } else if(event.equals(Event.COMPLETED)) {
                if(logger.isTraceEnabled())
                    logger.trace(operation.getSimpleName() + " request, events complete");
                break;
            }
            Action action = eventActions.get(event);
            if(action == null)
                throw new IllegalStateException("action was null for event " + event);
            if(logger.isTraceEnabled())
                logger.trace(operation.getSimpleName() + " request, action "
                             + action.getClass().getSimpleName() + " to handle " + event + " event");
            action.execute(this);
        }
    } finally {
        // Ensure observers see the machine as done even on exception paths
        finished = true;
    }
}
Process events in the order in which they were received.
24,256
/**
 * Create a ModelMBean wrapping the given object, using the description
 * from its JmxManaged annotation when present. Only operations and
 * attributes are exposed; constructors and notifications are not.
 *
 * @param o the object to manage
 * @return the configured ModelMBean
 * @throws VoldemortException wrapping any JMX setup failure
 */
public static ModelMBean createModelMBean(Object o) {
    try {
        ModelMBean mbean = new RequiredModelMBean();
        JmxManaged annotation = o.getClass().getAnnotation(JmxManaged.class);
        String description = annotation == null ? "" : annotation.description();
        ModelMBeanInfo info = new ModelMBeanInfoSupport(o.getClass().getName(),
                                                        description,
                                                        extractAttributeInfo(o),
                                                        new ModelMBeanConstructorInfo[0],
                                                        extractOperationInfo(o),
                                                        new ModelMBeanNotificationInfo[0]);
        mbean.setModelMBeanInfo(info);
        // "ObjectReference" ties the MBean directly to the managed instance
        mbean.setManagedResource(o, "ObjectReference");
        return mbean;
    } catch(MBeanException e) {
        throw new VoldemortException(e);
    } catch(InvalidTargetObjectTypeException e) {
        throw new VoldemortException(e);
    } catch(InstanceNotFoundException e) {
        throw new VoldemortException(e);
    }
}
Create a model mbean from an object using the description given in the Jmx annotation if present . Only operations are supported so far no attributes constructors or notifications
24,257
/**
 * Collect JMX operation metadata for every public method annotated with
 * JmxOperation, JmxGetter or JmxSetter. Getters and setters are exposed as
 * operations with visibility 4 so JMX consoles de-emphasize them.
 *
 * @param object object whose class is scanned for annotations
 * @return operation descriptors for all annotated methods
 */
public static ModelMBeanOperationInfo[] extractOperationInfo(Object object) {
    ArrayList<ModelMBeanOperationInfo> infos = new ArrayList<ModelMBeanOperationInfo>();
    for(Method m: object.getClass().getMethods()) {
        JmxOperation jmxOperation = m.getAnnotation(JmxOperation.class);
        JmxGetter jmxGetter = m.getAnnotation(JmxGetter.class);
        JmxSetter jmxSetter = m.getAnnotation(JmxSetter.class);
        if(jmxOperation != null || jmxGetter != null || jmxSetter != null) {
            String description = "";
            int visibility = 1;
            int impact = MBeanOperationInfo.UNKNOWN;
            if(jmxOperation != null) {
                description = jmxOperation.description();
                impact = jmxOperation.impact();
            } else if(jmxGetter != null) {
                description = jmxGetter.description();
                impact = MBeanOperationInfo.INFO;
                visibility = 4;
            } else if(jmxSetter != null) {
                description = jmxSetter.description();
                impact = MBeanOperationInfo.ACTION;
                visibility = 4;
            }
            ModelMBeanOperationInfo info = new ModelMBeanOperationInfo(m.getName(),
                                                                       description,
                                                                       extractParameterInfo(m),
                                                                       m.getReturnType().getName(),
                                                                       impact);
            info.getDescriptor().setField("visibility", Integer.toString(visibility));
            infos.add(info);
        }
    }
    return infos.toArray(new ModelMBeanOperationInfo[infos.size()]);
}
Extract all operations and attributes from the given object that have been annotated with the Jmx annotation . Operations are all methods that are marked with the JmxOperation annotation .
24,258
/**
 * Build JMX parameter metadata for the given method, taking the name and
 * description from a JmxParam annotation when one is present and falling
 * back to the raw parameter type otherwise.
 *
 * @param m method whose parameters are described
 * @return one MBeanParameterInfo per parameter
 */
public static MBeanParameterInfo[] extractParameterInfo(Method m) {
    Class<?>[] types = m.getParameterTypes();
    Annotation[][] annotations = m.getParameterAnnotations();
    MBeanParameterInfo[] params = new MBeanParameterInfo[types.length];
    for(int i = 0; i < params.length; i++) {
        // First JmxParam annotation wins
        for(Annotation candidate: annotations[i]) {
            if(candidate instanceof JmxParam) {
                JmxParam jmxParam = (JmxParam) candidate;
                params[i] = new MBeanParameterInfo(jmxParam.name(),
                                                   types[i].getName(),
                                                   jmxParam.description());
                break;
            }
        }
        // No annotation found: describe by type only
        if(params[i] == null) {
            params[i] = new MBeanParameterInfo("", types[i].getName(), "");
        }
    }
    return params;
}
Extract the parameters from a method using the Jmx annotation if present or just the raw types otherwise
24,259
/**
 * Build a JMX ObjectName of the form "domain:type=type".
 *
 * @param domain the JMX domain
 * @param type the value for the type key property
 * @return the constructed ObjectName
 * @throws VoldemortException if the resulting name is malformed
 */
public static ObjectName createObjectName(String domain, String type) {
    String canonical = domain + ":type=" + type;
    try {
        return new ObjectName(canonical);
    } catch(MalformedObjectNameException e) {
        throw new VoldemortException(e);
    }
}
Create a JMX ObjectName
24,260
/**
 * Strip the package prefix from the class's fully qualified name. Note
 * that for nested classes this keeps the Outer$Inner form (unlike
 * getSimpleName()).
 *
 * @param c the class to name
 * @return the class name without its package
 */
public static String getClassName(Class<?> c) {
    String qualified = c.getName();
    int lastDot = qualified.lastIndexOf('.');
    return qualified.substring(lastDot + 1);
}
Get the class name without the package
24,261
/**
 * Register the given object as a model MBean with the platform MBean
 * server under the supplied name.
 *
 * @param mbean object to expose over JMX
 * @param name name to register it under
 */
public static void registerMbean(Object mbean, ObjectName name) {
    MBeanServer platformServer = ManagementFactory.getPlatformMBeanServer();
    registerMbean(platformServer, JmxUtils.createModelMBean(mbean), name);
}
Register the given mbean with the platform mbean server
24,262
/**
 * Register the given object with the platform MBean server, deriving the
 * JMX domain from the object's package and using the supplied type name.
 *
 * @param typeName value for the type key property
 * @param obj object to expose over JMX
 * @return the ObjectName the bean was registered under
 */
public static ObjectName registerMbean(String typeName, Object obj) {
    ObjectName name = JmxUtils.createObjectName(JmxUtils.getPackageName(obj.getClass()), typeName);
    registerMbean(ManagementFactory.getPlatformMBeanServer(), JmxUtils.createModelMBean(obj), name);
    return name;
}
Register the given object under the package name of the object's class, with the given type name.
24,263
/**
 * Register the mbean under the given name, first unregistering any bean
 * already present under that name. Failures are logged, not propagated.
 *
 * @param server MBean server to register with
 * @param mbean the model MBean to register
 * @param name the name to register it under
 */
public static void registerMbean(MBeanServer server, ModelMBean mbean, ObjectName name) {
    try {
        // Serialize the check-then-register so concurrent callers cannot race
        synchronized(LOCK) {
            if(server.isRegistered(name)) {
                JmxUtils.unregisterMbean(server, name);
            }
            server.registerMBean(mbean, name);
        }
    } catch(Exception e) {
        // Best effort: JMX registration problems must not break the caller
        logger.error("Error registering mbean:", e);
    }
}
Register the given mbean with the server
24,264
/**
 * Unregister the named MBean from the given server. Failures are logged,
 * not propagated.
 *
 * @param server server to unregister from
 * @param name name of the bean to remove
 */
public static void unregisterMbean(MBeanServer server, ObjectName name) {
    try {
        server.unregisterMBean(name);
    } catch(Exception e) {
        // Best effort: unregistration failures are logged, not rethrown
        logger.error("Error unregistering mbean", e);
    }
}
Unregister the mbean with the given name
24,265
/**
 * Unregister the named MBean from the platform MBean server. Failures are
 * logged, not propagated.
 *
 * @param name name of the bean to remove
 */
public static void unregisterMbean(ObjectName name) {
    MBeanServer platformServer = ManagementFactory.getPlatformMBeanServer();
    try {
        platformServer.unregisterMBean(name);
    } catch(Exception e) {
        // Best effort: unregistration failures are logged, not rethrown
        logger.error("Error unregistering mbean", e);
    }
}
Unregister the mbean with the given name from the platform mbean server
24,266
/**
 * Check whether a chunk file name matches the naming convention for the
 * given read-only storage format: partition_chunk(.data|.index) for V0/V1
 * and partition_replica_chunk(.data|.index) for V2.
 *
 * Fix: replaced the {@code if (matches) return true; else return false;}
 * anti-idiom with direct returns of the match result.
 *
 * @param fileName file name to validate
 * @param format storage format whose convention applies
 * @return true iff the name matches the format's convention
 * @throws VoldemortException for an unrecognized format
 */
public static boolean isFormatCorrect(String fileName, ReadOnlyStorageFormat format) {
    switch(format) {
        case READONLY_V0:
        case READONLY_V1:
            return fileName.matches("^[\\d]+_[\\d]+\\.(data|index)");
        case READONLY_V2:
            return fileName.matches("^[\\d]+_[\\d]+_[\\d]+\\.(data|index)");
        default:
            throw new VoldemortException("Format type not supported");
    }
}
Given a file name and read - only storage format tells whether the file name format is correct
24,267
/**
 * Extract the chunk id (the digits between the final underscore and the
 * extension dot) from a chunk file name.
 *
 * Fix: replaced the deprecated {@code new Integer(String)} boxing with
 * {@code Integer.parseInt}, avoiding an unnecessary wrapper allocation.
 *
 * @param fileName chunk file name, e.g. "0_1_2.data"
 * @return the parsed chunk id
 * @throws VoldemortException if no "_&lt;digits&gt;." sequence is present
 */
public static int getChunkId(String fileName) {
    Pattern pattern = Pattern.compile("_[\\d]+\\.");
    Matcher matcher = pattern.matcher(fileName);
    if(matcher.find()) {
        // Strip the surrounding '_' and '.' from the match before parsing
        return Integer.parseInt(fileName.substring(matcher.start() + 1, matcher.end() - 1));
    } else {
        throw new VoldemortException("Could not extract out chunk id from " + fileName);
    }
}
Returns the chunk id for the file name
24,268
/**
 * Resolve the currently active version directory for a store: prefer the
 * directory the "latest" symlink points at, otherwise fall back to the
 * highest-numbered version directory on disk.
 *
 * @param storeDirectory the store's root directory
 * @return the active version directory, or null if none exists
 */
public static File getCurrentVersion(File storeDirectory) {
    File latestDir = getLatestDir(storeDirectory);
    if(latestDir != null)
        return latestDir;
    File[] versionDirs = getVersionDirs(storeDirectory);
    if(versionDirs == null || versionDirs.length == 0) {
        return null;
    } else {
        // Select the single highest-versioned directory
        return findKthVersionedDir(versionDirs, versionDirs.length - 1, versionDirs.length - 1)[0];
    }
}
Retrieve the dir pointed to by latest symbolic - link or the current version dir
24,269
/**
 * Heuristic check that a file is a live version directory: it must be a
 * directory whose name contains "version-" and does not end in ".bak"
 * (retired versions are renamed with a .bak suffix).
 *
 * NOTE(review): this uses contains(), not startsWith(), so any name merely
 * containing "version-" passes — confirm whether that looseness is intended.
 */
public static boolean checkVersionDirName(File versionDir) {
    return (versionDir.isDirectory() && versionDir.getName().contains("version-") && !versionDir.getName().endsWith(".bak"));
}
Checks if the name of the file follows the version - n format
24,270
/**
 * Parse the numeric suffix out of a "version-&lt;n&gt;" directory name.
 *
 * @param versionDir directory name to parse
 * @return the version id, or -1 when the name does not parse
 */
private static long getVersionId(String versionDir) {
    String digits = versionDir.replace("version-", "");
    try {
        return Long.parseLong(digits);
    } catch(NumberFormatException e) {
        // Unparseable names are signalled with -1 rather than an exception
        logger.trace("Cannot parse version directory to obtain id " + versionDir);
        return -1;
    }
}
Extracts the version id from a string
24,271
/**
 * List the version directories directly under the given root whose numeric
 * version id falls within [minId, maxId].
 *
 * @param rootDir directory to scan
 * @param minId inclusive lower bound on the version id
 * @param maxId inclusive upper bound on the version id
 * @return matching directories, or null if rootDir is not listable
 */
public static File[] getVersionDirs(File rootDir, final long minId, final long maxId) {
    return rootDir.listFiles(new FileFilter() {

        public boolean accept(File pathName) {
            if(checkVersionDirName(pathName)) {
                long versionId = getVersionId(pathName);
                // getVersionId returns -1 for unparseable names
                if(versionId != -1 && versionId <= maxId && versionId >= minId) {
                    return true;
                }
            }
            return false;
        }
    });
}
Returns all the version directories present in the root directory specified
24,272
/**
 * Shut the server down: stop the online services first, then the basic
 * services in reverse start order. Errors are collected so every service
 * gets a stop attempt; the first failure is rethrown at the end.
 *
 * NOTE(review): when a failure is rethrown, JNAUtils.tryMunlockall() is
 * skipped — confirm whether it should run before the throw instead.
 *
 * @throws VoldemortException the first stop failure, if any occurred
 */
protected void stopInner() throws VoldemortException {
    List<VoldemortException> exceptions = new ArrayList<VoldemortException>();
    logger.info("Stopping services:" + getIdentityNode().getId());
    exceptions.addAll(stopOnlineServices());
    // Stop basic services in the reverse of their start order
    for(VoldemortService service: Utils.reversed(basicServices)) {
        try {
            service.stop();
        } catch(VoldemortException e) {
            exceptions.add(e);
            logger.error(e);
        }
    }
    logger.info("All services stopped for Node:" + getIdentityNode().getId());
    if(exceptions.size() > 0)
        throw exceptions.get(0);
    // Release memory pinned via mlockall during startup
    JNAUtils.tryMunlockall();
}
Attempt to shutdown the server . As much shutdown as possible will be completed even if intermediate errors are encountered .
24,273
/**
 * Determine which replica (0 = primary, 1 = first secondary, ...) of the
 * given partition this node hosts, or -1 if the node hosts none of them.
 *
 * @param partitionId master partition to inspect
 * @return the replica index hosted locally, or -1
 */
private int getReplicaTypeForPartition(int partitionId) {
    List<Integer> replicatingPartitions = routingStrategy.getReplicatingPartitionList(partitionId);
    for(int replicaType = 0; replicaType < replicatingPartitions.size(); replicaType++) {
        if(nodePartitionIds.contains(replicatingPartitions.get(replicaType))) {
            return replicaType;
        }
    }
    return -1;
}
Given a partition ID determine which replica of this partition is hosted by the current node if any .
24,274
/**
 * Repair READONLY_V2 chunk files whose names carry the wrong replica type:
 * for every replica type other than the correct one, rename any existing
 * partition_replica_chunk .index/.data pairs to use the correct replica
 * type. Chunk ids are scanned sequentially until a gap is found.
 *
 * @param masterPartitionId the master partition whose files are checked
 * @param correctReplicaType the replica type this node actually hosts
 * @throws VoldemortException if only one of an .index/.data pair exists
 */
private void renameReadOnlyV2Files(int masterPartitionId, int correctReplicaType) {
    for(int replica = 0; replica < routingStrategy.getNumReplicas(); replica++) {
        if(replica != correctReplicaType) {
            int chunkId = 0;
            while(true) {
                String fileName = Integer.toString(masterPartitionId) + "_"
                                  + Integer.toString(replica) + "_" + Integer.toString(chunkId);
                File index = getIndexFile(fileName);
                File data = getDataFile(fileName);
                if(index.exists() && data.exists()) {
                    // Rename both halves of the chunk to the correct replica type
                    String correctFileName = Integer.toString(masterPartitionId) + "_"
                                             + Integer.toString(correctReplicaType) + "_"
                                             + Integer.toString(chunkId);
                    File indexWithCorrectReplicaType = getIndexFile(correctFileName);
                    File dataWithCorrectReplicaType = getDataFile(correctFileName);
                    Utils.move(index, indexWithCorrectReplicaType);
                    Utils.move(data, dataWithCorrectReplicaType);
                    logger.info("Renamed files with wrong replica type: "
                                + index.getAbsolutePath() + "|data -> "
                                + indexWithCorrectReplicaType.getName() + "|data");
                } else if(index.exists() ^ data.exists()) {
                    // A lone .index or .data file indicates a corrupt chunk pair
                    throw new VoldemortException("One of the following does not exist: "
                                                 + index.toString() + " or " + data.toString() + ".");
                } else {
                    // Neither exists: no more chunks for this replica type
                    break;
                }
                chunkId++;
            }
        }
    }
}
This function looks for files with the wrong replica type in their name and if it finds any renames them .
24,275
/**
 * Convert a raw key into the on-disk lookup form for this store's storage
 * format: the full 16-byte MD5 for V0/V1, or only the first 8 bytes
 * (2 ints) of the MD5 for V2.
 *
 * @param key raw key bytes
 * @return the stored/searchable representation of the key
 * @throws VoldemortException for an unknown storage format
 */
public byte[] keyToStorageFormat(byte[] key) {
    switch(getReadOnlyStorageFormat()) {
        case READONLY_V0:
        case READONLY_V1:
            return ByteUtils.md5(key);
        case READONLY_V2:
            // V2 truncates the MD5 to its first 8 bytes
            return ByteUtils.copy(ByteUtils.md5(key), 0, 2 * ByteUtils.SIZE_OF_INT);
        default:
            throw new VoldemortException("Unknown read-only storage format");
    }
}
Converts the key to the format in which it is stored for searching
24,276
/**
 * Map a raw key to the index of the chunk file that may contain it: hash
 * the key per the storage format, locate the partition (V1) or
 * (master partition, replica type) bucket (V2) hosted by this node, and
 * offset into that bucket's chunk range.
 *
 * @param key raw key bytes
 * @return the global chunk index for this key
 * @throws IllegalStateException if the set is closed, the key does not
 *         belong on this node, or internal state is inconsistent
 */
public int getChunkForKey(byte[] key) throws IllegalStateException {
    if(numChunks == 0) {
        throw new IllegalStateException("The ChunkedFileSet is closed.");
    }
    switch(storageFormat) {
        case READONLY_V0: {
            // V0: chunks are hashed over the whole node, no bucketing
            return ReadOnlyUtils.chunk(ByteUtils.md5(key), numChunks);
        }
        case READONLY_V1: {
            if(nodePartitionIds == null) {
                throw new IllegalStateException("nodePartitionIds is null.");
            }
            // V1: bucket by the single local partition that replicates the key
            List<Integer> routingPartitionList = routingStrategy.getPartitionList(key);
            routingPartitionList.retainAll(nodePartitionIds);
            if(routingPartitionList.size() != 1) {
                throw new IllegalStateException("The key does not belong on this node.");
            }
            return chunkIdToChunkStart.get(routingPartitionList.get(0))
                   + ReadOnlyUtils.chunk(ByteUtils.md5(key),
                                         chunkIdToNumChunks.get(routingPartitionList.get(0)));
        }
        case READONLY_V2: {
            // V2: bucket by (master partition, replica type) pair
            List<Integer> routingPartitionList = routingStrategy.getPartitionList(key);
            Pair<Integer, Integer> bucket = null;
            for(int replicaType = 0; replicaType < routingPartitionList.size(); replicaType++) {
                if(nodePartitionIds == null) {
                    throw new IllegalStateException("nodePartitionIds is null.");
                }
                if(nodePartitionIds.contains(routingPartitionList.get(replicaType))) {
                    if(bucket == null) {
                        // Bucket key is the master partition (index 0) plus
                        // the replica type this node hosts
                        bucket = Pair.create(routingPartitionList.get(0), replicaType);
                    } else {
                        // A node must host at most one replica of a partition
                        throw new IllegalStateException("Found more than one replica for a given partition on the current node!");
                    }
                }
            }
            if(bucket == null) {
                throw new IllegalStateException("The key does not belong on this node.");
            }
            Integer chunkStart = chunkIdToChunkStart.get(bucket);
            if(chunkStart == null) {
                throw new IllegalStateException("chunkStart is null.");
            }
            return chunkStart + ReadOnlyUtils.chunk(ByteUtils.md5(key), chunkIdToNumChunks.get(bucket));
        }
        default: {
            throw new IllegalStateException("Unsupported storageFormat: " + storageFormat);
        }
    }
}
Given a particular key first converts its to the storage format and then determines which chunk it belongs to
24,277
/**
 * From a list of "partitionId_replicaType_chunkId" file names, select the
 * names whose leading partition id equals the given master partition id.
 *
 * @param fileNames candidate chunk file names
 * @param masterPartitionId partition id to match
 * @return the file names belonging to that master partition
 */
private static List<String> parseAndCompare(List<String> fileNames, int masterPartitionId) {
    List<String> matches = new ArrayList<String>();
    for(String fileName: fileNames) {
        // The first token before the separator is the partition id
        String partitionToken = fileName.split(SPLIT_LITERAL)[0];
        if(Integer.parseInt(partitionToken) == masterPartitionId) {
            matches.add(fileName);
        }
    }
    return matches;
}
This method takes a list of file names of the form partitionId_replicaType_chunkId and returns the file names whose leading partition id matches masterPartitionId.
24,278
/**
 * Render the chunk-id-to-chunk-count map as a "key - value, " list for JMX
 * consumption.
 *
 * Fix: the loop concatenated strings with '+' inside a single append(),
 * which defeated the purpose of using a StringBuilder; appends are now
 * chained.
 *
 * @return string representation of the map, one "key - value, " per entry
 */
@JmxGetter(name = "getChunkIdToNumChunks", description = "Returns a string representation of the map of chunk id to number of chunks")
public String getChunkIdToNumChunks() {
    StringBuilder builder = new StringBuilder();
    for(Entry<Object, Integer> entry: fileSet.getChunkIdToNumChunks().entrySet()) {
        builder.append(entry.getKey().toString())
               .append(" - ")
               .append(entry.getValue().toString())
               .append(", ");
    }
    return builder.toString();
}
Returns a string representation of map of chunk id to number of chunks
24,279
/**
 * Open the store from the given version directory; when null, resolve the
 * current version (the "latest" symlink, else the highest version on disk,
 * else a fresh version-0). Creates the directory and repoints "latest"
 * before mapping the chunk files.
 *
 * @param versionDir version directory to open, or null for the current one
 */
public void open(File versionDir) {
    fileModificationLock.writeLock().lock();
    try {
        if(isOpen)
            throw new IllegalStateException("Attempt to open already open store.");
        if(versionDir == null) {
            versionDir = ReadOnlyUtils.getCurrentVersion(storeDir);
            // Brand-new store: start at version-0
            if(versionDir == null)
                versionDir = new File(storeDir, "version-0");
        }
        long versionId = ReadOnlyUtils.getVersionId(versionDir);
        if(versionId == -1) {
            throw new VoldemortException("Unable to parse id from version directory "
                                         + versionDir.getAbsolutePath());
        }
        Utils.mkdirs(versionDir);
        // Repoint the "latest" symlink at the directory being opened
        Utils.symlink(versionDir.getAbsolutePath(),
                      storeDir.getAbsolutePath() + File.separator + "latest");
        this.fileSet = new ChunkedFileSet(versionDir, routingStrategy, nodeId, maxValueBufferAllocationSize);
        storeVersionManager.syncInternalStateFromFileSystem(false);
        this.lastSwapped = System.currentTimeMillis();
        this.isOpen = true;
    } catch(IOException e) {
        // NOTE(review): open failures are logged and swallowed, leaving the
        // store closed — confirm callers check isOpen afterwards.
        logger.error("Error in opening store", e);
    } finally {
        fileModificationLock.writeLock().unlock();
    }
}
Open the store with the version directory specified . If null is specified we open the directory with the maximum version
24,280
/**
 * Milliseconds elapsed since this store was last swapped, floored at zero.
 */
@JmxGetter(name = "lastSwapped", description = "Time in milliseconds since the store was swapped")
public long getLastSwapped() {
    long elapsed = System.currentTimeMillis() - lastSwapped;
    // Floor at zero in case of clock adjustments
    return Math.max(elapsed, 0L);
}
Time since last time the store was swapped
24,281
/**
 * Close the store and release its chunk file set under the write lock.
 * Closing an already-closed store is a logged no-op.
 */
public void close() throws VoldemortException {
    logger.debug("Close called for read-only store.");
    this.fileModificationLock.writeLock().lock();
    try {
        if(!isOpen) {
            logger.debug("Attempt to close already closed store " + getName());
        } else {
            this.isOpen = false;
            fileSet.close();
        }
    } finally {
        this.fileModificationLock.writeLock().unlock();
    }
}
Close the store .
24,282
/**
 * Swap the store to serve from newStoreDirectory. Validates that the new
 * directory is a well-formed sibling version folder with a version id no
 * lower than the current one, then closes and reopens the store under the
 * write lock, rolling back to the previous version if the open fails.
 * Finally schedules deletion of surplus backups.
 *
 * @param newStoreDirectory absolute path of the version directory to swap in
 */
@JmxOperation(description = "swapFiles changes this store to use the new data directory")
public void swapFiles(String newStoreDirectory) {
    logger.info("Swapping files for store '" + getName() + "' to " + newStoreDirectory);
    File newVersionDir = new File(newStoreDirectory);
    if(!newVersionDir.exists())
        throw new VoldemortException("File " + newVersionDir.getAbsolutePath() + " does not exist.");
    // Must be a direct child of the store dir named version-n
    if(!(newVersionDir.getParentFile().compareTo(storeDir.getAbsoluteFile()) == 0 && ReadOnlyUtils.checkVersionDirName(newVersionDir)))
        throw new VoldemortException("Invalid version folder name '" + newVersionDir
                                     + "'. Either parent directory is incorrect or format(version-n) is incorrect");
    File previousVersionDir = ReadOnlyUtils.getCurrentVersion(storeDir);
    if(previousVersionDir == null)
        throw new VoldemortException("Could not find any latest directory to swap with in store '"
                                     + getName() + "'");
    long newVersionId = ReadOnlyUtils.getVersionId(newVersionDir);
    long previousVersionId = ReadOnlyUtils.getVersionId(previousVersionDir);
    if(newVersionId == -1 || previousVersionId == -1)
        throw new VoldemortException("Unable to parse folder names (" + newVersionDir.getName()
                                     + "," + previousVersionDir.getName()
                                     + ") since format(version-n) is incorrect");
    // Never swap backwards to an older version
    if(previousVersionId > newVersionId) {
        logger.info("No swap required since current latest version " + previousVersionId
                    + " is greater than swap version " + newVersionId);
        deleteBackups();
        return;
    }
    logger.info("Acquiring write lock on '" + getName() + "':");
    fileModificationLock.writeLock().lock();
    boolean success = false;
    try {
        close();
        logger.info("Opening primary files for store '" + getName() + "' at " + newStoreDirectory);
        open(newVersionDir);
        success = true;
    } finally {
        try {
            // Restore the previous version if the new one failed to open
            if(!success)
                rollback(previousVersionDir);
        } finally {
            fileModificationLock.writeLock().unlock();
            if(success)
                logger.info("Swap operation completed successfully on store " + getName()
                            + ", releasing lock.");
            else
                logger.error("Swap operation failed.");
        }
    }
    deleteBackups();
}
Swap the current version folder for a new one
24,283
/**
 * Asynchronously delete surplus backup version directories, keeping the
 * current version plus the numBackups most recent backups.
 */
private void deleteBackups() {
    File[] storeDirList = ReadOnlyUtils.getVersionDirs(storeDir, 0L, getCurrentVersionId());
    if(storeDirList != null && storeDirList.length > (numBackups + 1)) {
        // Everything older than the newest (numBackups + 1) versions goes
        File[] extraBackups = ReadOnlyUtils.findKthVersionedDir(storeDirList,
                                                                0,
                                                                storeDirList.length - (numBackups + 1) - 1);
        if(extraBackups != null) {
            for(File backUpFile: extraBackups) {
                deleteAsync(backUpFile);
            }
        }
    }
}
Delete all backups asynchronously
24,284
/**
 * Delete the given file/directory on a background thread after waiting
 * deleteBackupMs milliseconds (a grace period for in-flight readers),
 * then resync the version manager's view of the file system.
 *
 * @param file path to remove asynchronously
 */
private void deleteAsync(final File file) {
    new Thread(new Runnable() {

        public void run() {
            try {
                try {
                    logger.info("Waiting for " + deleteBackupMs + " milliseconds before deleting "
                                + file.getAbsolutePath());
                    Thread.sleep(deleteBackupMs);
                } catch(InterruptedException e) {
                    // Proceed with the delete even if the grace period was cut short
                    logger.warn("Did not sleep enough before deleting backups");
                }
                logger.info("Deleting file " + file.getAbsolutePath());
                Utils.rm(file);
                logger.info("Deleting of " + file.getAbsolutePath() + " completed successfully.");
                storeVersionManager.syncInternalStateFromFileSystem(true);
            } catch(Exception e) {
                // A failed background delete must not kill the thread silently
                logger.error("Exception during deleteAsync for path: " + file, e);
            }
        }
    }, "background-file-delete").start();
}
Delete the given file in a separate thread
24,285
public void rollback ( File rollbackToDir ) { logger . info ( "Rolling back store '" + getName ( ) + "'" ) ; fileModificationLock . writeLock ( ) . lock ( ) ; try { if ( rollbackToDir == null ) throw new VoldemortException ( "Version directory specified to rollback is null" ) ; if ( ! rollbackToDir . exists ( ) ) throw new VoldemortException ( "Version directory " + rollbackToDir . getAbsolutePath ( ) + " specified to rollback does not exist" ) ; long versionId = ReadOnlyUtils . getVersionId ( rollbackToDir ) ; if ( versionId == - 1 ) throw new VoldemortException ( "Cannot parse version id" ) ; File [ ] backUpDirs = ReadOnlyUtils . getVersionDirs ( storeDir , versionId , Long . MAX_VALUE ) ; if ( backUpDirs == null || backUpDirs . length <= 1 ) { logger . warn ( "No rollback performed since there are no back-up directories" ) ; return ; } backUpDirs = ReadOnlyUtils . findKthVersionedDir ( backUpDirs , 0 , backUpDirs . length - 1 ) ; if ( isOpen ) close ( ) ; open ( rollbackToDir ) ; DateFormat df = new SimpleDateFormat ( "MM-dd-yyyy" ) ; for ( int index = 1 ; index < backUpDirs . length ; index ++ ) { Utils . move ( backUpDirs [ index ] , new File ( storeDir , backUpDirs [ index ] . getName ( ) + "." + df . format ( new Date ( ) ) + ".bak" ) ) ; } } finally { fileModificationLock . writeLock ( ) . unlock ( ) ; logger . info ( "Rollback operation completed on '" + getName ( ) + "', releasing lock." ) ; } }
Rollback to the specified push version
24,286
protected boolean hasTimeOutHeader ( ) { boolean result = false ; String timeoutValStr = this . request . getHeader ( RestMessageHeaders . X_VOLD_REQUEST_TIMEOUT_MS ) ; if ( timeoutValStr != null ) { try { this . parsedTimeoutInMs = Long . parseLong ( timeoutValStr ) ; if ( this . parsedTimeoutInMs < 0 ) { RestErrorHandler . writeErrorResponse ( messageEvent , HttpResponseStatus . BAD_REQUEST , "Time out cannot be negative " ) ; } else { result = true ; } } catch ( NumberFormatException nfe ) { logger . error ( "Exception when validating request. Incorrect timeout parameter. Cannot parse this to long: " + timeoutValStr , nfe ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Incorrect timeout parameter. Cannot parse this to long: " + timeoutValStr ) ; } } else { logger . error ( "Error when validating request. Missing timeout parameter." ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Missing timeout parameter." ) ; } return result ; }
Retrieve and validate the timeout value from the REST request . X_VOLD_REQUEST_TIMEOUT_MS is the timeout header .
24,287
protected void parseRoutingCodeHeader ( ) { String rtCode = this . request . getHeader ( RestMessageHeaders . X_VOLD_ROUTING_TYPE_CODE ) ; if ( rtCode != null ) { try { int routingTypeCode = Integer . parseInt ( rtCode ) ; this . parsedRoutingType = RequestRoutingType . getRequestRoutingType ( routingTypeCode ) ; } catch ( NumberFormatException nfe ) { logger . error ( "Exception when validating request. Incorrect routing type parameter. Cannot parse this to long: " + rtCode , nfe ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Incorrect routing type parameter. Cannot parse this to long: " + rtCode ) ; } catch ( VoldemortException ve ) { logger . error ( "Exception when validating request. Incorrect routing type code: " + rtCode , ve ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Incorrect routing type code: " + rtCode ) ; } } }
Retrieve the routing type value from the REST request . X_VOLD_ROUTING_TYPE_CODE is the routing type header .
24,288
protected boolean hasTimeStampHeader ( ) { String originTime = request . getHeader ( RestMessageHeaders . X_VOLD_REQUEST_ORIGIN_TIME_MS ) ; boolean result = false ; if ( originTime != null ) { try { this . parsedRequestOriginTimeInMs = System . currentTimeMillis ( ) ; if ( this . parsedRequestOriginTimeInMs < 0 ) { RestErrorHandler . writeErrorResponse ( messageEvent , HttpResponseStatus . BAD_REQUEST , "Origin time cannot be negative " ) ; } else { result = true ; } } catch ( NumberFormatException nfe ) { logger . error ( "Exception when validating request. Incorrect origin time parameter. Cannot parse this to long: " + originTime , nfe ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Incorrect origin time parameter. Cannot parse this to long: " + originTime ) ; } } else { logger . error ( "Error when validating request. Missing origin time parameter." ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Missing origin time parameter." ) ; } return result ; }
Retrieve and validate the timestamp value from the REST request. X_VOLD_REQUEST_ORIGIN_TIME_MS is the timestamp header.
24,289
protected boolean hasVectorClock ( boolean isVectorClockOptional ) { boolean result = false ; String vectorClockHeader = this . request . getHeader ( RestMessageHeaders . X_VOLD_VECTOR_CLOCK ) ; if ( vectorClockHeader != null ) { ObjectMapper mapper = new ObjectMapper ( ) ; try { VectorClockWrapper vcWrapper = mapper . readValue ( vectorClockHeader , VectorClockWrapper . class ) ; this . parsedVectorClock = new VectorClock ( vcWrapper . getVersions ( ) , vcWrapper . getTimestamp ( ) ) ; result = true ; } catch ( Exception e ) { logger . error ( "Exception while parsing and constructing vector clock" , e ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Invalid Vector Clock" ) ; } } else if ( ! isVectorClockOptional ) { logger . error ( "Error when validating request. Missing Vector Clock" ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Missing Vector Clock" ) ; } else { result = true ; } return result ; }
Retrieve and validate vector clock value from the REST request . X_VOLD_VECTOR_CLOCK is the vector clock header .
24,290
protected boolean hasKey ( ) { boolean result = false ; String requestURI = this . request . getUri ( ) ; parseKeys ( requestURI ) ; if ( this . parsedKeys != null ) { result = true ; } else { logger . error ( "Error when validating request. No key specified." ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Error: No key specified !" ) ; } return result ; }
Retrieve and validate the key from the REST request .
24,291
protected boolean isStoreValid ( ) { boolean result = false ; String requestURI = this . request . getUri ( ) ; this . storeName = parseStoreName ( requestURI ) ; if ( storeName != null ) { result = true ; } else { logger . error ( "Error when validating request. Missing store name." ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Missing store name. Critical error." ) ; } return result ; }
Retrieve and validate store name from the REST request .
24,292
protected void debugLog ( String operationType , Long receivedTimeInMs ) { long durationInMs = receivedTimeInMs - ( this . parsedRequestOriginTimeInMs ) ; int numVectorClockEntries = ( this . parsedVectorClock == null ? 0 : this . parsedVectorClock . getVersionMap ( ) . size ( ) ) ; logger . debug ( "Received a new request. Operation type: " + operationType + " , Key(s): " + keysHexString ( this . parsedKeys ) + " , Store: " + this . storeName + " , Origin time (in ms): " + ( this . parsedRequestOriginTimeInMs ) + " , Request received at time(in ms): " + receivedTimeInMs + " , Num vector clock entries: " + numVectorClockEntries + " , Duration from RESTClient to CoordinatorRestRequestValidator(in ms): " + durationInMs ) ; }
Prints a debug log message that details the time taken for the Http request to be parsed by the coordinator
24,293
public File fetch ( String source , String dest , long diskQuotaSizeInKB ) throws Exception { return fetchFromSource ( source , dest , null , null , - 1 , diskQuotaSizeInKB , null ) ; }
Used for unit tests only .
24,294
public static void main ( String [ ] args ) throws Exception { if ( args . length < 1 ) Utils . croak ( "USAGE: java " + HdfsFetcher . class . getName ( ) + " url [keytab-location kerberos-username hadoop-config-path [destDir]]" ) ; String url = args [ 0 ] ; VoldemortConfig config = new VoldemortConfig ( - 1 , "" ) ; HdfsFetcher fetcher = new HdfsFetcher ( config ) ; String destDir = null ; Long diskQuotaSizeInKB ; if ( args . length >= 4 ) { fetcher . voldemortConfig . setReadOnlyKeytabPath ( args [ 1 ] ) ; fetcher . voldemortConfig . setReadOnlyKerberosUser ( args [ 2 ] ) ; fetcher . voldemortConfig . setHadoopConfigPath ( args [ 3 ] ) ; } if ( args . length >= 5 ) destDir = args [ 4 ] ; if ( args . length >= 6 ) diskQuotaSizeInKB = Long . parseLong ( args [ 5 ] ) ; else diskQuotaSizeInKB = null ; allowFetchingOfSingleFile = true ; FileSystem fs = HadoopUtils . getHadoopFileSystem ( fetcher . voldemortConfig , url ) ; Path p = new Path ( url ) ; FileStatus status = fs . listStatus ( p ) [ 0 ] ; long size = status . getLen ( ) ; long start = System . currentTimeMillis ( ) ; if ( destDir == null ) destDir = System . getProperty ( "java.io.tmpdir" ) + File . separator + start ; File location = fetcher . fetch ( url , destDir , null , null , - 1 , null , diskQuotaSizeInKB ) ; double rate = size * Time . MS_PER_SECOND / ( double ) ( System . currentTimeMillis ( ) - start ) ; NumberFormat nf = NumberFormat . getInstance ( ) ; nf . setMaximumFractionDigits ( 2 ) ; System . out . println ( "Fetch to " + location + " completed: " + nf . format ( rate / ( 1024.0 * 1024.0 ) ) + " MB/sec." ) ; fs . close ( ) ; }
Main method for testing fetching
24,295
public synchronized int getPartitionStoreMoves ( ) { int count = 0 ; for ( List < Integer > entry : storeToPartitionIds . values ( ) ) count += entry . size ( ) ; return count ; }
Total count of partition-stores moved in this task.
24,296
public synchronized int getPartitionStoreCount ( ) { int count = 0 ; for ( String store : storeToPartitionIds . keySet ( ) ) { count += storeToPartitionIds . get ( store ) . size ( ) ; } return count ; }
Returns the total count of partitions across all stores .
24,297
public static String taskListToString ( List < RebalanceTaskInfo > infos ) { StringBuffer sb = new StringBuffer ( ) ; for ( RebalanceTaskInfo info : infos ) { sb . append ( "\t" ) . append ( info . getDonorId ( ) ) . append ( " -> " ) . append ( info . getStealerId ( ) ) . append ( " : [" ) ; for ( String storeName : info . getPartitionStores ( ) ) { sb . append ( "{" ) . append ( storeName ) . append ( " : " ) . append ( info . getPartitionIds ( storeName ) ) . append ( "}" ) ; } sb . append ( "]" ) . append ( Utils . NEWLINE ) ; } return sb . toString ( ) ; }
Pretty prints a task list of rebalancing tasks .
24,298
public void map ( GenericData . Record record , AvroCollector < Pair < ByteBuffer , ByteBuffer > > collector , Reporter reporter ) throws IOException { byte [ ] keyBytes = null ; byte [ ] valBytes = null ; Object keyRecord = null ; Object valRecord = null ; try { keyRecord = record . get ( keyField ) ; valRecord = record . get ( valField ) ; keyBytes = keySerializer . toBytes ( keyRecord ) ; valBytes = valueSerializer . toBytes ( valRecord ) ; this . collectorWrapper . setCollector ( collector ) ; this . mapper . map ( keyBytes , valBytes , this . collectorWrapper ) ; recordCounter ++ ; } catch ( OutOfMemoryError oom ) { logger . error ( oomErrorMessage ( reporter ) ) ; if ( keyBytes == null ) { logger . error ( "keyRecord caused OOM!" ) ; } else { logger . error ( "keyRecord: " + keyRecord ) ; logger . error ( "valRecord: " + ( valBytes == null ? "caused OOM" : valRecord ) ) ; } throw new VoldemortException ( oomErrorMessage ( reporter ) , oom ) ; } }
Creates the Voldemort key and value from the input Avro record by extracting and serializing the key and value fields, then maps the pair out to each of the responsible Voldemort nodes.
24,299
public static Pointer mmap ( long len , int prot , int flags , int fildes , long off ) throws IOException { Pointer addr = new Pointer ( 0 ) ; Pointer result = Delegate . mmap ( addr , new NativeLong ( len ) , prot , flags , fildes , new NativeLong ( off ) ) ; if ( Pointer . nativeValue ( result ) == - 1 ) { if ( logger . isDebugEnabled ( ) ) logger . debug ( errno . strerror ( ) ) ; throw new IOException ( "mmap failed: " + errno . strerror ( ) ) ; } return result ; }
Map the given region of the given file descriptor into memory . Returns a Pointer to the newly mapped memory throws an IOException on error .