idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
147,600
private void cleanStaleFiles ( File backupDir , AsyncOperationStatus status ) { String [ ] filesInEnv = env . getHome ( ) . list ( ) ; String [ ] filesInBackupDir = backupDir . list ( ) ; if ( filesInEnv != null && filesInBackupDir != null ) { HashSet < String > envFileSet = new HashSet < String > ( ) ; for ( String file : filesInEnv ) envFileSet . ( file ) ; // delete all files in backup which are currently not in environment for ( String file : filesInBackupDir ) { if ( file . endsWith ( BDB_EXT ) && ! envFileSet . contains ( file ) ) { status . setStatus ( "Deleting stale jdb file :" + file ) ; File staleJdbFile = new File ( backupDir , file ) ; staleJdbFile . delete ( ) ; } } } }
For recovery from the latest consistent snapshot we should clean up the old files from the previous backup set, else we will fill the disk with useless log files.
202
29
147,601
/**
 * Copies a jdb log file while verifying its checksums via
 * LogVerificationInputStream.
 *
 * FIX: the original leaked {@code source} when the verification-stream
 * constructor threw, and leaked {@code destination} when
 * {@code verifyStream.close()} threw. Both paths are now covered.
 *
 * @param sourceFile file to copy from
 * @param destFile   file to copy to (created if absent)
 * @throws IOException on read/write failure or checksum verification failure
 */
private void verifiedCopyFile(File sourceFile, File destFile) throws IOException {
    if(!destFile.exists()) {
        destFile.createNewFile();
    }
    FileInputStream source = null;
    FileOutputStream destination = null;
    LogVerificationInputStream verifyStream = null;
    try {
        source = new FileInputStream(sourceFile);
        destination = new FileOutputStream(destFile);
        verifyStream = new LogVerificationInputStream(env, source, sourceFile.getName());
        final byte[] buf = new byte[LOGVERIFY_BUFSIZE];
        while(true) {
            final int len = verifyStream.read(buf);
            if(len < 0) {
                break;
            }
            destination.write(buf, 0, len);
        }
    } finally {
        try {
            if(verifyStream != null) {
                // closing the wrapper also closes the underlying source stream
                verifyStream.close();
            } else if(source != null) {
                // wrapper construction failed; close the raw stream ourselves
                source.close();
            }
        } finally {
            if(destination != null) {
                destination.close();
            }
        }
    }
}
Copies the jdb log files with additional verification of the checksums .
203
15
147,602
@ Override public List < Integer > getReplicatingPartitionList ( int index ) { List < Node > preferenceNodesList = new ArrayList < Node > ( getNumReplicas ( ) ) ; List < Integer > replicationPartitionsList = new ArrayList < Integer > ( getNumReplicas ( ) ) ; // Copy Zone based Replication Factor HashMap < Integer , Integer > requiredRepFactor = new HashMap < Integer , Integer > ( ) ; requiredRepFactor . putAll ( zoneReplicationFactor ) ; // Cross-check if individual zone replication factor equals global int sum = 0 ; for ( Integer zoneRepFactor : requiredRepFactor . values ( ) ) { sum += zoneRepFactor ; } if ( sum != getNumReplicas ( ) ) throw new IllegalArgumentException ( "Number of zone replicas is not equal to the total replication factor" ) ; if ( getPartitionToNode ( ) . length == 0 ) { return new ArrayList < Integer > ( 0 ) ; } for ( int i = 0 ; i < getPartitionToNode ( ) . length ; i ++ ) { // add this one if we haven't already, and it can satisfy some zone // replicationFactor Node currentNode = getNodeByPartition ( index ) ; if ( ! preferenceNodesList . contains ( currentNode ) ) { preferenceNodesList . add ( currentNode ) ; if ( checkZoneRequirement ( requiredRepFactor , currentNode . getZoneId ( ) ) ) replicationPartitionsList . add ( index ) ; } // if we have enough, go home if ( replicationPartitionsList . size ( ) >= getNumReplicas ( ) ) return replicationPartitionsList ; // move to next clockwise slot on the ring index = ( index + 1 ) % getPartitionToNode ( ) . length ; } // we don't have enough, but that may be okay return replicationPartitionsList ; }
Get the replication partitions list for the given partition .
403
10
147,603
/**
 * Checks whether the given zone still needs more replicas and, if so,
 * decrements its remaining count in the map.
 *
 * @param requiredRepFactor mutable map of zone id to replicas still needed
 * @param zoneId            zone of the candidate node
 * @return true if the zone still needed a replica (count was decremented)
 */
private boolean checkZoneRequirement(HashMap<Integer, Integer> requiredRepFactor, int zoneId) {
    Integer remaining = requiredRepFactor.get(zoneId);
    // unknown zone, or zone already fully satisfied
    if(remaining == null || remaining == 0) {
        return false;
    }
    requiredRepFactor.put(zoneId, remaining - 1);
    return true;
}
Check if we still need more nodes from the given zone and reduce the zoneReplicationFactor count accordingly .
89
21
147,604
/**
 * Registers the OPT_U | OPT_URL option (coordinator bootstrap urls) on the
 * given parser, accepting multiple space-separated values.
 *
 * @param parser option parser to register the option on
 */
public static void acceptsUrlMultiple(OptionParser parser) {
    List<String> aliases = Arrays.asList(OPT_U, OPT_URL);
    parser.acceptsAll(aliases, "coordinator bootstrap urls")
          .withRequiredArg()
          .describedAs("url-list")
          .withValuesSeparatedBy(' ')
          .ofType(String.class);
}
Adds OPT_U | OPT_URL option to OptionParser with multiple arguments .
78
16
147,605
/**
 * Returns a copy of the given array without its first element. Arrays of
 * length zero or one yield an empty array.
 *
 * @param arr source array (never modified)
 * @return new array containing arr[1..] in order
 */
public static String[] copyArrayCutFirst(String[] arr) {
    if(arr.length <= 1) {
        return new String[0];
    }
    return Arrays.copyOfRange(arr, 1, arr.length);
}
Utility function that copies a string array except for the first element
77
13
147,606
/**
 * Returns a copy of the given array with the supplied string prepended as
 * the new first element.
 *
 * @param arr source array (never modified)
 * @param add element to place at index 0
 * @return new array of length arr.length + 1
 */
public static String[] copyArrayAddFirst(String[] arr, String add) {
    String[] withPrefix = new String[arr.length + 1];
    System.arraycopy(arr, 0, withPrefix, 1, arr.length);
    withPrefix[0] = add;
    return withPrefix;
}
Utility function that copies a string array and adds another string as the first element.
67
14
147,607
@ SuppressWarnings ( "unchecked" ) public void put ( String key , Versioned < Object > value ) { // acquire write lock writeLock . lock ( ) ; try { if ( this . storeNames . contains ( key ) || key . equals ( STORES_KEY ) ) { // Check for backwards compatibility List < StoreDefinition > storeDefinitions = ( List < StoreDefinition > ) value . getValue ( ) ; StoreDefinitionUtils . validateSchemasAsNeeded ( storeDefinitions ) ; // If the put is on the entire stores.xml key, delete the // additional stores which do not exist in the specified // stores.xml Set < String > storeNamesToDelete = new HashSet < String > ( ) ; for ( String storeName : this . storeNames ) { storeNamesToDelete . add ( storeName ) ; } // Add / update the list of store definitions specified in the // value StoreDefinitionsMapper mapper = new StoreDefinitionsMapper ( ) ; // Update the STORES directory and the corresponding entry in // metadata cache Set < String > specifiedStoreNames = new HashSet < String > ( ) ; for ( StoreDefinition storeDef : storeDefinitions ) { specifiedStoreNames . add ( storeDef . getName ( ) ) ; String storeDefStr = mapper . writeStore ( storeDef ) ; Versioned < String > versionedValueStr = new Versioned < String > ( storeDefStr , value . getVersion ( ) ) ; this . storeDefinitionsStorageEngine . put ( storeDef . getName ( ) , versionedValueStr , "" ) ; // Update the metadata cache this . metadataCache . put ( storeDef . getName ( ) , new Versioned < Object > ( storeDefStr , value . getVersion ( ) ) ) ; } if ( key . equals ( STORES_KEY ) ) { storeNamesToDelete . removeAll ( specifiedStoreNames ) ; resetStoreDefinitions ( storeNamesToDelete ) ; } // Re-initialize the store definitions initStoreDefinitions ( value . getVersion ( ) ) ; // Update routing strategies updateRoutingStrategies ( getCluster ( ) , getStoreDefList ( ) ) ; } else if ( METADATA_KEYS . 
contains ( key ) ) { // try inserting into inner store first putInner ( key , convertObjectToString ( key , value ) ) ; // cache all keys if innerStore put succeeded metadataCache . put ( key , value ) ; // do special stuff if needed if ( CLUSTER_KEY . equals ( key ) ) { updateRoutingStrategies ( ( Cluster ) value . getValue ( ) , getStoreDefList ( ) ) ; } else if ( NODE_ID_KEY . equals ( key ) ) { initNodeId ( getNodeIdNoLock ( ) ) ; } else if ( SYSTEM_STORES_KEY . equals ( key ) ) throw new VoldemortException ( "Cannot overwrite system store definitions" ) ; } else { throw new VoldemortException ( "Unhandled Key:" + key + " for MetadataStore put()" ) ; } } finally { writeLock . unlock ( ) ; } }
Puts a metadata value, rewriting the store definitions directory for store keys and writing other metadata keys through to the inner store.
669
11
147,608
@ SuppressWarnings ( "unchecked" ) public void updateStoreDefinitions ( Versioned < byte [ ] > valueBytes ) { // acquire write lock writeLock . lock ( ) ; try { Versioned < String > value = new Versioned < String > ( ByteUtils . getString ( valueBytes . getValue ( ) , "UTF-8" ) , valueBytes . getVersion ( ) ) ; Versioned < Object > valueObject = convertStringToObject ( STORES_KEY , value ) ; StoreDefinitionsMapper mapper = new StoreDefinitionsMapper ( ) ; List < StoreDefinition > storeDefinitions = ( List < StoreDefinition > ) valueObject . getValue ( ) ; // Check for backwards compatibility StoreDefinitionUtils . validateSchemasAsNeeded ( storeDefinitions ) ; StoreDefinitionUtils . validateNewStoreDefsAreNonBreaking ( getStoreDefList ( ) , storeDefinitions ) ; // Go through each store definition and do a corresponding put for ( StoreDefinition storeDef : storeDefinitions ) { if ( ! this . storeNames . contains ( storeDef . getName ( ) ) ) { throw new VoldemortException ( "Cannot update a store which does not exist !" ) ; } String storeDefStr = mapper . writeStore ( storeDef ) ; Versioned < String > versionedValueStr = new Versioned < String > ( storeDefStr , value . getVersion ( ) ) ; this . storeDefinitionsStorageEngine . put ( storeDef . getName ( ) , versionedValueStr , "" ) ; // Update the metadata cache this . metadataCache . put ( storeDef . getName ( ) , new Versioned < Object > ( storeDefStr , value . getVersion ( ) ) ) ; } // Re-initialize the store definitions initStoreDefinitions ( value . getVersion ( ) ) ; // Update routing strategies // TODO: Make this more fine grained.. i.e only update listeners for // a specific store. updateRoutingStrategies ( getCluster ( ) , getStoreDefList ( ) ) ; } finally { writeLock . unlock ( ) ; } }
Function to update store definitions . Unlike the put method this function does not delete any existing state . It only updates the state of the stores specified in the given stores . xml
453
34
147,609
@ Override public void put ( ByteArray keyBytes , Versioned < byte [ ] > valueBytes , byte [ ] transforms ) throws VoldemortException { // acquire write lock writeLock . lock ( ) ; try { String key = ByteUtils . getString ( keyBytes . get ( ) , "UTF-8" ) ; Versioned < String > value = new Versioned < String > ( ByteUtils . getString ( valueBytes . getValue ( ) , "UTF-8" ) , valueBytes . getVersion ( ) ) ; Versioned < Object > valueObject = convertStringToObject ( key , value ) ; this . put ( key , valueObject ) ; } finally { writeLock . unlock ( ) ; } }
A write-through put to the inner store.
153
9
147,610
/**
 * Builds a name-to-definition map from the given list of store definitions.
 *
 * @param storeDefs store definitions to index
 * @return map keyed by store name
 */
private HashMap<String, StoreDefinition> makeStoreDefinitionMap(List<StoreDefinition> storeDefs) {
    HashMap<String, StoreDefinition> byName = new HashMap<String, StoreDefinition>();
    for(StoreDefinition def: storeDefs) {
        byName.put(def.getName(), def);
    }
    return byName;
}
Returns the list of store defs as a map
83
10
147,611
private void updateRoutingStrategies ( Cluster cluster , List < StoreDefinition > storeDefs ) { // acquire write lock writeLock . lock ( ) ; try { VectorClock clock = new VectorClock ( ) ; if ( metadataCache . containsKey ( ROUTING_STRATEGY_KEY ) ) clock = ( VectorClock ) metadataCache . get ( ROUTING_STRATEGY_KEY ) . getVersion ( ) ; logger . info ( "Updating routing strategy for all stores" ) ; HashMap < String , StoreDefinition > storeDefMap = makeStoreDefinitionMap ( storeDefs ) ; HashMap < String , RoutingStrategy > routingStrategyMap = createRoutingStrategyMap ( cluster , storeDefMap ) ; this . metadataCache . put ( ROUTING_STRATEGY_KEY , new Versioned < Object > ( routingStrategyMap , clock . incremented ( getNodeId ( ) , System . currentTimeMillis ( ) ) ) ) ; for ( String storeName : storeNameTolisteners . keySet ( ) ) { RoutingStrategy updatedRoutingStrategy = routingStrategyMap . get ( storeName ) ; if ( updatedRoutingStrategy != null ) { try { for ( MetadataStoreListener listener : storeNameTolisteners . get ( storeName ) ) { listener . updateRoutingStrategy ( updatedRoutingStrategy ) ; listener . updateStoreDefinition ( storeDefMap . get ( storeName ) ) ; } } catch ( Exception e ) { if ( logger . isEnabledFor ( Level . WARN ) ) logger . warn ( e , e ) ; } } } } finally { writeLock . unlock ( ) ; } }
Changes to cluster OR store definition metadata results in routing strategies changing . These changes need to be propagated to all the listeners .
360
25
147,612
public void addRebalancingState ( final RebalanceTaskInfo stealInfo ) { // acquire write lock writeLock . lock ( ) ; try { // Move into rebalancing state if ( ByteUtils . getString ( get ( SERVER_STATE_KEY , null ) . get ( 0 ) . getValue ( ) , "UTF-8" ) . compareTo ( VoldemortState . NORMAL_SERVER . toString ( ) ) == 0 ) { put ( SERVER_STATE_KEY , VoldemortState . REBALANCING_MASTER_SERVER ) ; initCache ( SERVER_STATE_KEY ) ; } // Add the steal information RebalancerState rebalancerState = getRebalancerState ( ) ; if ( ! rebalancerState . update ( stealInfo ) ) { throw new VoldemortException ( "Could not add steal information " + stealInfo + " since a plan for the same donor node " + stealInfo . getDonorId ( ) + " ( " + rebalancerState . find ( stealInfo . getDonorId ( ) ) + " ) already exists" ) ; } put ( MetadataStore . REBALANCING_STEAL_INFO , rebalancerState ) ; initCache ( REBALANCING_STEAL_INFO ) ; } finally { writeLock . unlock ( ) ; } }
Add the steal information to the rebalancer state
287
10
147,613
public void deleteRebalancingState ( RebalanceTaskInfo stealInfo ) { // acquire write lock writeLock . lock ( ) ; try { RebalancerState rebalancerState = getRebalancerState ( ) ; if ( ! rebalancerState . remove ( stealInfo ) ) throw new IllegalArgumentException ( "Couldn't find " + stealInfo + " in " + rebalancerState + " while deleting" ) ; if ( rebalancerState . isEmpty ( ) ) { logger . debug ( "Cleaning all rebalancing state" ) ; cleanAllRebalancingState ( ) ; } else { put ( REBALANCING_STEAL_INFO , rebalancerState ) ; initCache ( REBALANCING_STEAL_INFO ) ; } } finally { writeLock . unlock ( ) ; } }
Delete the partition steal information from the rebalancer state
179
11
147,614
public void setOfflineState ( boolean setToOffline ) { // acquire write lock writeLock . lock ( ) ; try { String currentState = ByteUtils . getString ( get ( SERVER_STATE_KEY , null ) . get ( 0 ) . getValue ( ) , "UTF-8" ) ; if ( setToOffline ) { // from NORMAL_SERVER to OFFLINE_SERVER if ( currentState . equals ( VoldemortState . NORMAL_SERVER . toString ( ) ) ) { put ( SERVER_STATE_KEY , VoldemortState . OFFLINE_SERVER ) ; initCache ( SERVER_STATE_KEY ) ; put ( SLOP_STREAMING_ENABLED_KEY , false ) ; initCache ( SLOP_STREAMING_ENABLED_KEY ) ; put ( PARTITION_STREAMING_ENABLED_KEY , false ) ; initCache ( PARTITION_STREAMING_ENABLED_KEY ) ; put ( READONLY_FETCH_ENABLED_KEY , false ) ; initCache ( READONLY_FETCH_ENABLED_KEY ) ; } else if ( currentState . equals ( VoldemortState . OFFLINE_SERVER . toString ( ) ) ) { logger . warn ( "Already in OFFLINE_SERVER state." ) ; return ; } else { logger . error ( "Cannot enter OFFLINE_SERVER state from " + currentState ) ; throw new VoldemortException ( "Cannot enter OFFLINE_SERVER state from " + currentState ) ; } } else { // from OFFLINE_SERVER to NORMAL_SERVER if ( currentState . equals ( VoldemortState . NORMAL_SERVER . toString ( ) ) ) { logger . warn ( "Already in NORMAL_SERVER state." ) ; return ; } else if ( currentState . equals ( VoldemortState . OFFLINE_SERVER . toString ( ) ) ) { put ( SERVER_STATE_KEY , VoldemortState . NORMAL_SERVER ) ; initCache ( SERVER_STATE_KEY ) ; put ( SLOP_STREAMING_ENABLED_KEY , true ) ; initCache ( SLOP_STREAMING_ENABLED_KEY ) ; put ( PARTITION_STREAMING_ENABLED_KEY , true ) ; initCache ( PARTITION_STREAMING_ENABLED_KEY ) ; put ( READONLY_FETCH_ENABLED_KEY , true ) ; initCache ( READONLY_FETCH_ENABLED_KEY ) ; init ( ) ; initNodeId ( getNodeIdNoLock ( ) ) ; } else { logger . error ( "Cannot enter NORMAL_SERVER state from " + currentState ) ; throw new VoldemortException ( "Cannot enter NORMAL_SERVER state from " + currentState ) ; } } } finally { writeLock . unlock ( ) ; } }
change server state between OFFLINE_SERVER and NORMAL_SERVER
639
16
147,615
public void addStoreDefinition ( StoreDefinition storeDef ) { // acquire write lock writeLock . lock ( ) ; try { // Check if store already exists if ( this . storeNames . contains ( storeDef . getName ( ) ) ) { throw new VoldemortException ( "Store already exists !" ) ; } // Check for backwards compatibility StoreDefinitionUtils . validateSchemaAsNeeded ( storeDef ) ; // Otherwise add to the STORES directory StoreDefinitionsMapper mapper = new StoreDefinitionsMapper ( ) ; String storeDefStr = mapper . writeStore ( storeDef ) ; Versioned < String > versionedValueStr = new Versioned < String > ( storeDefStr ) ; this . storeDefinitionsStorageEngine . put ( storeDef . getName ( ) , versionedValueStr , null ) ; // Update the metadata cache this . metadataCache . put ( storeDef . getName ( ) , new Versioned < Object > ( storeDefStr ) ) ; // Re-initialize the store definitions. This is primarily required // to re-create the value for key: 'stores.xml'. This is necessary // for backwards compatibility. initStoreDefinitions ( null ) ; updateRoutingStrategies ( getCluster ( ) , getStoreDefList ( ) ) ; } finally { writeLock . unlock ( ) ; } }
Function to add a new Store to the Metadata store . This involves
283
14
147,616
public void deleteStoreDefinition ( String storeName ) { // acquire write lock writeLock . lock ( ) ; try { // Check if store exists if ( ! this . storeNames . contains ( storeName ) ) { throw new VoldemortException ( "Requested store to be deleted does not exist !" ) ; } // Otherwise remove from the STORES directory. Note: The version // argument is not required here since the // ConfigurationStorageEngine simply ignores this. this . storeDefinitionsStorageEngine . delete ( storeName , null ) ; // Update the metadata cache this . metadataCache . remove ( storeName ) ; // Re-initialize the store definitions. This is primarily required // to re-create the value for key: 'stores.xml'. This is necessary // for backwards compatibility. initStoreDefinitions ( null ) ; } finally { writeLock . unlock ( ) ; } }
Function to delete the specified store from Metadata store . This involves
181
13
147,617
/**
 * Validates that the given store name exists in the store name list managed
 * by this MetadataStore. Used by the Admin service before serving a
 * get-metadata request.
 *
 * @param name store name to check
 * @return true iff the store exists
 */
public boolean isValidStore(String name) {
    readLock.lock();
    try {
        // FIX: collapse the redundant "if (x) return true; return false;"
        return this.storeNames.contains(name);
    } finally {
        readLock.unlock();
    }
}
Utility function to validate if the given store name exists in the store name list managed by MetadataStore . This is used by the Admin service for validation before serving a get - metadata request .
51
39
147,618
private void init ( ) { logger . info ( "metadata init()." ) ; writeLock . lock ( ) ; try { // Required keys initCache ( CLUSTER_KEY ) ; // If stores definition storage engine is not null, initialize metadata // Add the mapping from key to the storage engine used if ( this . storeDefinitionsStorageEngine != null ) { initStoreDefinitions ( null ) ; } else { initCache ( STORES_KEY ) ; } // Initialize system store in the metadata cache initSystemCache ( ) ; initSystemRoutingStrategies ( getCluster ( ) ) ; // Initialize with default if not present initCache ( SLOP_STREAMING_ENABLED_KEY , true ) ; initCache ( PARTITION_STREAMING_ENABLED_KEY , true ) ; initCache ( READONLY_FETCH_ENABLED_KEY , true ) ; initCache ( QUOTA_ENFORCEMENT_ENABLED_KEY , true ) ; initCache ( REBALANCING_STEAL_INFO , new RebalancerState ( new ArrayList < RebalanceTaskInfo > ( ) ) ) ; initCache ( SERVER_STATE_KEY , VoldemortState . NORMAL_SERVER . toString ( ) ) ; initCache ( REBALANCING_SOURCE_CLUSTER_XML , null ) ; initCache ( REBALANCING_SOURCE_STORES_XML , null ) ; } finally { writeLock . unlock ( ) ; } }
Initializes the metadataCache for MetadataStore
325
9
147,619
private void initStoreDefinitions ( Version storesXmlVersion ) { if ( this . storeDefinitionsStorageEngine == null ) { throw new VoldemortException ( "The store definitions directory is empty" ) ; } String allStoreDefinitions = "<stores>" ; Version finalStoresXmlVersion = null ; if ( storesXmlVersion != null ) { finalStoresXmlVersion = storesXmlVersion ; } this . storeNames . clear ( ) ; ClosableIterator < Pair < String , Versioned < String > > > storesIterator = this . storeDefinitionsStorageEngine . entries ( ) ; // Some test setups may result in duplicate entries for 'store' element. // Do the de-dup here Map < String , Versioned < String > > storeNameToDefMap = new HashMap < String , Versioned < String > > ( ) ; Version maxVersion = null ; while ( storesIterator . hasNext ( ) ) { Pair < String , Versioned < String > > storeDetail = storesIterator . next ( ) ; String storeName = storeDetail . getFirst ( ) ; Versioned < String > versionedStoreDef = storeDetail . getSecond ( ) ; storeNameToDefMap . put ( storeName , versionedStoreDef ) ; Version curVersion = versionedStoreDef . getVersion ( ) ; // Get the highest version from all the store entries if ( maxVersion == null ) { maxVersion = curVersion ; } else if ( maxVersion . compare ( curVersion ) == Occurred . BEFORE ) { maxVersion = curVersion ; } } // If the specified version is null, assign highest Version to // 'stores.xml' key if ( finalStoresXmlVersion == null ) { finalStoresXmlVersion = maxVersion ; } // Go through all the individual stores and update metadata for ( Entry < String , Versioned < String > > storeEntry : storeNameToDefMap . entrySet ( ) ) { String storeName = storeEntry . getKey ( ) ; Versioned < String > versionedStoreDef = storeEntry . getValue ( ) ; // Add all the store names to the list of storeNames this . storeNames . add ( storeName ) ; this . metadataCache . put ( storeName , new Versioned < Object > ( versionedStoreDef . getValue ( ) , versionedStoreDef . 
getVersion ( ) ) ) ; } Collections . sort ( this . storeNames ) ; for ( String storeName : this . storeNames ) { Versioned < String > versionedStoreDef = storeNameToDefMap . get ( storeName ) ; // Stitch together to form the complete store definition list. allStoreDefinitions += versionedStoreDef . getValue ( ) ; } allStoreDefinitions += "</stores>" ; // Update cache with the composite store definition list. metadataCache . put ( STORES_KEY , convertStringToObject ( STORES_KEY , new Versioned < String > ( allStoreDefinitions , finalStoresXmlVersion ) ) ) ; }
Function to go through all the store definitions contained in the STORES directory and
639
15
147,620
private void resetStoreDefinitions ( Set < String > storeNamesToDelete ) { // Clear entries in the metadata cache for ( String storeName : storeNamesToDelete ) { this . metadataCache . remove ( storeName ) ; this . storeDefinitionsStorageEngine . delete ( storeName , null ) ; this . storeNames . remove ( storeName ) ; } }
Function to clear all the metadata related to the given store definitions . This is needed when a put on stores . xml is called thus replacing the existing state .
76
31
147,621
/** Seeds the metadata cache with the built-in system store definitions. */
private synchronized void initSystemCache() {
    StringReader schemaReader = new StringReader(SystemStoreConstants.SYSTEM_STORE_SCHEMA);
    List<StoreDefinition> systemStoreDefs = storeMapper.readStoreList(schemaReader);
    metadataCache.put(SYSTEM_STORES_KEY, new Versioned<Object>(systemStoreDefs));
}
Initialize the metadata cache with system store list
69
9
147,622
/**
 * Performs a get with the specified composite request object, retrying with
 * a fresh bootstrap whenever stale metadata is detected.
 *
 * @param requestWrapper composite request carrying key and timeout
 * @return the versioned values for the key
 * @throws VoldemortException once all metadata refresh attempts fail
 */
public List<Versioned<V>> getWithCustomTimeout(CompositeVoldemortRequest<K, V> requestWrapper) {
    validateTimeout(requestWrapper.getRoutingTimeoutInMs());
    for(int attempts = 0; attempts < this.metadataRefreshAttempts; attempts++) {
        try {
            long startTimeInMs = System.currentTimeMillis();
            String keyHexString = "";
            if(logger.isDebugEnabled()) {
                ByteArray key = (ByteArray) requestWrapper.getKey();
                keyHexString = RestUtils.getKeyHexString(key);
                debugLogStart("GET",
                              requestWrapper.getRequestOriginTimeInMs(),
                              startTimeInMs,
                              keyHexString);
            }
            List<Versioned<V>> items = store.get(requestWrapper);
            if(logger.isDebugEnabled()) {
                int vcEntrySize = 0;
                for(Versioned<V> vc: items) {
                    vcEntrySize += ((VectorClock) vc.getVersion()).getVersionMap().size();
                }
                debugLogEnd("GET",
                            requestWrapper.getRequestOriginTimeInMs(),
                            startTimeInMs,
                            System.currentTimeMillis(),
                            keyHexString,
                            vcEntrySize);
            }
            return items;
        } catch(InvalidMetadataException e) {
            logger.info("Received invalid metadata exception during get [ " + e.getMessage()
                        + " ] on store '" + storeName + "'. Rebootstrapping");
            bootStrap();
        }
    }
    throw new VoldemortException(this.metadataRefreshAttempts
                                 + " metadata refresh attempts failed.");
}
Performs a get operation with the specified composite request object
379
11
147,623
public Version putWithCustomTimeout ( CompositeVoldemortRequest < K , V > requestWrapper ) { validateTimeout ( requestWrapper . getRoutingTimeoutInMs ( ) ) ; List < Versioned < V > > versionedValues ; long startTime = System . currentTimeMillis ( ) ; String keyHexString = "" ; if ( logger . isDebugEnabled ( ) ) { ByteArray key = ( ByteArray ) requestWrapper . getKey ( ) ; keyHexString = RestUtils . getKeyHexString ( key ) ; logger . debug ( "PUT requested for key: " + keyHexString + " , for store: " + this . storeName + " at time(in ms): " + startTime + " . Nested GET and PUT VERSION requests to follow ---" ) ; } // We use the full timeout for doing the Get. In this, we're being // optimistic that the subsequent put might be faster such that all the // steps might finish within the allotted time requestWrapper . setResolveConflicts ( true ) ; versionedValues = getWithCustomTimeout ( requestWrapper ) ; Versioned < V > versioned = getItemOrThrow ( requestWrapper . getKey ( ) , null , versionedValues ) ; long endTime = System . currentTimeMillis ( ) ; if ( versioned == null ) versioned = Versioned . value ( requestWrapper . getRawValue ( ) , new VectorClock ( ) ) ; else versioned . setObject ( requestWrapper . getRawValue ( ) ) ; // This should not happen unless there's a bug in the // getWithCustomTimeout long timeLeft = requestWrapper . getRoutingTimeoutInMs ( ) - ( endTime - startTime ) ; if ( timeLeft <= 0 ) { throw new StoreTimeoutException ( "PUT request timed out" ) ; } CompositeVersionedPutVoldemortRequest < K , V > putVersionedRequestObject = new CompositeVersionedPutVoldemortRequest < K , V > ( requestWrapper . getKey ( ) , versioned , timeLeft ) ; putVersionedRequestObject . setRequestOriginTimeInMs ( requestWrapper . getRequestOriginTimeInMs ( ) ) ; Version result = putVersionedWithCustomTimeout ( putVersionedRequestObject ) ; long endTimeInMs = System . currentTimeMillis ( ) ; if ( logger . isDebugEnabled ( ) ) { logger . 
debug ( "PUT response received for key: " + keyHexString + " , for store: " + this . storeName + " at time(in ms): " + endTimeInMs ) ; } return result ; }
Performs a put operation with the specified composite request object
566
11
147,624
/**
 * Performs a versioned put with the specified composite request object,
 * retrying with a fresh bootstrap whenever stale metadata is detected.
 *
 * @param requestWrapper composite request carrying key, versioned value
 * @return the version that was written
 * @throws ObsoleteVersionException if the supplied version is stale
 * @throws VoldemortException once all metadata refresh attempts fail
 */
public Version putVersionedWithCustomTimeout(CompositeVoldemortRequest<K, V> requestWrapper)
        throws ObsoleteVersionException {
    validateTimeout(requestWrapper.getRoutingTimeoutInMs());
    for(int attempts = 0; attempts < this.metadataRefreshAttempts; attempts++) {
        try {
            String keyHexString = "";
            long startTimeInMs = System.currentTimeMillis();
            if(logger.isDebugEnabled()) {
                ByteArray key = (ByteArray) requestWrapper.getKey();
                keyHexString = RestUtils.getKeyHexString(key);
                debugLogStart("PUT_VERSION",
                              requestWrapper.getRequestOriginTimeInMs(),
                              startTimeInMs,
                              keyHexString);
            }
            store.put(requestWrapper);
            if(logger.isDebugEnabled()) {
                debugLogEnd("PUT_VERSION",
                            requestWrapper.getRequestOriginTimeInMs(),
                            startTimeInMs,
                            System.currentTimeMillis(),
                            keyHexString,
                            0);
            }
            return requestWrapper.getValue().getVersion();
        } catch(InvalidMetadataException e) {
            logger.info("Received invalid metadata exception during put [ " + e.getMessage()
                        + " ] on store '" + storeName + "'. Rebootstrapping");
            bootStrap();
        }
    }
    throw new VoldemortException(this.metadataRefreshAttempts
                                 + " metadata refresh attempts failed.");
}
Performs a Versioned put operation with the specified composite request object
331
13
147,625
/**
 * Performs a get-all with the specified composite request object, retrying
 * with a fresh bootstrap whenever stale metadata is detected.
 *
 * FIXES: the retry loop is restructured as a bounded for loop (consistent
 * with getWithCustomTimeout) and the local KeysHexString is renamed to
 * follow lowerCamelCase.
 *
 * @param requestWrapper composite request carrying the iterable keys
 * @return map of key to versioned values
 * @throws VoldemortException once all metadata refresh attempts fail
 */
public Map<K, List<Versioned<V>>> getAllWithCustomTimeout(CompositeVoldemortRequest<K, V> requestWrapper) {
    validateTimeout(requestWrapper.getRoutingTimeoutInMs());
    Map<K, List<Versioned<V>>> items = null;
    for(int attempts = 0; attempts < this.metadataRefreshAttempts; attempts++) {
        try {
            String keysHexString = "";
            long startTimeInMs = System.currentTimeMillis();
            if(logger.isDebugEnabled()) {
                Iterable<ByteArray> keys = (Iterable<ByteArray>) requestWrapper.getIterableKeys();
                keysHexString = getKeysHexString(keys);
                debugLogStart("GET_ALL",
                              requestWrapper.getRequestOriginTimeInMs(),
                              startTimeInMs,
                              keysHexString);
            }
            items = store.getAll(requestWrapper);
            if(logger.isDebugEnabled()) {
                int vcEntrySize = 0;
                for(List<Versioned<V>> item: items.values()) {
                    for(Versioned<V> vc: item) {
                        vcEntrySize += ((VectorClock) vc.getVersion()).getVersionMap().size();
                    }
                }
                debugLogEnd("GET_ALL",
                            requestWrapper.getRequestOriginTimeInMs(),
                            startTimeInMs,
                            System.currentTimeMillis(),
                            keysHexString,
                            vcEntrySize);
            }
            return items;
        } catch(InvalidMetadataException e) {
            logger.info("Received invalid metadata exception during getAll [ " + e.getMessage()
                        + " ] on store '" + storeName + "'. Rebootstrapping");
            bootStrap();
        }
    }
    throw new VoldemortException(this.metadataRefreshAttempts
                                 + " metadata refresh attempts failed.");
}
Performs a get all operation with the specified composite request object
429
12
147,626
public boolean deleteWithCustomTimeout ( CompositeVoldemortRequest < K , V > deleteRequestObject ) { List < Versioned < V >> versionedValues ; validateTimeout ( deleteRequestObject . getRoutingTimeoutInMs ( ) ) ; boolean hasVersion = deleteRequestObject . getVersion ( ) == null ? false : true ; String keyHexString = "" ; if ( ! hasVersion ) { long startTimeInMs = System . currentTimeMillis ( ) ; if ( logger . isDebugEnabled ( ) ) { ByteArray key = ( ByteArray ) deleteRequestObject . getKey ( ) ; keyHexString = RestUtils . getKeyHexString ( key ) ; logger . debug ( "DELETE without version requested for key: " + keyHexString + " , for store: " + this . storeName + " at time(in ms): " + startTimeInMs + " . Nested GET and DELETE requests to follow ---" ) ; } // We use the full timeout for doing the Get. In this, we're being // optimistic that the subsequent delete might be faster all the // steps might finish within the allotted time deleteRequestObject . setResolveConflicts ( true ) ; versionedValues = getWithCustomTimeout ( deleteRequestObject ) ; Versioned < V > versioned = getItemOrThrow ( deleteRequestObject . getKey ( ) , null , versionedValues ) ; if ( versioned == null ) { return false ; } long timeLeft = deleteRequestObject . getRoutingTimeoutInMs ( ) - ( System . currentTimeMillis ( ) - startTimeInMs ) ; // This should not happen unless there's a bug in the // getWithCustomTimeout if ( timeLeft < 0 ) { throw new StoreTimeoutException ( "DELETE request timed out" ) ; } // Update the version and the new timeout deleteRequestObject . setVersion ( versioned . getVersion ( ) ) ; deleteRequestObject . setRoutingTimeoutInMs ( timeLeft ) ; } long deleteVersionStartTimeInNs = System . currentTimeMillis ( ) ; if ( logger . isDebugEnabled ( ) ) { ByteArray key = ( ByteArray ) deleteRequestObject . getKey ( ) ; keyHexString = RestUtils . getKeyHexString ( key ) ; debugLogStart ( "DELETE" , deleteRequestObject . 
getRequestOriginTimeInMs ( ) , deleteVersionStartTimeInNs , keyHexString ) ; } boolean result = store . delete ( deleteRequestObject ) ; if ( logger . isDebugEnabled ( ) ) { debugLogEnd ( "DELETE" , deleteRequestObject . getRequestOriginTimeInMs ( ) , deleteVersionStartTimeInNs , System . currentTimeMillis ( ) , keyHexString , 0 ) ; } if ( ! hasVersion && logger . isDebugEnabled ( ) ) { logger . debug ( "DELETE without version response received for key: " + keyHexString + ", for store: " + this . storeName + " at time(in ms): " + System . currentTimeMillis ( ) ) ; } return result ; }
Performs a delete operation with the specified composite request object
670
11
147,627
private void debugLogStart ( String operationType , Long originTimeInMS , Long requestReceivedTimeInMs , String keyString ) { long durationInMs = requestReceivedTimeInMs - originTimeInMS ; logger . debug ( "Received a new request. Operation Type: " + operationType + " , key(s): " + keyString + " , Store: " + this . storeName + " , Origin time (in ms): " + originTimeInMS + " . Request received at time(in ms): " + requestReceivedTimeInMs + " , Duration from RESTClient to CoordinatorFatClient(in ms): " + durationInMs ) ; }
Traces the duration between origin time in the http Request and time just before being processed by the fat client
143
21
147,628
private void debugLogEnd ( String operationType , Long OriginTimeInMs , Long RequestStartTimeInMs , Long ResponseReceivedTimeInMs , String keyString , int numVectorClockEntries ) { long durationInMs = ResponseReceivedTimeInMs - RequestStartTimeInMs ; logger . debug ( "Received a response from voldemort server for Operation Type: " + operationType + " , For key(s): " + keyString + " , Store: " + this . storeName + " , Origin time of request (in ms): " + OriginTimeInMs + " , Response received at time (in ms): " + ResponseReceivedTimeInMs + " . Request sent at(in ms): " + RequestStartTimeInMs + " , Num vector clock entries: " + numVectorClockEntries + " , Duration from CoordinatorFatClient back to CoordinatorFatClient(in ms): " + durationInMs ) ; }
Traces the time taken just by the fat client inside Coordinator to process this request
198
16
147,629
public static Node updateNode ( Node node , List < Integer > partitionsList ) { return new Node ( node . getId ( ) , node . getHost ( ) , node . getHttpPort ( ) , node . getSocketPort ( ) , node . getAdminPort ( ) , node . getZoneId ( ) , partitionsList ) ; }
Creates a replica of the node with the new partitions list
72
12
147,630
public static Node addPartitionToNode ( final Node node , Integer donatedPartition ) { return UpdateClusterUtils . addPartitionsToNode ( node , Sets . newHashSet ( donatedPartition ) ) ; }
Add a partition to the node provided
47
7
147,631
public static Node removePartitionFromNode ( final Node node , Integer donatedPartition ) { return UpdateClusterUtils . removePartitionsFromNode ( node , Sets . newHashSet ( donatedPartition ) ) ; }
Remove a partition from the node provided
47
7
147,632
public static Node addPartitionsToNode ( final Node node , final Set < Integer > donatedPartitions ) { List < Integer > deepCopy = new ArrayList < Integer > ( node . getPartitionIds ( ) ) ; deepCopy . addAll ( donatedPartitions ) ; Collections . sort ( deepCopy ) ; return updateNode ( node , deepCopy ) ; }
Add the set of partitions to the node provided
78
9
147,633
public static Node removePartitionsFromNode ( final Node node , final Set < Integer > donatedPartitions ) { List < Integer > deepCopy = new ArrayList < Integer > ( node . getPartitionIds ( ) ) ; deepCopy . removeAll ( donatedPartitions ) ; return updateNode ( node , deepCopy ) ; }
Remove the set of partitions from the node provided
70
9
147,634
public static Cluster createUpdatedCluster ( Cluster currentCluster , int stealerNodeId , List < Integer > donatedPartitions ) { Cluster updatedCluster = Cluster . cloneCluster ( currentCluster ) ; // Go over every donated partition one by one for ( int donatedPartition : donatedPartitions ) { // Gets the donor Node that owns this donated partition Node donorNode = updatedCluster . getNodeForPartitionId ( donatedPartition ) ; Node stealerNode = updatedCluster . getNodeById ( stealerNodeId ) ; if ( donorNode == stealerNode ) { // Moving to the same location = No-op continue ; } // Update the list of partitions for this node donorNode = removePartitionFromNode ( donorNode , donatedPartition ) ; stealerNode = addPartitionToNode ( stealerNode , donatedPartition ) ; // Sort the nodes updatedCluster = updateCluster ( updatedCluster , Lists . newArrayList ( donorNode , stealerNode ) ) ; } return updatedCluster ; }
Updates the existing cluster such that we remove partitions mentioned from the stealer node and add them to the donor node
220
23
147,635
public static void main ( String [ ] args ) { DirectoryIterator iter = new DirectoryIterator ( args ) ; while ( iter . hasNext ( ) ) System . out . println ( iter . next ( ) . getAbsolutePath ( ) ) ; }
Command line method to walk the directories provided on the command line and print out their contents
52
17
147,636
private void verifyClusterStoreDefinition ( ) { if ( SystemStoreConstants . isSystemStore ( storeDefinition . getName ( ) ) ) { // TODO: Once "todo" in StorageService.initSystemStores is complete, // this early return can be removed and verification can be enabled // for system stores. return ; } Set < Integer > clusterZoneIds = cluster . getZoneIds ( ) ; if ( clusterZoneIds . size ( ) > 1 ) { // Zoned Map < Integer , Integer > zoneRepFactor = storeDefinition . getZoneReplicationFactor ( ) ; Set < Integer > storeDefZoneIds = zoneRepFactor . keySet ( ) ; if ( ! clusterZoneIds . equals ( storeDefZoneIds ) ) { throw new VoldemortException ( "Zone IDs in cluster (" + clusterZoneIds + ") are incongruent with zone IDs in store defs (" + storeDefZoneIds + ")" ) ; } for ( int zoneId : clusterZoneIds ) { if ( zoneRepFactor . get ( zoneId ) > cluster . getNumberOfNodesInZone ( zoneId ) ) { throw new VoldemortException ( "Not enough nodes (" + cluster . getNumberOfNodesInZone ( zoneId ) + ") in zone with id " + zoneId + " for replication factor of " + zoneRepFactor . get ( zoneId ) + "." ) ; } } } else { // Non-zoned if ( storeDefinition . getReplicationFactor ( ) > cluster . getNumberOfNodes ( ) ) { System . err . println ( storeDefinition ) ; System . err . println ( cluster ) ; throw new VoldemortException ( "Not enough nodes (" + cluster . getNumberOfNodes ( ) + ") for replication factor of " + storeDefinition . getReplicationFactor ( ) + "." ) ; } } }
Verify that cluster is congruent to store def wrt zones .
402
15
147,637
public Integer getNodesPartitionIdForKey ( int nodeId , final byte [ ] key ) { // this is all the partitions the key replicates to. List < Integer > partitionIds = getReplicatingPartitionList ( key ) ; for ( Integer partitionId : partitionIds ) { // check which of the replicating partitions belongs to the node in // question if ( getNodeIdForPartitionId ( partitionId ) == nodeId ) { return partitionId ; } } return null ; }
Determines the partition ID that replicates the key on the given node .
106
16
147,638
private List < Integer > getNodeIdListForPartitionIdList ( List < Integer > partitionIds ) throws VoldemortException { List < Integer > nodeIds = new ArrayList < Integer > ( partitionIds . size ( ) ) ; for ( Integer partitionId : partitionIds ) { int nodeId = getNodeIdForPartitionId ( partitionId ) ; if ( nodeIds . contains ( nodeId ) ) { throw new VoldemortException ( "Node ID " + nodeId + " already in list of Node IDs." ) ; } else { nodeIds . add ( nodeId ) ; } } return nodeIds ; }
Converts from partitionId to nodeId . The list of partition IDs partitionIds is expected to be a replicating partition list i . e . the mapping from partition ID to node ID should be one to one .
135
44
147,639
public boolean checkKeyBelongsToNode ( byte [ ] key , int nodeId ) { List < Integer > nodePartitions = cluster . getNodeById ( nodeId ) . getPartitionIds ( ) ; List < Integer > replicatingPartitions = getReplicatingPartitionList ( key ) ; // remove all partitions from the list, except those that belong to the // node replicatingPartitions . retainAll ( nodePartitions ) ; return replicatingPartitions . size ( ) > 0 ; }
Determines if the key replicates to the given node
106
12
147,640
public static List < Integer > checkKeyBelongsToPartition ( byte [ ] key , Set < Pair < Integer , HashMap < Integer , List < Integer > > > > stealerNodeToMappingTuples , Cluster cluster , StoreDefinition storeDef ) { List < Integer > keyPartitions = new RoutingStrategyFactory ( ) . updateRoutingStrategy ( storeDef , cluster ) . getPartitionList ( key ) ; List < Integer > nodesToPush = Lists . newArrayList ( ) ; for ( Pair < Integer , HashMap < Integer , List < Integer > > > stealNodeToMap : stealerNodeToMappingTuples ) { List < Integer > nodePartitions = cluster . getNodeById ( stealNodeToMap . getFirst ( ) ) . getPartitionIds ( ) ; if ( StoreRoutingPlan . checkKeyBelongsToPartition ( keyPartitions , nodePartitions , stealNodeToMap . getSecond ( ) ) ) { nodesToPush . add ( stealNodeToMap . getFirst ( ) ) ; } } return nodesToPush ; }
Given a key and a list of steal infos give back a list of stealer node ids which will steal this .
233
25
147,641
public synchronized void maybeThrottle ( int eventsSeen ) { if ( maxRatePerSecond > 0 ) { long now = time . milliseconds ( ) ; try { rateSensor . record ( eventsSeen , now ) ; } catch ( QuotaViolationException e ) { // If we're over quota, we calculate how long to sleep to compensate. double currentRate = e . getValue ( ) ; if ( currentRate > this . maxRatePerSecond ) { double excessRate = currentRate - this . maxRatePerSecond ; long sleepTimeMs = Math . round ( excessRate / this . maxRatePerSecond * voldemort . utils . Time . MS_PER_SECOND ) ; if ( logger . isDebugEnabled ( ) ) { logger . debug ( "Throttler quota exceeded:\n" + "eventsSeen \t= " + eventsSeen + " in this call of maybeThrotte(),\n" + "currentRate \t= " + currentRate + " events/sec,\n" + "maxRatePerSecond \t= " + this . maxRatePerSecond + " events/sec,\n" + "excessRate \t= " + excessRate + " events/sec,\n" + "sleeping for \t" + sleepTimeMs + " ms to compensate.\n" + "rateConfig.timeWindowMs() = " + rateConfig . timeWindowMs ( ) ) ; } if ( sleepTimeMs > rateConfig . timeWindowMs ( ) ) { logger . warn ( "Throttler sleep time (" + sleepTimeMs + " ms) exceeds " + "window size (" + rateConfig . timeWindowMs ( ) + " ms). This will likely " + "result in not being able to honor the rate limit accurately." ) ; // When using the HDFS Fetcher, setting the hdfs.fetcher.buffer.size // too high could cause this problem. } time . sleep ( sleepTimeMs ) ; } else if ( logger . isDebugEnabled ( ) ) { logger . debug ( "Weird. Got QuotaValidationException but measured rate not over rateLimit: " + "currentRate = " + currentRate + " , rateLimit = " + this . maxRatePerSecond ) ; } } } }
Sleeps if necessary to slow down the caller .
490
10
147,642
public List < NodeValue < K , V > > getRepairs ( List < NodeValue < K , V > > nodeValues ) { int size = nodeValues . size ( ) ; if ( size <= 1 ) return Collections . emptyList ( ) ; Map < K , List < NodeValue < K , V > > > keyToNodeValues = Maps . newHashMap ( ) ; for ( NodeValue < K , V > nodeValue : nodeValues ) { List < NodeValue < K , V > > keyNodeValues = keyToNodeValues . get ( nodeValue . getKey ( ) ) ; if ( keyNodeValues == null ) { keyNodeValues = Lists . newArrayListWithCapacity ( 5 ) ; keyToNodeValues . put ( nodeValue . getKey ( ) , keyNodeValues ) ; } keyNodeValues . add ( nodeValue ) ; } List < NodeValue < K , V > > result = Lists . newArrayList ( ) ; for ( List < NodeValue < K , V > > keyNodeValues : keyToNodeValues . values ( ) ) result . addAll ( singleKeyGetRepairs ( keyNodeValues ) ) ; return result ; }
Compute the repair set from the given values and nodes
250
11
147,643
public static List < Versioned < byte [ ] > > resolveVersions ( List < Versioned < byte [ ] > > values ) { List < Versioned < byte [ ] > > resolvedVersions = new ArrayList < Versioned < byte [ ] > > ( values . size ( ) ) ; // Go over all the values and determine whether the version is // acceptable for ( Versioned < byte [ ] > value : values ) { Iterator < Versioned < byte [ ] > > iter = resolvedVersions . iterator ( ) ; boolean obsolete = false ; // Compare the current version with a set of accepted versions while ( iter . hasNext ( ) ) { Versioned < byte [ ] > curr = iter . next ( ) ; Occurred occurred = value . getVersion ( ) . compare ( curr . getVersion ( ) ) ; if ( occurred == Occurred . BEFORE ) { obsolete = true ; break ; } else if ( occurred == Occurred . AFTER ) { iter . remove ( ) ; } } if ( ! obsolete ) { // else update the set of accepted versions resolvedVersions . add ( value ) ; } } return resolvedVersions ; }
Given a set of versions constructs a resolved list of versions based on the compare function above
238
17
147,644
public static VectorClock makeClock ( Set < Integer > serverIds , long clockValue , long timestamp ) { List < ClockEntry > clockEntries = new ArrayList < ClockEntry > ( serverIds . size ( ) ) ; for ( Integer serverId : serverIds ) { clockEntries . add ( new ClockEntry ( serverId . shortValue ( ) , clockValue ) ) ; } return new VectorClock ( clockEntries , timestamp ) ; }
Generates a vector clock with the provided values
97
9
147,645
private String swapStore ( String storeName , String directory ) throws VoldemortException { ReadOnlyStorageEngine store = getReadOnlyStorageEngine ( metadataStore , storeRepository , storeName ) ; if ( ! Utils . isReadableDir ( directory ) ) throw new VoldemortException ( "Store directory '" + directory + "' is not a readable directory." ) ; String currentDirPath = store . getCurrentDirPath ( ) ; logger . info ( "Swapping RO store '" + storeName + "' to version directory '" + directory + "'" ) ; store . swapFiles ( directory ) ; logger . info ( "Swapping swapped RO store '" + storeName + "' to version directory '" + directory + "'" ) ; return currentDirPath ; }
Given a read - only store name and a directory swaps it in while returning the directory path being swapped out
160
21
147,646
@ Override public boolean isCompleteRequest ( ByteBuffer buffer ) { DataInputStream inputStream = new DataInputStream ( new ByteBufferBackedInputStream ( buffer ) ) ; try { int dataSize = inputStream . readInt ( ) ; if ( logger . isTraceEnabled ( ) ) logger . trace ( "In isCompleteRequest, dataSize: " + dataSize + ", buffer position: " + buffer . position ( ) ) ; if ( dataSize == - 1 ) return true ; // Here we skip over the data (without reading it in) and // move our position to just past it. buffer . position ( buffer . position ( ) + dataSize ) ; return true ; } catch ( Exception e ) { // This could also occur if the various methods we call into // re-throw a corrupted value error as some other type of exception. // For example, updating the position on a buffer past its limit // throws an InvalidArgumentException. if ( logger . isTraceEnabled ( ) ) logger . trace ( "In isCompleteRequest, probable partial read occurred: " + e ) ; return false ; } }
This method is used by non - blocking code to determine if the give buffer represents a complete request . Because the non - blocking code can by definition not just block waiting for more data it s possible to get partial reads and this identifies that case .
235
49
147,647
public synchronized void unregisterJmxIfRequired ( ) { referenceCount -- ; if ( isRegistered == true && referenceCount <= 0 ) { JmxUtils . unregisterMbean ( this . jmxObjectName ) ; isRegistered = false ; } }
Last caller of this method will unregister the Mbean . All callers decrement the counter .
54
20
147,648
public static String toBinaryString ( byte [ ] bytes ) { StringBuilder buffer = new StringBuilder ( ) ; for ( byte b : bytes ) { String bin = Integer . toBinaryString ( 0xFF & b ) ; bin = bin . substring ( 0 , Math . min ( bin . length ( ) , 8 ) ) ; for ( int j = 0 ; j < 8 - bin . length ( ) ; j ++ ) { buffer . append ( ' ' ) ; } buffer . append ( bin ) ; } return buffer . toString ( ) ; }
Translate the given byte array into a string of 1s and 0s
119
15
147,649
public static byte [ ] copy ( byte [ ] array , int from , int to ) { if ( to - from < 0 ) { return new byte [ 0 ] ; } else { byte [ ] a = new byte [ to - from ] ; System . arraycopy ( array , from , a , 0 , to - from ) ; return a ; } }
Copy the specified bytes into a new array
74
8
147,650
public static int readInt ( byte [ ] bytes , int offset ) { return ( ( ( bytes [ offset + 0 ] & 0xff ) << 24 ) | ( ( bytes [ offset + 1 ] & 0xff ) << 16 ) | ( ( bytes [ offset + 2 ] & 0xff ) << 8 ) | ( bytes [ offset + 3 ] & 0xff ) ) ; }
Read an int from the byte array starting at the given offset
79
12
147,651
public static long readUnsignedInt ( byte [ ] bytes , int offset ) { return ( ( ( bytes [ offset + 0 ] & 0xff L ) << 24 ) | ( ( bytes [ offset + 1 ] & 0xff L ) << 16 ) | ( ( bytes [ offset + 2 ] & 0xff L ) << 8 ) | ( bytes [ offset + 3 ] & 0xff L ) ) ; }
Read an unsigned integer from the given byte array
85
9
147,652
public static long readBytes ( byte [ ] bytes , int offset , int numBytes ) { int shift = 0 ; long value = 0 ; for ( int i = offset + numBytes - 1 ; i >= offset ; i -- ) { value |= ( bytes [ i ] & 0xFF L ) << shift ; shift += 8 ; } return value ; }
Read the given number of bytes into a long
75
9
147,653
public static void writeShort ( byte [ ] bytes , short value , int offset ) { bytes [ offset ] = ( byte ) ( 0xFF & ( value >> 8 ) ) ; bytes [ offset + 1 ] = ( byte ) ( 0xFF & value ) ; }
Write a short to the byte array starting at the given offset
57
12
147,654
public static void writeUnsignedShort ( byte [ ] bytes , int value , int offset ) { bytes [ offset ] = ( byte ) ( 0xFF & ( value >> 8 ) ) ; bytes [ offset + 1 ] = ( byte ) ( 0xFF & value ) ; }
Write an unsigned short to the byte array starting at the given offset
59
13
147,655
public static void writeInt ( byte [ ] bytes , int value , int offset ) { bytes [ offset ] = ( byte ) ( 0xFF & ( value >> 24 ) ) ; bytes [ offset + 1 ] = ( byte ) ( 0xFF & ( value >> 16 ) ) ; bytes [ offset + 2 ] = ( byte ) ( 0xFF & ( value >> 8 ) ) ; bytes [ offset + 3 ] = ( byte ) ( 0xFF & value ) ; }
Write an int to the byte array starting at the given offset
101
12
147,656
public static void writeBytes ( byte [ ] bytes , long value , int offset , int numBytes ) { int shift = 0 ; for ( int i = offset + numBytes - 1 ; i >= offset ; i -- ) { bytes [ i ] = ( byte ) ( 0xFF & ( value >> shift ) ) ; shift += 8 ; } }
Write the given number of bytes out to the array
73
10
147,657
public static byte numberOfBytesRequired ( long number ) { if ( number < 0 ) number = - number ; for ( byte i = 1 ; i <= SIZE_OF_LONG ; i ++ ) if ( number < ( 1L << ( 8 * i ) ) ) return i ; throw new IllegalStateException ( "Should never happen." ) ; }
The number of bytes required to hold the given number
75
10
147,658
public static void read ( InputStream stream , byte [ ] buffer ) throws IOException { int read = 0 ; while ( read < buffer . length ) { int newlyRead = stream . read ( buffer , read , buffer . length - read ) ; if ( newlyRead == - 1 ) throw new EOFException ( "Attempt to read " + buffer . length + " bytes failed due to EOF." ) ; read += newlyRead ; } }
Read exactly buffer . length bytes from the stream into the buffer
92
12
147,659
public static byte [ ] getBytes ( String string , String encoding ) { try { return string . getBytes ( encoding ) ; } catch ( UnsupportedEncodingException e ) { throw new IllegalArgumentException ( encoding + " is not a known encoding name." , e ) ; } }
Translate the string to bytes using the given encoding
60
10
147,660
public static String getString ( byte [ ] bytes , String encoding ) { try { return new String ( bytes , encoding ) ; } catch ( UnsupportedEncodingException e ) { throw new IllegalArgumentException ( encoding + " is not a known encoding name." , e ) ; } }
Create a string from bytes using the given encoding
60
9
147,661
public void addRequest ( long timeNS , long numEmptyResponses , long valueBytes , long keyBytes , long getAllAggregatedCount ) { // timing instrumentation (trace only) long startTimeNs = 0 ; if ( logger . isTraceEnabled ( ) ) { startTimeNs = System . nanoTime ( ) ; } long currentTime = time . milliseconds ( ) ; timeSensor . record ( ( double ) timeNS / voldemort . utils . Time . NS_PER_MS , currentTime ) ; emptyResponseKeysSensor . record ( numEmptyResponses , currentTime ) ; valueBytesSensor . record ( valueBytes , currentTime ) ; keyBytesSensor . record ( keyBytes , currentTime ) ; getAllKeysCountSensor . record ( getAllAggregatedCount , currentTime ) ; // timing instrumentation (trace only) if ( logger . isTraceEnabled ( ) ) { logger . trace ( "addRequest took " + ( System . nanoTime ( ) - startTimeNs ) + " ns." ) ; } }
Detailed request to track additional data about PUT GET and GET_ALL
223
14
147,662
public void addEvent ( Event event ) { if ( event == null ) throw new IllegalStateException ( "event must be non-null" ) ; if ( logger . isTraceEnabled ( ) ) logger . trace ( "Adding event " + event ) ; eventQueue . add ( event ) ; }
Add an event to the queue . It will be processed in the order received .
63
16
147,663
public void execute ( ) { try { while ( true ) { Event event = null ; try { event = eventQueue . poll ( timeout , unit ) ; } catch ( InterruptedException e ) { throw new InsufficientOperationalNodesException ( operation . getSimpleName ( ) + " operation interrupted!" , e ) ; } if ( event == null ) throw new VoldemortException ( operation . getSimpleName ( ) + " returned a null event" ) ; if ( event . equals ( Event . ERROR ) ) { if ( logger . isTraceEnabled ( ) ) logger . trace ( operation . getSimpleName ( ) + " request, events complete due to error" ) ; break ; } else if ( event . equals ( Event . COMPLETED ) ) { if ( logger . isTraceEnabled ( ) ) logger . trace ( operation . getSimpleName ( ) + " request, events complete" ) ; break ; } Action action = eventActions . get ( event ) ; if ( action == null ) throw new IllegalStateException ( "action was null for event " + event ) ; if ( logger . isTraceEnabled ( ) ) logger . trace ( operation . getSimpleName ( ) + " request, action " + action . getClass ( ) . getSimpleName ( ) + " to handle " + event + " event" ) ; action . execute ( this ) ; } } finally { finished = true ; } }
Process events in the order as they were received .
298
10
147,664
public static ModelMBean createModelMBean ( Object o ) { try { ModelMBean mbean = new RequiredModelMBean ( ) ; JmxManaged annotation = o . getClass ( ) . getAnnotation ( JmxManaged . class ) ; String description = annotation == null ? "" : annotation . description ( ) ; ModelMBeanInfo info = new ModelMBeanInfoSupport ( o . getClass ( ) . getName ( ) , description , extractAttributeInfo ( o ) , new ModelMBeanConstructorInfo [ 0 ] , extractOperationInfo ( o ) , new ModelMBeanNotificationInfo [ 0 ] ) ; mbean . setModelMBeanInfo ( info ) ; mbean . setManagedResource ( o , "ObjectReference" ) ; return mbean ; } catch ( MBeanException e ) { throw new VoldemortException ( e ) ; } catch ( InvalidTargetObjectTypeException e ) { throw new VoldemortException ( e ) ; } catch ( InstanceNotFoundException e ) { throw new VoldemortException ( e ) ; } }
Create a model mbean from an object using the description given in the Jmx annotation if present . Only operations are supported so far no attributes constructors or notifications
233
32
147,665
public static ModelMBeanOperationInfo [ ] extractOperationInfo ( Object object ) { ArrayList < ModelMBeanOperationInfo > infos = new ArrayList < ModelMBeanOperationInfo > ( ) ; for ( Method m : object . getClass ( ) . getMethods ( ) ) { JmxOperation jmxOperation = m . getAnnotation ( JmxOperation . class ) ; JmxGetter jmxGetter = m . getAnnotation ( JmxGetter . class ) ; JmxSetter jmxSetter = m . getAnnotation ( JmxSetter . class ) ; if ( jmxOperation != null || jmxGetter != null || jmxSetter != null ) { String description = "" ; int visibility = 1 ; int impact = MBeanOperationInfo . UNKNOWN ; if ( jmxOperation != null ) { description = jmxOperation . description ( ) ; impact = jmxOperation . impact ( ) ; } else if ( jmxGetter != null ) { description = jmxGetter . description ( ) ; impact = MBeanOperationInfo . INFO ; visibility = 4 ; } else if ( jmxSetter != null ) { description = jmxSetter . description ( ) ; impact = MBeanOperationInfo . ACTION ; visibility = 4 ; } ModelMBeanOperationInfo info = new ModelMBeanOperationInfo ( m . getName ( ) , description , extractParameterInfo ( m ) , m . getReturnType ( ) . getName ( ) , impact ) ; info . getDescriptor ( ) . setField ( "visibility" , Integer . toString ( visibility ) ) ; infos . add ( info ) ; } } return infos . toArray ( new ModelMBeanOperationInfo [ infos . size ( ) ] ) ; }
Extract all operations and attributes from the given object that have been annotated with the Jmx annotation . Operations are all methods that are marked with the JmxOperation annotation .
387
35
147,666
public static MBeanParameterInfo [ ] extractParameterInfo ( Method m ) { Class < ? > [ ] types = m . getParameterTypes ( ) ; Annotation [ ] [ ] annotations = m . getParameterAnnotations ( ) ; MBeanParameterInfo [ ] params = new MBeanParameterInfo [ types . length ] ; for ( int i = 0 ; i < params . length ; i ++ ) { boolean hasAnnotation = false ; for ( int j = 0 ; j < annotations [ i ] . length ; j ++ ) { if ( annotations [ i ] [ j ] instanceof JmxParam ) { JmxParam param = ( JmxParam ) annotations [ i ] [ j ] ; params [ i ] = new MBeanParameterInfo ( param . name ( ) , types [ i ] . getName ( ) , param . description ( ) ) ; hasAnnotation = true ; break ; } } if ( ! hasAnnotation ) { params [ i ] = new MBeanParameterInfo ( "" , types [ i ] . getName ( ) , "" ) ; } } return params ; }
Extract the parameters from a method using the Jmx annotation if present or just the raw types otherwise
235
20
147,667
public static ObjectName createObjectName ( String domain , String type ) { try { return new ObjectName ( domain + ":type=" + type ) ; } catch ( MalformedObjectNameException e ) { throw new VoldemortException ( e ) ; } }
Create a JMX ObjectName
53
6
147,668
public static String getClassName ( Class < ? > c ) { String name = c . getName ( ) ; return name . substring ( name . lastIndexOf ( ' ' ) + 1 , name . length ( ) ) ; }
Get the class name without the package
50
7
147,669
public static void registerMbean ( Object mbean , ObjectName name ) { registerMbean ( ManagementFactory . getPlatformMBeanServer ( ) , JmxUtils . createModelMBean ( mbean ) , name ) ; }
Register the given mbean with the platform mbean server
51
11
147,670
public static ObjectName registerMbean ( String typeName , Object obj ) { MBeanServer server = ManagementFactory . getPlatformMBeanServer ( ) ; ObjectName name = JmxUtils . createObjectName ( JmxUtils . getPackageName ( obj . getClass ( ) ) , typeName ) ; registerMbean ( server , JmxUtils . createModelMBean ( obj ) , name ) ; return name ; }
Register the given object under the package name of the object s class with the given type name .
95
19
147,671
public static void registerMbean ( MBeanServer server , ModelMBean mbean , ObjectName name ) { try { synchronized ( LOCK ) { if ( server . isRegistered ( name ) ) JmxUtils . unregisterMbean ( server , name ) ; server . registerMBean ( mbean , name ) ; } } catch ( Exception e ) { logger . error ( "Error registering mbean:" , e ) ; } }
Register the given mbean with the server
95
8
147,672
public static void unregisterMbean ( MBeanServer server , ObjectName name ) { try { server . unregisterMBean ( name ) ; } catch ( Exception e ) { logger . error ( "Error unregistering mbean" , e ) ; } }
Unregister the mbean with the given name
57
9
147,673
public static void unregisterMbean ( ObjectName name ) { try { ManagementFactory . getPlatformMBeanServer ( ) . unregisterMBean ( name ) ; } catch ( Exception e ) { logger . error ( "Error unregistering mbean" , e ) ; } }
Unregister the mbean with the given name from the platform mbean server
61
15
147,674
public static boolean isFormatCorrect ( String fileName , ReadOnlyStorageFormat format ) { switch ( format ) { case READONLY_V0 : case READONLY_V1 : if ( fileName . matches ( "^[\\d]+_[\\d]+\\.(data|index)" ) ) { return true ; } else { return false ; } case READONLY_V2 : if ( fileName . matches ( "^[\\d]+_[\\d]+_[\\d]+\\.(data|index)" ) ) { return true ; } else { return false ; } default : throw new VoldemortException ( "Format type not supported" ) ; } }
Given a file name and read - only storage format tells whether the file name format is correct
143
18
147,675
public static int getChunkId ( String fileName ) { Pattern pattern = Pattern . compile ( "_[\\d]+\\." ) ; Matcher matcher = pattern . matcher ( fileName ) ; if ( matcher . find ( ) ) { return new Integer ( fileName . substring ( matcher . start ( ) + 1 , matcher . end ( ) - 1 ) ) ; } else { throw new VoldemortException ( "Could not extract out chunk id from " + fileName ) ; } }
Returns the chunk id for the file name
107
8
147,676
public static File getCurrentVersion ( File storeDirectory ) { File latestDir = getLatestDir ( storeDirectory ) ; if ( latestDir != null ) return latestDir ; File [ ] versionDirs = getVersionDirs ( storeDirectory ) ; if ( versionDirs == null || versionDirs . length == 0 ) { return null ; } else { return findKthVersionedDir ( versionDirs , versionDirs . length - 1 , versionDirs . length - 1 ) [ 0 ] ; } }
Retrieve the dir pointed to by latest symbolic - link or the current version dir
108
16
147,677
public static boolean checkVersionDirName ( File versionDir ) { return ( versionDir . isDirectory ( ) && versionDir . getName ( ) . contains ( "version-" ) && ! versionDir . getName ( ) . endsWith ( ".bak" ) ) ; }
Checks if the name of the file follows the version - n format
58
14
147,678
private static long getVersionId ( String versionDir ) { try { return Long . parseLong ( versionDir . replace ( "version-" , "" ) ) ; } catch ( NumberFormatException e ) { logger . trace ( "Cannot parse version directory to obtain id " + versionDir ) ; return - 1 ; } }
Extracts the version id from a string
67
9
147,679
public static File [ ] getVersionDirs ( File rootDir , final long minId , final long maxId ) { return rootDir . listFiles ( new FileFilter ( ) { public boolean accept ( File pathName ) { if ( checkVersionDirName ( pathName ) ) { long versionId = getVersionId ( pathName ) ; if ( versionId != - 1 && versionId <= maxId && versionId >= minId ) { return true ; } } return false ; } } ) ; }
Returns all the version directories present in the root directory specified
105
11
147,680
@ Override protected void stopInner ( ) throws VoldemortException { List < VoldemortException > exceptions = new ArrayList < VoldemortException > ( ) ; logger . info ( "Stopping services:" + getIdentityNode ( ) . getId ( ) ) ; /* Stop in reverse order */ exceptions . addAll ( stopOnlineServices ( ) ) ; for ( VoldemortService service : Utils . reversed ( basicServices ) ) { try { service . stop ( ) ; } catch ( VoldemortException e ) { exceptions . add ( e ) ; logger . error ( e ) ; } } logger . info ( "All services stopped for Node:" + getIdentityNode ( ) . getId ( ) ) ; if ( exceptions . size ( ) > 0 ) throw exceptions . get ( 0 ) ; // release lock of jvm heap JNAUtils . tryMunlockall ( ) ; }
Attempt to shutdown the server . As much shutdown as possible will be completed even if intermediate errors are encountered .
184
21
147,681
private int getReplicaTypeForPartition ( int partitionId ) { List < Integer > routingPartitionList = routingStrategy . getReplicatingPartitionList ( partitionId ) ; // Determine if we should host this partition, and if so, whether we are a primary, // secondary or n-ary replica for it int correctReplicaType = - 1 ; for ( int replica = 0 ; replica < routingPartitionList . size ( ) ; replica ++ ) { if ( nodePartitionIds . contains ( routingPartitionList . get ( replica ) ) ) { // This means the partitionId currently being iterated on should be hosted // by this node. Let's remember its replica type in order to make sure the // files we have are properly named. correctReplicaType = replica ; break ; } } return correctReplicaType ; }
Given a partition ID determine which replica of this partition is hosted by the current node if any .
178
19
147,682
private void renameReadOnlyV2Files ( int masterPartitionId , int correctReplicaType ) { for ( int replica = 0 ; replica < routingStrategy . getNumReplicas ( ) ; replica ++ ) { if ( replica != correctReplicaType ) { int chunkId = 0 ; while ( true ) { String fileName = Integer . toString ( masterPartitionId ) + "_" + Integer . toString ( replica ) + "_" + Integer . toString ( chunkId ) ; File index = getIndexFile ( fileName ) ; File data = getDataFile ( fileName ) ; if ( index . exists ( ) && data . exists ( ) ) { // We found files with the "wrong" replica type, so let's rename them String correctFileName = Integer . toString ( masterPartitionId ) + "_" + Integer . toString ( correctReplicaType ) + "_" + Integer . toString ( chunkId ) ; File indexWithCorrectReplicaType = getIndexFile ( correctFileName ) ; File dataWithCorrectReplicaType = getDataFile ( correctFileName ) ; Utils . move ( index , indexWithCorrectReplicaType ) ; Utils . move ( data , dataWithCorrectReplicaType ) ; // Maybe change this to DEBUG? logger . info ( "Renamed files with wrong replica type: " + index . getAbsolutePath ( ) + "|data -> " + indexWithCorrectReplicaType . getName ( ) + "|data" ) ; } else if ( index . exists ( ) ^ data . exists ( ) ) { throw new VoldemortException ( "One of the following does not exist: " + index . toString ( ) + " or " + data . toString ( ) + "." ) ; } else { // The files don't exist, or we've gone over all available chunks, // so let's move on. break ; } chunkId ++ ; } } } }
This function looks for files with the wrong replica type in their name and if it finds any renames them .
412
22
147,683
public byte [ ] keyToStorageFormat ( byte [ ] key ) { switch ( getReadOnlyStorageFormat ( ) ) { case READONLY_V0 : case READONLY_V1 : return ByteUtils . md5 ( key ) ; case READONLY_V2 : return ByteUtils . copy ( ByteUtils . md5 ( key ) , 0 , 2 * ByteUtils . SIZE_OF_INT ) ; default : throw new VoldemortException ( "Unknown read-only storage format" ) ; } }
Converts the key to the format in which it is stored for searching
113
14
147,684
public int getChunkForKey ( byte [ ] key ) throws IllegalStateException { if ( numChunks == 0 ) { throw new IllegalStateException ( "The ChunkedFileSet is closed." ) ; } switch ( storageFormat ) { case READONLY_V0 : { return ReadOnlyUtils . chunk ( ByteUtils . md5 ( key ) , numChunks ) ; } case READONLY_V1 : { if ( nodePartitionIds == null ) { throw new IllegalStateException ( "nodePartitionIds is null." ) ; } List < Integer > routingPartitionList = routingStrategy . getPartitionList ( key ) ; routingPartitionList . retainAll ( nodePartitionIds ) ; if ( routingPartitionList . size ( ) != 1 ) { throw new IllegalStateException ( "The key does not belong on this node." ) ; } return chunkIdToChunkStart . get ( routingPartitionList . get ( 0 ) ) + ReadOnlyUtils . chunk ( ByteUtils . md5 ( key ) , chunkIdToNumChunks . get ( routingPartitionList . get ( 0 ) ) ) ; } case READONLY_V2 : { List < Integer > routingPartitionList = routingStrategy . getPartitionList ( key ) ; Pair < Integer , Integer > bucket = null ; for ( int replicaType = 0 ; replicaType < routingPartitionList . size ( ) ; replicaType ++ ) { if ( nodePartitionIds == null ) { throw new IllegalStateException ( "nodePartitionIds is null." ) ; } if ( nodePartitionIds . contains ( routingPartitionList . get ( replicaType ) ) ) { if ( bucket == null ) { bucket = Pair . create ( routingPartitionList . get ( 0 ) , replicaType ) ; } else { throw new IllegalStateException ( "Found more than one replica for a given partition on the current node!" ) ; } } } if ( bucket == null ) { throw new IllegalStateException ( "The key does not belong on this node." ) ; } Integer chunkStart = chunkIdToChunkStart . get ( bucket ) ; if ( chunkStart == null ) { throw new IllegalStateException ( "chunkStart is null." ) ; } return chunkStart + ReadOnlyUtils . chunk ( ByteUtils . md5 ( key ) , chunkIdToNumChunks . get ( bucket ) ) ; } default : { throw new IllegalStateException ( "Unsupported storageFormat: " + storageFormat ) ; } } }
Given a particular key first converts its to the storage format and then determines which chunk it belongs to
550
19
147,685
private static List < String > parseAndCompare ( List < String > fileNames , int masterPartitionId ) { List < String > sourceFileNames = new ArrayList < String > ( ) ; for ( String fileName : fileNames ) { String [ ] partitionIdReplicaChunk = fileName . split ( SPLIT_LITERAL ) ; if ( Integer . parseInt ( partitionIdReplicaChunk [ 0 ] ) == masterPartitionId ) { sourceFileNames . add ( fileName ) ; } } return sourceFileNames ; }
This method take a list of fileName of the type partitionId_Replica_Chunk and returns file names that match the regular expression masterPartitionId_
116
33
147,686
@ JmxGetter ( name = "getChunkIdToNumChunks" , description = "Returns a string representation of the map of chunk id to number of chunks" ) public String getChunkIdToNumChunks ( ) { StringBuilder builder = new StringBuilder ( ) ; for ( Entry < Object , Integer > entry : fileSet . getChunkIdToNumChunks ( ) . entrySet ( ) ) { builder . append ( entry . getKey ( ) . toString ( ) + " - " + entry . getValue ( ) . toString ( ) + ", " ) ; } return builder . toString ( ) ; }
Returns a string representation of map of chunk id to number of chunks
137
13
147,687
public void open ( File versionDir ) { /* acquire modification lock */ fileModificationLock . writeLock ( ) . lock ( ) ; try { /* check that the store is currently closed */ if ( isOpen ) throw new IllegalStateException ( "Attempt to open already open store." ) ; // Find version directory from symbolic link or max version id if ( versionDir == null ) { versionDir = ReadOnlyUtils . getCurrentVersion ( storeDir ) ; if ( versionDir == null ) versionDir = new File ( storeDir , "version-0" ) ; } // Set the max version id long versionId = ReadOnlyUtils . getVersionId ( versionDir ) ; if ( versionId == - 1 ) { throw new VoldemortException ( "Unable to parse id from version directory " + versionDir . getAbsolutePath ( ) ) ; } Utils . mkdirs ( versionDir ) ; // Validate symbolic link, and create it if it doesn't already exist Utils . symlink ( versionDir . getAbsolutePath ( ) , storeDir . getAbsolutePath ( ) + File . separator + "latest" ) ; this . fileSet = new ChunkedFileSet ( versionDir , routingStrategy , nodeId , maxValueBufferAllocationSize ) ; storeVersionManager . syncInternalStateFromFileSystem ( false ) ; this . lastSwapped = System . currentTimeMillis ( ) ; this . isOpen = true ; } catch ( IOException e ) { logger . error ( "Error in opening store" , e ) ; } finally { fileModificationLock . writeLock ( ) . unlock ( ) ; } }
Open the store with the version directory specified . If null is specified we open the directory with the maximum version
349
21
147,688
@ JmxGetter ( name = "lastSwapped" , description = "Time in milliseconds since the store was swapped" ) public long getLastSwapped ( ) { long timeSinceLastSwap = System . currentTimeMillis ( ) - lastSwapped ; return timeSinceLastSwap > 0 ? timeSinceLastSwap : 0 ; }
Time since last time the store was swapped
74
8
147,689
@ Override public void close ( ) throws VoldemortException { logger . debug ( "Close called for read-only store." ) ; this . fileModificationLock . writeLock ( ) . lock ( ) ; try { if ( isOpen ) { this . isOpen = false ; fileSet . close ( ) ; } else { logger . debug ( "Attempt to close already closed store " + getName ( ) ) ; } } finally { this . fileModificationLock . writeLock ( ) . unlock ( ) ; } }
Close the store .
109
4
147,690
@ JmxOperation ( description = "swapFiles changes this store to use the new data directory" ) public void swapFiles ( String newStoreDirectory ) { logger . info ( "Swapping files for store '" + getName ( ) + "' to " + newStoreDirectory ) ; File newVersionDir = new File ( newStoreDirectory ) ; if ( ! newVersionDir . exists ( ) ) throw new VoldemortException ( "File " + newVersionDir . getAbsolutePath ( ) + " does not exist." ) ; if ( ! ( newVersionDir . getParentFile ( ) . compareTo ( storeDir . getAbsoluteFile ( ) ) == 0 && ReadOnlyUtils . checkVersionDirName ( newVersionDir ) ) ) throw new VoldemortException ( "Invalid version folder name '" + newVersionDir + "'. Either parent directory is incorrect or format(version-n) is incorrect" ) ; // retrieve previous version for (a) check if last write is winning // (b) if failure, rollback use File previousVersionDir = ReadOnlyUtils . getCurrentVersion ( storeDir ) ; if ( previousVersionDir == null ) throw new VoldemortException ( "Could not find any latest directory to swap with in store '" + getName ( ) + "'" ) ; long newVersionId = ReadOnlyUtils . getVersionId ( newVersionDir ) ; long previousVersionId = ReadOnlyUtils . getVersionId ( previousVersionDir ) ; if ( newVersionId == - 1 || previousVersionId == - 1 ) throw new VoldemortException ( "Unable to parse folder names (" + newVersionDir . getName ( ) + "," + previousVersionDir . getName ( ) + ") since format(version-n) is incorrect" ) ; // check if we're greater than latest since we want last write to win if ( previousVersionId > newVersionId ) { logger . info ( "No swap required since current latest version " + previousVersionId + " is greater than swap version " + newVersionId ) ; deleteBackups ( ) ; return ; } logger . info ( "Acquiring write lock on '" + getName ( ) + "':" ) ; fileModificationLock . writeLock ( ) . lock ( ) ; boolean success = false ; try { close ( ) ; logger . 
info ( "Opening primary files for store '" + getName ( ) + "' at " + newStoreDirectory ) ; // open the latest store open ( newVersionDir ) ; success = true ; } finally { try { // we failed to do the swap, attempt a rollback to last version if ( ! success ) rollback ( previousVersionDir ) ; } finally { fileModificationLock . writeLock ( ) . unlock ( ) ; if ( success ) logger . info ( "Swap operation completed successfully on store " + getName ( ) + ", releasing lock." ) ; else logger . error ( "Swap operation failed." ) ; } } // okay we have released the lock and the store is now open again, it is // safe to do a potentially slow delete if we have one too many backups deleteBackups ( ) ; }
Swap the current version folder for a new one
666
10
147,691
private void deleteBackups ( ) { File [ ] storeDirList = ReadOnlyUtils . getVersionDirs ( storeDir , 0L , getCurrentVersionId ( ) ) ; if ( storeDirList != null && storeDirList . length > ( numBackups + 1 ) ) { // delete ALL old directories asynchronously File [ ] extraBackups = ReadOnlyUtils . findKthVersionedDir ( storeDirList , 0 , storeDirList . length - ( numBackups + 1 ) - 1 ) ; if ( extraBackups != null ) { for ( File backUpFile : extraBackups ) { deleteAsync ( backUpFile ) ; } } } }
Delete all backups asynchronously
145
6
147,692
private void deleteAsync ( final File file ) { new Thread ( new Runnable ( ) { @ Override public void run ( ) { try { try { logger . info ( "Waiting for " + deleteBackupMs + " milliseconds before deleting " + file . getAbsolutePath ( ) ) ; Thread . sleep ( deleteBackupMs ) ; } catch ( InterruptedException e ) { logger . warn ( "Did not sleep enough before deleting backups" ) ; } logger . info ( "Deleting file " + file . getAbsolutePath ( ) ) ; Utils . rm ( file ) ; logger . info ( "Deleting of " + file . getAbsolutePath ( ) + " completed successfully." ) ; storeVersionManager . syncInternalStateFromFileSystem ( true ) ; } catch ( Exception e ) { logger . error ( "Exception during deleteAsync for path: " + file , e ) ; } } } , "background-file-delete" ) . start ( ) ; }
Delete the given file in a separate thread
212
8
147,693
public void rollback ( File rollbackToDir ) { logger . info ( "Rolling back store '" + getName ( ) + "'" ) ; fileModificationLock . writeLock ( ) . lock ( ) ; try { if ( rollbackToDir == null ) throw new VoldemortException ( "Version directory specified to rollback is null" ) ; if ( ! rollbackToDir . exists ( ) ) throw new VoldemortException ( "Version directory " + rollbackToDir . getAbsolutePath ( ) + " specified to rollback does not exist" ) ; long versionId = ReadOnlyUtils . getVersionId ( rollbackToDir ) ; if ( versionId == - 1 ) throw new VoldemortException ( "Cannot parse version id" ) ; File [ ] backUpDirs = ReadOnlyUtils . getVersionDirs ( storeDir , versionId , Long . MAX_VALUE ) ; if ( backUpDirs == null || backUpDirs . length <= 1 ) { logger . warn ( "No rollback performed since there are no back-up directories" ) ; return ; } backUpDirs = ReadOnlyUtils . findKthVersionedDir ( backUpDirs , 0 , backUpDirs . length - 1 ) ; if ( isOpen ) close ( ) ; // open the rollback directory open ( rollbackToDir ) ; // back-up all other directories DateFormat df = new SimpleDateFormat ( "MM-dd-yyyy" ) ; for ( int index = 1 ; index < backUpDirs . length ; index ++ ) { Utils . move ( backUpDirs [ index ] , new File ( storeDir , backUpDirs [ index ] . getName ( ) + "." + df . format ( new Date ( ) ) + ".bak" ) ) ; } } finally { fileModificationLock . writeLock ( ) . unlock ( ) ; logger . info ( "Rollback operation completed on '" + getName ( ) + "', releasing lock." ) ; } }
Rollback to the specified push version
434
7
147,694
protected boolean hasTimeOutHeader ( ) { boolean result = false ; String timeoutValStr = this . request . getHeader ( RestMessageHeaders . X_VOLD_REQUEST_TIMEOUT_MS ) ; if ( timeoutValStr != null ) { try { this . parsedTimeoutInMs = Long . parseLong ( timeoutValStr ) ; if ( this . parsedTimeoutInMs < 0 ) { RestErrorHandler . writeErrorResponse ( messageEvent , HttpResponseStatus . BAD_REQUEST , "Time out cannot be negative " ) ; } else { result = true ; } } catch ( NumberFormatException nfe ) { logger . error ( "Exception when validating request. Incorrect timeout parameter. Cannot parse this to long: " + timeoutValStr , nfe ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Incorrect timeout parameter. Cannot parse this to long: " + timeoutValStr ) ; } } else { logger . error ( "Error when validating request. Missing timeout parameter." ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Missing timeout parameter." ) ; } return result ; }
Retrieve and validate the timeout value from the REST request . X_VOLD_REQUEST_TIMEOUT_MS is the timeout header .
263
29
147,695
protected void parseRoutingCodeHeader ( ) { String rtCode = this . request . getHeader ( RestMessageHeaders . X_VOLD_ROUTING_TYPE_CODE ) ; if ( rtCode != null ) { try { int routingTypeCode = Integer . parseInt ( rtCode ) ; this . parsedRoutingType = RequestRoutingType . getRequestRoutingType ( routingTypeCode ) ; } catch ( NumberFormatException nfe ) { logger . error ( "Exception when validating request. Incorrect routing type parameter. Cannot parse this to long: " + rtCode , nfe ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Incorrect routing type parameter. Cannot parse this to long: " + rtCode ) ; } catch ( VoldemortException ve ) { logger . error ( "Exception when validating request. Incorrect routing type code: " + rtCode , ve ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Incorrect routing type code: " + rtCode ) ; } } }
Retrieve the routing type value from the REST request . X_VOLD_ROUTING_TYPE_CODE is the routing type header .
251
30
147,696
protected boolean hasTimeStampHeader ( ) { String originTime = request . getHeader ( RestMessageHeaders . X_VOLD_REQUEST_ORIGIN_TIME_MS ) ; boolean result = false ; if ( originTime != null ) { try { // TODO: remove the originTime field from request header, // because coordinator should not accept the request origin time // from the client.. In this commit, we only changed // "this.parsedRequestOriginTimeInMs" from // "Long.parseLong(originTime)" to current system time, // The reason that we did not remove the field from request // header right now, is because this commit is a quick fix for // internal performance test to be available as soon as // possible. this . parsedRequestOriginTimeInMs = System . currentTimeMillis ( ) ; if ( this . parsedRequestOriginTimeInMs < 0 ) { RestErrorHandler . writeErrorResponse ( messageEvent , HttpResponseStatus . BAD_REQUEST , "Origin time cannot be negative " ) ; } else { result = true ; } } catch ( NumberFormatException nfe ) { logger . error ( "Exception when validating request. Incorrect origin time parameter. Cannot parse this to long: " + originTime , nfe ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Incorrect origin time parameter. Cannot parse this to long: " + originTime ) ; } } else { logger . error ( "Error when validating request. Missing origin time parameter." ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Missing origin time parameter." ) ; } return result ; }
Retrieve and validate the timestamp value from the REST request . X_VOLD_REQUEST_ORIGIN_TIME_MS is timestamp header
371
30
147,697
protected boolean hasVectorClock ( boolean isVectorClockOptional ) { boolean result = false ; String vectorClockHeader = this . request . getHeader ( RestMessageHeaders . X_VOLD_VECTOR_CLOCK ) ; if ( vectorClockHeader != null ) { ObjectMapper mapper = new ObjectMapper ( ) ; try { VectorClockWrapper vcWrapper = mapper . readValue ( vectorClockHeader , VectorClockWrapper . class ) ; this . parsedVectorClock = new VectorClock ( vcWrapper . getVersions ( ) , vcWrapper . getTimestamp ( ) ) ; result = true ; } catch ( Exception e ) { logger . error ( "Exception while parsing and constructing vector clock" , e ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Invalid Vector Clock" ) ; } } else if ( ! isVectorClockOptional ) { logger . error ( "Error when validating request. Missing Vector Clock" ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Missing Vector Clock" ) ; } else { result = true ; } return result ; }
Retrieve and validate vector clock value from the REST request . X_VOLD_VECTOR_CLOCK is the vector clock header .
260
29
147,698
protected boolean hasKey ( ) { boolean result = false ; String requestURI = this . request . getUri ( ) ; parseKeys ( requestURI ) ; if ( this . parsedKeys != null ) { result = true ; } else { logger . error ( "Error when validating request. No key specified." ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Error: No key specified !" ) ; } return result ; }
Retrieve and validate the key from the REST request .
105
11
147,699
protected boolean isStoreValid ( ) { boolean result = false ; String requestURI = this . request . getUri ( ) ; this . storeName = parseStoreName ( requestURI ) ; if ( storeName != null ) { result = true ; } else { logger . error ( "Error when validating request. Missing store name." ) ; RestErrorHandler . writeErrorResponse ( this . messageEvent , HttpResponseStatus . BAD_REQUEST , "Missing store name. Critical error." ) ; } return result ; }
Retrieve and validate store name from the REST request .
110
11