idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
24,100
/**
 * Reconnects sequence flows, associations and flow nodes after the initial
 * parse pass, once every target id has been registered in {@code _idMap}.
 * Fix over original: the repeated {@code _idMap.get(flowId)} lookups are
 * hoisted into a local so each flow id is resolved exactly once.
 */
private void reconnectFlows() {
    for (Entry<Object, List<String>> entry : _outgoingFlows.entrySet()) {
        for (String flowId : entry.getValue()) {
            // Resolve the outgoing target once; the original re-queried the map
            // up to four times per flow id.
            Object target = _idMap.get(flowId);
            if (entry.getKey() instanceof SequenceFlow) {
                // A sequence flow points at a flow node; an association may also
                // point back at the sequence flow itself.
                if (target instanceof FlowNode) {
                    ((SequenceFlow) entry.getKey()).setTargetRef((FlowNode) target);
                }
                if (target instanceof Association) {
                    ((Association) target).setTargetRef((SequenceFlow) entry.getKey());
                }
            } else if (entry.getKey() instanceof Association) {
                ((Association) entry.getKey()).setTargetRef((BaseElement) target);
            } else {
                // Key is a flow node: wire its outgoing sequence flow, or make it
                // the source of an association.
                if (target instanceof SequenceFlow) {
                    ((FlowNode) entry.getKey()).getOutgoing().add((SequenceFlow) target);
                } else if (target instanceof Association) {
                    ((Association) target).setSourceRef((BaseElement) entry.getKey());
                }
            }
        }
    }
}
Reconnect the sequence flows and the flow nodes . Done after the initial pass so that we have all the target information .
24,101
/**
 * Parses request parameters and uploaded files into {@code requestParams},
 * {@code listFiles} and {@code listFileStreams}.
 *
 * Multipart requests are walked with the streaming fileupload API: form
 * fields go into {@code requestParams}; named files are buffered fully into
 * memory ({@code ByteArrayOutputStream}) alongside their item metadata.
 * Non-multipart requests copy the parameter map, treating keys ending in
 * "[]" as multi-valued lists.
 *
 * NOTE(review): files are buffered entirely in memory — presumably uploads
 * are expected to be small; verify against callers.
 */
protected void parseRequest(HttpServletRequest request, HttpServletResponse response) {
    requestParams = new HashMap<String, Object>();
    listFiles = new ArrayList<FileItemStream>();
    listFileStreams = new ArrayList<ByteArrayOutputStream>();
    if (ServletFileUpload.isMultipartContent(request)) {
        try {
            ServletFileUpload upload = new ServletFileUpload();
            FileItemIterator iter = upload.getItemIterator(request);
            while (iter.hasNext()) {
                FileItemStream item = iter.next();
                String name = item.getFieldName();
                InputStream stream = item.openStream();
                if (item.isFormField()) {
                    // Plain form field: store its string value.
                    requestParams.put(name, Streams.asString(stream));
                } else {
                    // File part: only keep it if a non-blank filename was supplied.
                    String fileName = item.getName();
                    if (fileName != null && !"".equals(fileName.trim())) {
                        listFiles.add(item);
                        // The item's stream is only valid while iterating, so the
                        // bytes are copied out immediately.
                        ByteArrayOutputStream os = new ByteArrayOutputStream();
                        IOUtils.copy(stream, os);
                        listFileStreams.add(os);
                    }
                }
            }
        } catch (Exception e) {
            logger.error("Unexpected error parsing multipart content", e);
        }
    } else {
        for (Object mapKey : request.getParameterMap().keySet()) {
            String mapKeyString = (String) mapKey;
            if (mapKeyString.endsWith("[]")) {
                // Array-style parameter: collect all values into a list.
                String values[] = request.getParameterValues(mapKeyString);
                List<String> listeValues = new ArrayList<String>();
                for (String value : values) {
                    listeValues.add(value);
                }
                requestParams.put(mapKeyString, listeValues);
            } else {
                String value = request.getParameter(mapKeyString);
                requestParams.put(mapKeyString, value);
            }
        }
    }
}
Parse request parameters and files .
24,102
/**
 * Appends a single key/value pair to the JSON response, logging (rather than
 * propagating) any {@link JSONException} so response assembly continues.
 *
 * @param json  the response object being built
 * @param param the key to set
 * @param value the value to store under {@code param}
 */
protected void putResponse(JSONObject json, String param, Object value) {
    try {
        json.put(param, value);
    } catch (JSONException e) {
        // Deliberate best-effort: a single bad entry should not abort the response.
        logger.error("json write error", e);
    }
}
Append data to JSON response .
24,103
/**
 * Deserializes a variable from its "name:dataType" string form.
 *
 * The data type (when present) is classified against {@code dataTypes}: a
 * known entry becomes the variable's data type, anything else is recorded as
 * a custom data type.
 *
 * @param s            serialized variable, "name" or "name:dataType"
 * @param variableType the kind of variable being created
 * @param dataTypes    known data types, may be null
 * @return the reconstructed variable (name/data type left unset when absent)
 */
public static Variable deserialize(String s, VariableType variableType, List<String> dataTypes) {
    Variable result = new Variable(variableType);
    String[] tokens = s.split(":");
    if (tokens.length == 0 || tokens[0].isEmpty()) {
        return result;
    }
    result.setName(tokens[0]);
    if (tokens.length == 2 && !tokens[1].isEmpty()) {
        String dataType = tokens[1];
        boolean known = dataTypes != null && dataTypes.contains(dataType);
        if (known) {
            result.setDataType(dataType);
        } else {
            result.setCustomDataType(dataType);
        }
    }
    return result;
}
Deserializes a variable checking whether the datatype is custom or not
24,104
/**
 * Deserializes a variable without a list of known data types, so any data
 * type present is treated as custom by the three-argument overload.
 *
 * @see #deserialize(String, VariableType, java.util.List)
 */
public static Variable deserialize(String s, VariableType variableType) {
    return deserialize(s, variableType, null);
}
Deserializes a variable NOT checking whether the datatype is custom
24,105
/**
 * Processes a stencilset template file: reads {@code ssInFile} line by line,
 * rewriting SVG "view" property references, and writes the result to
 * {@code ssOutFile}; finally prints every SVG referenced more than once.
 *
 * Fix over original: write-side {@code IOException}s were silently swallowed
 * by empty catch blocks, so a failed write produced a truncated/empty output
 * file with no indication. The method already declares {@code throws
 * IOException}, so write failures now propagate; the writer is closed in a
 * finally block either way.
 *
 * @throws IOException if the input cannot be read or the output written
 */
public void processStencilSet() throws IOException {
    StringBuilder stencilSetFileContents = new StringBuilder();
    Scanner scanner = null;
    try {
        scanner = new Scanner(new File(ssInFile), "UTF-8");
        String currentLine = "";
        String prevLine = "";
        while (scanner.hasNextLine()) {
            prevLine = currentLine;
            currentLine = scanner.nextLine();
            String trimmedPrevLine = prevLine.trim();
            String trimmedCurrentLine = currentLine.trim();
            if (trimmedCurrentLine.matches(VIEW_PROPERTY_NAME_PATTERN)
                    && trimmedCurrentLine.endsWith(VIEW_PROPERTY_VALUE_SUFFIX)) {
                // Single-line "view" property carrying an inline SVG reference.
                String newLines = processViewPropertySvgReference(currentLine);
                stencilSetFileContents.append(newLines);
            } else if (trimmedPrevLine.matches(VIEW_FILE_PROPERTY_NAME_PATTERN)
                    && trimmedPrevLine.endsWith(VIEW_PROPERTY_VALUE_SUFFIX)
                    && trimmedCurrentLine.matches(VIEW_PROPERTY_NAME_PATTERN)) {
                // Two-line form: previous line names the SVG file, current line
                // is the view property.
                String newLines = processViewFilePropertySvgReference(prevLine, currentLine);
                stencilSetFileContents.append(newLines);
            } else {
                stencilSetFileContents.append(currentLine + LINE_SEPARATOR);
            }
        }
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }
    Writer out = null;
    try {
        out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(ssOutFile), "UTF-8"));
        out.write(stencilSetFileContents.toString());
    } finally {
        if (out != null) {
            out.close();
        }
    }
    System.out.println("SVG files referenced more than once:");
    for (Map.Entry<String, Integer> stringIntegerEntry : mapSVGCounts.entrySet()) {
        if (stringIntegerEntry.getValue() > 1) {
            System.out.println("\t" + stringIntegerEntry.getKey() + "\t = " + stringIntegerEntry.getValue());
        }
    }
}
Processes a stencilset template file
24,106
/**
 * Initializes every configured diagram profile with the servlet context and
 * registers it by name. A null profile list is a no-op.
 *
 * @param context the servlet context passed to each profile
 */
public void init(ServletContext context) {
    if (profiles != null) {
        for (IDiagramProfile profile : profiles) {
            profile.init(context);
            _registry.put(profile.getName(), profile);
        }
    }
}
Initialize the service with a context
24,107
/**
 * Derives the JavaBean property name from an accessor method name: the
 * property name of {@code setSomeValue} is {@code someValue}. Names not
 * beginning with "get" or "set" — and, after this fix, the bare names
 * "get"/"set" themselves — are returned unchanged.
 *
 * Fix over original: a name of exactly "get" or "set" produced an empty
 * StringBuilder and then threw StringIndexOutOfBoundsException on
 * {@code setCharAt(0, ...)}; such names now pass through untouched.
 *
 * @param name the method name, may be null
 * @return the property name, or {@code name} unchanged (null-safe)
 */
public static String getPropertyName(String name) {
    if (name != null
            && name.length() > 3
            && (name.startsWith("get") || name.startsWith("set"))) {
        StringBuilder b = new StringBuilder(name);
        b.delete(0, 3);
        // Lower-case the first character of the remaining property name.
        b.setCharAt(0, Character.toLowerCase(b.charAt(0)));
        return b.toString();
    } else {
        return name;
    }
}
Get the property name of a method name . For example the property name of setSomeValue would be someValue . Names not beginning with set or get are not changed .
24,108
/**
 * Loads the class named {@code className} via {@link Class#forName(String)}.
 *
 * @param className fully qualified class name
 * @return the loaded class
 * @throws IllegalArgumentException wrapping any ClassNotFoundException
 */
public static Class<?> loadClass(String className) {
    try {
        return Class.forName(className);
    } catch (ClassNotFoundException notFound) {
        throw new IllegalArgumentException(notFound);
    }
}
Load the class with the given name using Class.forName; no constructor is invoked. Throws IllegalArgumentException if the class cannot be found.
24,109
/**
 * Loads the class named {@code className} through the supplied class loader,
 * without running static initializers (the {@code initialize=false} form of
 * {@link Class#forName(String, boolean, ClassLoader)}).
 *
 * @param className fully qualified class name
 * @param cl        the class loader to resolve the class with
 * @return the loaded (uninitialized) class
 * @throws IllegalArgumentException wrapping any ClassNotFoundException
 */
public static Class<?> loadClass(String className, ClassLoader cl) {
    try {
        return Class.forName(className, false, cl);
    } catch (ClassNotFoundException notFound) {
        throw new IllegalArgumentException(notFound);
    }
}
Load the given class using a specific class loader .
24,110
/**
 * Invokes the no-argument constructor of the given class by delegating to
 * the three-argument overload with empty type and argument arrays.
 *
 * @param klass the class to instantiate
 * @return a new instance of {@code klass}
 */
public static <T> T callConstructor(Class<T> klass) {
    return callConstructor(klass, new Class<?>[0], new Object[0]);
}
Call the no - arg constructor for the given class
24,111
/**
 * Invokes a constructor of {@code klass}, inferring each parameter type from
 * the runtime class of the corresponding argument.
 *
 * This can be ambiguous when several constructors take the same number of
 * arguments and the values themselves don't disambiguate; it also requires
 * every argument to be non-null (its class is queried).
 *
 * @param klass the class to instantiate
 * @param args  constructor arguments (all non-null)
 * @return a new instance of {@code klass}
 */
public static <T> T callConstructor(Class<T> klass, Object[] args) {
    Class<?>[] inferredTypes = new Class[args.length];
    int position = 0;
    for (Object arg : args) {
        inferredTypes[position++] = arg.getClass();
    }
    return callConstructor(klass, inferredTypes, args);
}
Call the constructor for the given class, inferring the parameter types from the runtime classes of the arguments. This can be ambiguous if there are multiple constructors with the same number of arguments and the values themselves don't disambiguate.
24,112
/**
 * Invokes the named public method reflectively on {@code obj}.
 *
 * An {@link InvocationTargetException} is unwrapped to its cause (via the
 * {@code getCause} helper); an {@link IllegalAccessException} is rethrown as
 * {@link IllegalStateException}.
 *
 * @param obj     the receiver (null for static methods)
 * @param c       the class declaring the method
 * @param name    the method name
 * @param classes the parameter types used to resolve the method
 * @param args    the invocation arguments
 * @return whatever the invoked method returns
 */
public static <T> Object callMethod(Object obj, Class<T> c, String name, Class<?>[] classes, Object[] args) {
    try {
        Method m = getMethod(c, name, classes);
        return m.invoke(obj, args);
    } catch (InvocationTargetException e) {
        // Surface the underlying exception thrown by the target method itself.
        throw getCause(e);
    } catch (IllegalAccessException e) {
        throw new IllegalStateException(e);
    }
}
Call the named method
24,113
/**
 * Looks up a public method on the given class by name and parameter types.
 *
 * @param c        class to search
 * @param name     method name
 * @param argTypes parameter types of the desired overload
 * @return the matching {@link Method}
 * @throws IllegalArgumentException wrapping NoSuchMethodException when absent
 */
public static <T> Method getMethod(Class<T> c, String name, Class<?>... argTypes) {
    try {
        return c.getMethod(name, argTypes);
    } catch (NoSuchMethodException missing) {
        throw new IllegalArgumentException(missing);
    }
}
Get the named method from the class
24,114
/**
 * Removes all non-replica clock entries from the provided versioned values.
 *
 * For each value, clock entries whose node id is not in {@code keyReplicas}
 * are dropped; the value is re-wrapped with a new {@link VectorClock} built
 * from the surviving entries and the original timestamp.
 *
 * @param vals        versioned values whose clocks may reference old replicas
 * @param keyReplicas node ids that are legitimate replicas for the key
 * @param didPrune    out-parameter, set to true if any entry was dropped
 * @return a new list of values with pruned vector clocks (input not mutated)
 */
public static List<Versioned<byte[]>> pruneNonReplicaEntries(List<Versioned<byte[]>> vals, List<Integer> keyReplicas, MutableBoolean didPrune) {
    List<Versioned<byte[]>> prunedVals = new ArrayList<Versioned<byte[]>>(vals.size());
    for (Versioned<byte[]> val : vals) {
        VectorClock clock = (VectorClock) val.getVersion();
        List<ClockEntry> clockEntries = new ArrayList<ClockEntry>();
        for (ClockEntry clockEntry : clock.getEntries()) {
            if (keyReplicas.contains((int) clockEntry.getNodeId())) {
                clockEntries.add(clockEntry);
            } else {
                // Record that at least one stale entry was removed.
                didPrune.setValue(true);
            }
        }
        prunedVals.add(new Versioned<byte[]>(val.getValue(), new VectorClock(clockEntries, clock.getTimestamp())));
    }
    return prunedVals;
}
Remove all non replica clock entries from the list of versioned values provided
24,115
/**
 * Serializes the given object to a byte array using Java serialization.
 *
 * Fix over original: the {@link ObjectOutputStream} was never flushed before
 * {@code toByteArray()}, so bytes still sitting in its internal block-data
 * buffer could be missing from the result. The stream is now flushed (and
 * closed — a no-op for the underlying byte array stream) first.
 *
 * @param object the object to serialize
 * @return the serialized bytes
 * @throws SerializationException wrapping any IOException
 */
public byte[] toBytes(T object) {
    try {
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        ObjectOutputStream out = new ObjectOutputStream(stream);
        out.writeObject(object);
        // Drain ObjectOutputStream's internal buffer before snapshotting.
        out.flush();
        out.close();
        return stream.toByteArray();
    } catch (IOException e) {
        throw new SerializationException(e);
    }
}
Transform the given object into an array of bytes
24,116
/**
 * Deserializes the given bytes back into an object via Java serialization.
 *
 * The unchecked cast to {@code T} is unavoidable with Java serialization;
 * a wrong stored type surfaces as ClassCastException at the caller.
 *
 * @param bytes serialized form produced by {@code toBytes}
 * @return the deserialized object
 * @throws SerializationException wrapping IOException or ClassNotFoundException
 */
@SuppressWarnings("unchecked")
public T toObject(byte[] bytes) {
    try {
        return (T) new ObjectInputStream(new ByteArrayInputStream(bytes)).readObject();
    } catch (IOException e) {
        throw new SerializationException(e);
    } catch (ClassNotFoundException c) {
        throw new SerializationException(c);
    }
}
Transform the given bytes into an object .
24,117
/**
 * Checks whether {@code buffer} holds one complete Voldemort wire request.
 *
 * Mimics the request-parsing logic (op code, store name, routing type, then
 * the per-operation completeness check), so it must stay in sync with
 * handleRequest. Any {@link IOException} is interpreted as a partial read
 * and reported as "not complete" rather than an error.
 *
 * @param buffer buffer positioned at the start of a request
 * @return true if a full request is present, false if more bytes are needed
 * @throws VoldemortException on an unrecognized op code
 */
public boolean isCompleteRequest(final ByteBuffer buffer) throws VoldemortException {
    DataInputStream inputStream = new DataInputStream(new ByteBufferBackedInputStream(buffer));
    try {
        byte opCode = inputStream.readByte();
        // Store name and routing type are consumed but unused here; they must be
        // skipped so the per-op checks start at the right offset.
        inputStream.readUTF();
        getRoutingType(inputStream);
        switch (opCode) {
            case VoldemortOpCode.GET_VERSION_OP_CODE:
                if (!GetVersionRequestHandler.isCompleteRequest(inputStream, buffer))
                    return false;
                break;
            case VoldemortOpCode.GET_OP_CODE:
                if (!GetRequestHandler.isCompleteRequest(inputStream, buffer, protocolVersion))
                    return false;
                break;
            case VoldemortOpCode.GET_ALL_OP_CODE:
                if (!GetAllRequestHandler.isCompleteRequest(inputStream, buffer, protocolVersion))
                    return false;
                break;
            case VoldemortOpCode.PUT_OP_CODE: {
                if (!PutRequestHandler.isCompleteRequest(inputStream, buffer, protocolVersion))
                    return false;
                break;
            }
            case VoldemortOpCode.DELETE_OP_CODE: {
                if (!DeleteRequestHandler.isCompleteRequest(inputStream, buffer))
                    return false;
                break;
            }
            default:
                throw new VoldemortException(" Unrecognized Voldemort OpCode " + opCode);
        }
        // Trailing bytes indicate a client framing bug; log and accept anyway.
        if (buffer.hasRemaining()) {
            logger.info("Probably a client bug, Discarding additional bytes in isCompleteRequest. Opcode: " + opCode + ", remaining bytes: " + buffer.remaining());
        }
        return true;
    } catch (IOException e) {
        // Underflow while parsing means the request is not fully buffered yet.
        if (logger.isDebugEnabled())
            logger.debug("Probable partial read occurred causing exception", e);
        return false;
    }
}
This is pretty ugly . We end up mimicking the request logic here so this needs to stay in sync with handleRequest .
24,118
/**
 * Updates the in-memory map and the write-back store as one atomic step
 * (guarded by {@code synchronized}); on write-back failure the previous
 * value is restored in both places before rethrowing.
 *
 * NOTE(review): if the key was previously absent, {@code oldValue} is null
 * and the rollback re-inserts a null mapping rather than removing the key —
 * confirm whether callers distinguish "absent" from "mapped to null".
 *
 * @param key   the key to update
 * @param value the new value
 * @return the previous value, or null if there was none
 * @throws VoldemortException if the write-back fails (after rollback)
 */
synchronized public V put(K key, V value) {
    V oldValue = this.get(key);
    try {
        super.put(key, value);
        writeBack(key, value);
        return oldValue;
    } catch (Exception e) {
        // Roll both the map and the backing store back to the prior value.
        super.put(key, oldValue);
        writeBack(key, oldValue);
        throw new VoldemortException("Failed to put(" + key + ", " + value + ") in write through cache", e);
    }
}
Updates the value in HashMap and writeBack as Atomic step
24,119
/**
 * Folds an int into the underlying digest/buffer by writing its 4 bytes
 * (via {@code ByteUtils.writeInt}) and delegating to {@code update(byte[])}.
 *
 * @param number the integer to incorporate
 */
public void update(int number) {
    byte[] numberInBytes = new byte[ByteUtils.SIZE_OF_INT];
    ByteUtils.writeInt(numberInBytes, number, 0);
    update(numberInBytes);
}
Update the underlying buffer using the integer
24,120
/**
 * Folds a short into the underlying digest/buffer by writing its 2 bytes
 * (via {@code ByteUtils.writeShort}) and delegating to {@code update(byte[])}.
 *
 * @param number the short to incorporate
 */
public void update(short number) {
    byte[] numberInBytes = new byte[ByteUtils.SIZE_OF_SHORT];
    ByteUtils.writeShort(numberInBytes, number, 0);
    update(numberInBytes);
}
Update the underlying buffer using the short
24,121
/**
 * Tries to delegate responsibility for sending a slop (hint) for the given
 * node to the master thread.
 *
 * @param node the node the slop targets
 * @return false if the async callback has already taken over hint sending;
 *         true if the node was registered as a slop destination
 */
public synchronized boolean tryDelegateSlop(Node node) {
    if (asyncCallbackShouldSendhint) {
        // Too late: the async callback path owns hint sending now.
        return false;
    } else {
        slopDestinations.put(node, true);
        return true;
    }
}
Try to delegate the responsibility of sending slops to master
24,122
/**
 * Tries to hand a response to the master thread for handling by queueing it
 * and waking any waiter; refused once response handling has been cut off.
 *
 * @param response the response to enqueue
 * @return true if queued for the master, false if handling is closed
 */
public synchronized boolean tryDelegateResponseHandling(Response<ByteArray, Object> response) {
    if (responseHandlingCutoff) {
        return false;
    } else {
        responseQueue.offer(response);
        // Wake the master thread blocked in responseQueuePoll().
        this.notifyAll();
        return true;
    }
}
try to delegate the master to handle the response
24,123
/**
 * Polls the response queue, waiting up to the given timeout for a response
 * to arrive (waiters are released by {@code notifyAll()} in
 * {@code tryDelegateResponseHandling}).
 *
 * Fix over original: if the deadline elapsed between the loop condition and
 * computing the remaining time, {@code remainingMs} clamped to 0 and
 * {@code this.wait(0)} then blocked indefinitely, violating the timeout
 * contract. The remaining time is now checked inside the loop and the wait
 * is skipped once the deadline has passed. The loop also guards against
 * spurious wakeups as before.
 *
 * @param timeout  maximum time to wait
 * @param timeUnit unit of {@code timeout}
 * @return the next response, or null if none arrived before the deadline
 * @throws InterruptedException if interrupted while waiting
 */
public synchronized Response<ByteArray, Object> responseQueuePoll(long timeout, TimeUnit timeUnit) throws InterruptedException {
    long timeoutMs = timeUnit.toMillis(timeout);
    long timeoutWallClockMs = System.currentTimeMillis() + timeoutMs;
    while (responseQueue.isEmpty()) {
        long remainingMs = timeoutWallClockMs - System.currentTimeMillis();
        if (remainingMs <= 0) {
            // Deadline passed; wait(0) would block forever, so give up now.
            break;
        }
        if (logger.isDebugEnabled()) {
            logger.debug("Start waiting for response queue with timeoutMs: " + timeoutMs);
        }
        this.wait(remainingMs);
        if (logger.isDebugEnabled()) {
            logger.debug("End waiting for response queue with timeoutMs: " + timeoutMs);
        }
    }
    return responseQueue.poll();
}
poll the response queue for response
24,124
/**
 * Applies client configuration from the given {@link Properties}: JMX flag,
 * first bootstrap URL, R2 connection pool size, and per-operation routing
 * timeouts (GET/GET_ALL/PUT/DELETE/GET_VERSIONS, plus partial-getall
 * allowance). Properties not present leave the current defaults untouched.
 *
 * @param properties raw configuration properties
 */
private void setProperties(Properties properties) {
    Props props = new Props(properties);
    if (props.containsKey(ClientConfig.ENABLE_JMX_PROPERTY)) {
        this.setEnableJmx(props.getBoolean(ClientConfig.ENABLE_JMX_PROPERTY));
    }
    if (props.containsKey(ClientConfig.BOOTSTRAP_URLS_PROPERTY)) {
        // Only the first bootstrap URL is used for HTTP bootstrap.
        List<String> urls = props.getList(ClientConfig.BOOTSTRAP_URLS_PROPERTY);
        if (urls.size() > 0) {
            setHttpBootstrapURL(urls.get(0));
        }
    }
    if (props.containsKey(ClientConfig.MAX_TOTAL_CONNECTIONS_PROPERTY)) {
        setMaxR2ConnectionPoolSize(props.getInt(ClientConfig.MAX_TOTAL_CONNECTIONS_PROPERTY, maxR2ConnectionPoolSize));
    }
    if (props.containsKey(ClientConfig.ROUTING_TIMEOUT_MS_PROPERTY))
        this.setTimeoutMs(props.getLong(ClientConfig.ROUTING_TIMEOUT_MS_PROPERTY, timeoutMs), TimeUnit.MILLISECONDS);
    // The timeout config is (re)built after the global routing timeout so the
    // per-operation overrides below layer on top of it.
    timeoutConfig = new TimeoutConfig(timeoutMs, false);
    if (props.containsKey(ClientConfig.GETALL_ROUTING_TIMEOUT_MS_PROPERTY))
        timeoutConfig.setOperationTimeout(VoldemortOpCode.GET_ALL_OP_CODE, props.getInt(ClientConfig.GETALL_ROUTING_TIMEOUT_MS_PROPERTY));
    if (props.containsKey(ClientConfig.GET_ROUTING_TIMEOUT_MS_PROPERTY))
        timeoutConfig.setOperationTimeout(VoldemortOpCode.GET_OP_CODE, props.getInt(ClientConfig.GET_ROUTING_TIMEOUT_MS_PROPERTY));
    if (props.containsKey(ClientConfig.PUT_ROUTING_TIMEOUT_MS_PROPERTY)) {
        long putTimeoutMs = props.getInt(ClientConfig.PUT_ROUTING_TIMEOUT_MS_PROPERTY);
        timeoutConfig.setOperationTimeout(VoldemortOpCode.PUT_OP_CODE, putTimeoutMs);
        // GET_VERSION defaults to the PUT timeout, possibly overridden below.
        timeoutConfig.setOperationTimeout(VoldemortOpCode.GET_VERSION_OP_CODE, putTimeoutMs);
    }
    if (props.containsKey(ClientConfig.GET_VERSIONS_ROUTING_TIMEOUT_MS_PROPERTY))
        timeoutConfig.setOperationTimeout(VoldemortOpCode.GET_VERSION_OP_CODE, props.getInt(ClientConfig.GET_VERSIONS_ROUTING_TIMEOUT_MS_PROPERTY));
    if (props.containsKey(ClientConfig.DELETE_ROUTING_TIMEOUT_MS_PROPERTY))
        timeoutConfig.setOperationTimeout(VoldemortOpCode.DELETE_OP_CODE, props.getInt(ClientConfig.DELETE_ROUTING_TIMEOUT_MS_PROPERTY));
    if (props.containsKey(ClientConfig.ALLOW_PARTIAL_GETALLS_PROPERTY))
        timeoutConfig.setPartialGetAllAllowed(props.getBoolean(ClientConfig.ALLOW_PARTIAL_GETALLS_PROPERTY));
}
Set the values using the specified Properties object .
24,125
/**
 * Applies coordinator configuration from the given {@link Properties}:
 * bootstrap URLs, fat-client config source/path, metadata check interval,
 * Netty server port/backlog, coordinator thread-pool and queue sizing, HTTP
 * decoder limits, and admin service settings. Absent properties leave the
 * current defaults untouched.
 *
 * @param properties raw configuration properties
 */
private void setProperties(Properties properties) {
    Props props = new Props(properties);
    if (props.containsKey(BOOTSTRAP_URLS_PROPERTY)) {
        setBootstrapURLs(props.getList(BOOTSTRAP_URLS_PROPERTY));
    }
    if (props.containsKey(FAT_CLIENTS_CONFIG_SOURCE)) {
        setFatClientConfigSource(StoreClientConfigSource.get(props.getString(FAT_CLIENTS_CONFIG_SOURCE)));
    }
    if (props.containsKey(FAT_CLIENTS_CONFIG_FILE_PATH_PROPERTY)) {
        setFatClientConfigPath(props.getString(FAT_CLIENTS_CONFIG_FILE_PATH_PROPERTY));
    }
    if (props.containsKey(METADATA_CHECK_INTERVAL_IN_MS)) {
        setMetadataCheckIntervalInMs(props.getInt(METADATA_CHECK_INTERVAL_IN_MS));
    }
    if (props.containsKey(NETTY_SERVER_PORT)) {
        setServerPort(props.getInt(NETTY_SERVER_PORT));
    }
    if (props.containsKey(NETTY_SERVER_BACKLOG)) {
        setNettyServerBacklog(props.getInt(NETTY_SERVER_BACKLOG));
    }
    if (props.containsKey(COORDINATOR_CORE_THREADS)) {
        setCoordinatorCoreThreads(props.getInt(COORDINATOR_CORE_THREADS));
    }
    if (props.containsKey(COORDINATOR_MAX_THREADS)) {
        setCoordinatorMaxThreads(props.getInt(COORDINATOR_MAX_THREADS));
    }
    if (props.containsKey(COORDINATOR_QUEUED_REQUESTS)) {
        setCoordinatorQueuedRequestsSize(props.getInt(COORDINATOR_QUEUED_REQUESTS));
    }
    if (props.containsKey(HTTP_MESSAGE_DECODER_MAX_INITIAL_LINE_LENGTH)) {
        setHttpMessageDecoderMaxInitialLength(props.getInt(HTTP_MESSAGE_DECODER_MAX_INITIAL_LINE_LENGTH));
    }
    if (props.containsKey(HTTP_MESSAGE_DECODER_MAX_HEADER_SIZE)) {
        setHttpMessageDecoderMaxHeaderSize(props.getInt(HTTP_MESSAGE_DECODER_MAX_HEADER_SIZE));
    }
    if (props.containsKey(HTTP_MESSAGE_DECODER_MAX_CHUNK_SIZE)) {
        setHttpMessageDecoderMaxChunkSize(props.getInt(HTTP_MESSAGE_DECODER_MAX_CHUNK_SIZE));
    }
    if (props.containsKey(ADMIN_ENABLE)) {
        setAdminServiceEnabled(props.getBoolean(ADMIN_ENABLE));
    }
    if (props.containsKey(ADMIN_PORT)) {
        setAdminPort(props.getInt(ADMIN_PORT));
    }
}
Set the values using the specified Properties object
24,126
/**
 * Sets the bootstrap URLs used by the fat clients inside the coordinator.
 *
 * Fix over original: the field was assigned before validation, so a rejected
 * (empty) list still overwrote any previously valid configuration before the
 * exception was thrown. Validation now happens first; the field only changes
 * on success.
 *
 * @param bootstrapUrls non-null, non-empty list of bootstrap URLs
 * @return this config, for chaining
 * @throws IllegalArgumentException if the list is null or empty
 */
public CoordinatorConfig setBootstrapURLs(List<String> bootstrapUrls) {
    // Validate before mutating state so a bad argument can't clobber a good config.
    if (Utils.notNull(bootstrapUrls).size() <= 0)
        throw new IllegalArgumentException("Must provide at least one bootstrap URL.");
    this.bootstrapURLs = bootstrapUrls;
    return this;
}
Sets the bootstrap URLs used by the different Fat clients inside the Coordinator
24,127
/**
 * Sends one asynchronous hint (slop) to the first available candidate node,
 * registering a callback that retries with the remaining nodes on failure,
 * until all candidates are exhausted.
 *
 * Fix over original: the callback's success logging and
 * {@code failureDetector.recordSuccess(...)} ran unconditionally — even on
 * the failure path, right after {@code recordException(...)} and scheduling
 * a retry. They are now in an {@code else} branch so a node is recorded as
 * either failed or successful, never both. (An ObsoleteVersionException
 * still counts as success, as before.)
 */
private void sendOneAsyncHint(final ByteArray slopKey, final Versioned<byte[]> slopVersioned, final List<Node> nodesToTry) {
    Node nodeToHostHint = null;
    boolean foundNode = false;
    // Pop candidates until one is neither already failed nor marked down.
    while (nodesToTry.size() > 0) {
        nodeToHostHint = nodesToTry.remove(0);
        if (!failedNodes.contains(nodeToHostHint) && failureDetector.isAvailable(nodeToHostHint)) {
            foundNode = true;
            break;
        }
    }
    if (!foundNode) {
        Slop slop = slopSerializer.toObject(slopVersioned.getValue());
        logger.error("Trying to send an async hint but used up all nodes. key: " + slop.getKey() + " version: " + slopVersioned.getVersion().toString());
        return;
    }
    final Node node = nodeToHostHint;
    int nodeId = node.getId();
    NonblockingStore nonblockingStore = nonblockingSlopStores.get(nodeId);
    Utils.notNull(nonblockingStore);
    final Long startNs = System.nanoTime();
    NonblockingStoreCallback callback = new NonblockingStoreCallback() {

        public void requestComplete(Object result, long requestTime) {
            Slop slop = null;
            boolean loggerDebugEnabled = logger.isDebugEnabled();
            if (loggerDebugEnabled) {
                // Deserialized only for debug messages; avoid the cost otherwise.
                slop = slopSerializer.toObject(slopVersioned.getValue());
            }
            Response<ByteArray, Object> response = new Response<ByteArray, Object>(node, slopKey, result, requestTime);
            if (response.getValue() instanceof Exception
                    && !(response.getValue() instanceof ObsoleteVersionException)) {
                if (!failedNodes.contains(node))
                    failedNodes.add(node);
                if (response.getValue() instanceof UnreachableStoreException) {
                    UnreachableStoreException use = (UnreachableStoreException) response.getValue();
                    if (loggerDebugEnabled) {
                        logger.debug("Write of key " + slop.getKey() + " for " + slop.getNodeId() + " to node " + node + " failed due to unreachable: " + use.getMessage());
                    }
                    failureDetector.recordException(node, (System.nanoTime() - startNs) / Time.NS_PER_MS, use);
                }
                // Retry with the remaining candidate nodes.
                sendOneAsyncHint(slopKey, slopVersioned, nodesToTry);
            } else {
                // Success path only (previously also executed after failures).
                if (loggerDebugEnabled)
                    logger.debug("Slop write of key " + slop.getKey() + " for node " + slop.getNodeId() + " to node " + node + " succeeded in " + (System.nanoTime() - startNs) + " ns");
                failureDetector.recordSuccess(node, (System.nanoTime() - startNs) / Time.NS_PER_MS);
            }
        }
    };
    nonblockingStore.submitPutRequest(slopKey, slopVersioned, null, callback, timeoutMs);
}
Sends one asynchronous hint to the first available candidate node, registering a requestComplete callback (invoked by the NIO selector manager) that retries with the remaining nodes until all candidates are exhausted.
24,128
/**
 * Produces an iterator over the input values in sorted order. Sorting occurs
 * in the fixed memory configured in the constructor: the input is split into
 * segments of at most {@code internalSortSize} values, each segment is
 * sorted and spilled to a temp file on a worker thread, and the spilled
 * segments are merge-read back via {@code ExternalSorterIterator}.
 *
 * Fixes over original:
 * - empty input produced zero temp files and {@code bufferSize /
 *   tempFiles.size()} threw ArithmeticException (division by zero);
 * - the segment output stream leaked if a write failed (now closed in a
 *   finally safety net);
 * - InterruptedException now re-interrupts the thread before rethrowing.
 *
 * @param input the values to sort
 * @return an iterable yielding the values in comparator order
 */
public Iterable<V> sorted(Iterator<V> input) {
    ExecutorService executor = new ThreadPoolExecutor(this.numThreads, this.numThreads, 1000L, TimeUnit.MILLISECONDS, new SynchronousQueue<Runnable>(), new CallerRunsPolicy());
    final AtomicInteger count = new AtomicInteger(0);
    final List<File> tempFiles = Collections.synchronizedList(new ArrayList<File>());
    while (input.hasNext()) {
        final int segmentId = count.getAndIncrement();
        final long segmentStartMs = System.currentTimeMillis();
        logger.info("Segment " + segmentId + ": filling sort buffer for segment...");
        @SuppressWarnings("unchecked")
        final V[] buffer = (V[]) new Object[internalSortSize];
        // Fill this segment's buffer from the input (may be a short final segment).
        int segmentSizeIter = 0;
        for (; segmentSizeIter < internalSortSize && input.hasNext(); segmentSizeIter++)
            buffer[segmentSizeIter] = input.next();
        final int segmentSize = segmentSizeIter;
        logger.info("Segment " + segmentId + ": sort buffer filled...adding to sort queue.");
        executor.execute(new Runnable() {

            public void run() {
                logger.info("Segment " + segmentId + ": sorting buffer.");
                long start = System.currentTimeMillis();
                Arrays.sort(buffer, 0, segmentSize, comparator);
                long elapsed = System.currentTimeMillis() - start;
                logger.info("Segment " + segmentId + ": sort completed in " + elapsed + " ms, writing to temp file.");
                DataOutputStream output = null;
                try {
                    File tempFile = File.createTempFile("segment-", ".dat", tempDir);
                    tempFile.deleteOnExit();
                    tempFiles.add(tempFile);
                    OutputStream os = new BufferedOutputStream(new FileOutputStream(tempFile), bufferSize);
                    if (gzip)
                        os = new GZIPOutputStream(os);
                    output = new DataOutputStream(os);
                    for (int i = 0; i < segmentSize; i++)
                        writeValue(output, buffer[i]);
                    // Explicit close so a failed flush (e.g. gzip trailer) is reported.
                    output.close();
                    output = null;
                } catch (IOException e) {
                    throw new VoldemortException(e);
                } finally {
                    // Safety net: don't leak the stream if a write failed above.
                    if (output != null) {
                        try {
                            output.close();
                        } catch (IOException ignored) {
                            // Already propagating the original failure.
                        }
                    }
                }
                long segmentElapsed = System.currentTimeMillis() - segmentStartMs;
                logger.info("Segment " + segmentId + ": completed processing of segment in " + segmentElapsed + " ms.");
            }
        });
    }
    executor.shutdown();
    try {
        executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
        // Guard against empty input: no temp files means no division possible.
        int perFileBufferSize = tempFiles.isEmpty() ? bufferSize : bufferSize / tempFiles.size();
        return new DefaultIterable<V>(new ExternalSorterIterator(tempFiles, perFileBufferSize));
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    }
}
Produce an iterator over the input values in sorted order . Sorting will occur in the fixed space configured in the constructor data will be dumped to disk as necessary .
24,129
/**
 * Processes one async put response: updates pending/received counters, feeds
 * the failure detector, records the responding zone on success, and on
 * (non-obsolete-version) errors decides between aborting the pipeline stage,
 * registering a slop, or merely logging.
 *
 * A null response means the overall routing timeout fired while responses
 * were still outstanding; only a warning is logged in that case.
 *
 * @param response the async response, or null on routing timeout
 * @param pipeline the pipeline this put is executing in
 */
private void processResponse(Response<ByteArray, Object> response, Pipeline pipeline) {
    if (response == null) {
        logger.warn("RoutingTimedout on waiting for async ops; parallelResponseToWait: " + numNodesPendingResponse + "; preferred-1: " + (preferred - 1) + "; quorumOK: " + quorumSatisfied + "; zoneOK: " + zonesSatisfied);
    } else {
        numNodesPendingResponse = numNodesPendingResponse - 1;
        numResponsesGot = numResponsesGot + 1;
        // ObsoleteVersionException is deliberately NOT treated as an error here.
        if (response.getValue() instanceof Exception
                && !(response.getValue() instanceof ObsoleteVersionException)) {
            if (logger.isDebugEnabled()) {
                logger.debug("PUT {key:" + key + "} handling async put error");
            }
            if (response.getValue() instanceof QuotaExceededException) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Received quota exceeded exception after a successful " + pipeline.getOperation().getSimpleName() + " call on node " + response.getNode().getId() + ", store '" + pipelineData.getStoreName() + "', master-node '" + pipelineData.getMaster().getId() + "'");
                }
            } else if (handleResponseError(response, pipeline, failureDetector)) {
                // Fatal for this stage: stop processing further responses.
                if (logger.isDebugEnabled()) {
                    logger.debug("PUT {key:" + key + "} severe async put error, exiting parallel put stage");
                }
                return;
            }
            // Slop-eligible failures (and quota errors) are handed off for
            // hinted handoff to this node.
            if (PipelineRoutedStore.isSlopableFailure(response.getValue())
                    || response.getValue() instanceof QuotaExceededException) {
                pipelineData.getSynchronizer().tryDelegateSlop(response.getNode());
            }
            if (logger.isDebugEnabled()) {
                logger.debug("PUT {key:" + key + "} handled async put error");
            }
        } else {
            // Success (or obsolete version): count it and credit the node/zone.
            pipelineData.incrementSuccesses();
            failureDetector.recordSuccess(response.getNode(), response.getRequestTime());
            pipelineData.getZoneResponses().add(response.getNode().getZoneId());
        }
    }
}
Process the response by reporting proper log and feeding failure detectors
24,130
/**
 * Checks whether the zone-count policy is satisfied: either no zone
 * requirement is configured, or responses have been received from at least
 * (required + 1) distinct zones.
 *
 * @return true if the zone policy is met
 */
private boolean isZonesSatisfied() {
    Integer requiredZones = pipelineData.getZonesRequired();
    if (requiredZones == null) {
        return true;
    }
    return pipelineData.getZoneResponses().size() >= requiredZones + 1;
}
Check if zone count policy is satisfied
24,131
/**
 * Sets the timeout after which a cached, idle connection to a Voldemort
 * server may be dropped. A non-positive value disables the idle timeout
 * (stored as -1); otherwise the value must be at least 10 minutes.
 *
 * @param idleConnectionTimeout the timeout value (<= 0 disables)
 * @param unit                  unit of the timeout
 * @return this config, for chaining
 * @throws IllegalArgumentException if a positive timeout is under 10 minutes
 */
public ClientConfig setIdleConnectionTimeout(long idleConnectionTimeout, TimeUnit unit) {
    if (idleConnectionTimeout <= 0) {
        this.idleConnectionTimeoutMs = -1;
    } else {
        // Very short idle timeouts would churn connections; enforce a floor.
        if (unit.toMinutes(idleConnectionTimeout) < 10) {
            throw new IllegalArgumentException("idleConnectionTimeout should be minimum of 10 minutes");
        }
        this.idleConnectionTimeoutMs = unit.toMillis(idleConnectionTimeout);
    }
    return this;
}
Set the timeout for idle connections . Voldemort client caches all connections to the Voldemort server . This setting allows the a connection to be dropped if it is idle for more than this time .
24,132
/**
 * Sets how long a node is banned after an operation against it fails.
 *
 * @param nodeBannagePeriod the bannage duration
 * @param unit              unit of the duration
 * @return this config, for chaining
 */
public ClientConfig setNodeBannagePeriod(int nodeBannagePeriod, TimeUnit unit) {
    this.failureDetectorBannagePeriod = unit.toMillis(nodeBannagePeriod);
    return this;
}
The period of time to ban a node that gives an error on an operation .
24,133
/**
 * Sets how long an idle client thread is kept alive before being reclaimed.
 *
 * @param threadIdleTime the idle duration
 * @param unit           unit of the duration
 * @return this config, for chaining
 */
public ClientConfig setThreadIdleTime(long threadIdleTime, TimeUnit unit) {
    this.threadIdleMs = unit.toMillis(threadIdleTime);
    return this;
}
The amount of time to keep an idle client thread alive
24,134
/**
 * Resets the pool of resources for one destination: idle resources are
 * destroyed, checked-out resources are destroyed when checked back in, and
 * newly created resources may re-establish the pool. The close timestamp is
 * recorded so in-flight resources created before it can be recognized.
 *
 * @param destination the socket destination whose pool is reset
 */
public void close(SocketDestination destination) {
    factory.setLastClosedTimestamp(destination);
    queuedPool.reset(destination);
}
Reset the pool of resources for a specific destination . Idle resources will be destroyed . Checked out resources that are subsequently checked in will be destroyed . Newly created resources can be checked in to reestablish resources for the specific destination .
24,135
/**
 * Permanently closes the ClientRequestExecutor pool; resources subsequently
 * checked in are destroyed. Closes stats (after best-effort JMX
 * unregistration), the factory, and the underlying queued pool.
 */
public void close() {
    if (stats != null) {
        try {
            if (this.jmxEnabled)
                JmxUtils.unregisterMbean(getAggregateMetricName());
        } catch (Exception e) {
            // Deliberate best-effort: failure to unregister the MBean must not
            // prevent the pool from shutting down.
        }
        stats.close();
    }
    factory.close();
    queuedPool.close();
}
Permanently close the ClientRequestExecutor pool . Resources subsequently checked in will be destroyed .
24,136
/**
 * Reads the file at {@code path} fully and returns its contents as a string
 * (platform default charset, as before). A non-positive buffer size returns
 * the empty string without opening the file.
 *
 * Fixes over original: the input stream was never closed (resource leak),
 * and each loop iteration copied the read bytes into a fresh array with
 * {@code ByteUtils.copy} — which also shrank the read buffer to the size of
 * the last read. Both are replaced by {@code stream.write(buffer, 0, read)}
 * and a finally-close.
 *
 * @param fs         the filesystem to read from
 * @param path       the file to read
 * @param bufferSize read buffer size; <= 0 yields ""
 * @return the file contents as a string
 * @throws IOException if opening or reading fails
 */
public static String readFileContents(FileSystem fs, Path path, int bufferSize) throws IOException {
    if (bufferSize <= 0)
        return new String();
    FSDataInputStream input = fs.open(path);
    try {
        byte[] buffer = new byte[bufferSize];
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        while (true) {
            int read = input.read(buffer);
            if (read < 0) {
                break;
            }
            // Write only the bytes actually read; no intermediate copy needed.
            stream.write(buffer, 0, read);
        }
        return new String(stream.toByteArray());
    } finally {
        input.close();
    }
}
Given a filesystem path and buffer - size read the file contents and presents it as a string
24,137
/**
 * Lists, under the given node path, all data-chunk files belonging to one
 * partition/replica-type pair: names of the form
 * "&lt;partitionId&gt;_&lt;replicaType&gt;_&lt;chunk&gt;.data".
 *
 * @param fs          the filesystem to list from
 * @param path        the node directory
 * @param partitionId the partition whose chunks are wanted
 * @param replicaType the replica type whose chunks are wanted
 * @return the matching file statuses
 * @throws IOException if listing fails
 */
public static FileStatus[] getDataChunkFiles(FileSystem fs, Path path, final int partitionId, final int replicaType) throws IOException {
    final String chunkNamePattern =
            "^" + Integer.toString(partitionId) + "_" + Integer.toString(replicaType) + "_[\\d]+\\.data";
    return fs.listStatus(path, new PathFilter() {

        public boolean accept(Path input) {
            return input.getName().matches(chunkNamePattern);
        }
    });
}
Given a filesystem and path to a node gets all the files which belong to a partition and replica type
24,138
/**
 * Checks whether every requirement except "preferred" is satisfied: enough
 * successes overall and — when a zone requirement is configured — responses
 * from at least (required + 1) distinct zones.
 *
 * @return true when the required-success (and zone, if set) policy is met
 */
private boolean isSatisfied() {
    boolean enoughSuccesses = pipelineData.getSuccesses() >= required;
    Integer requiredZones = pipelineData.getZonesRequired();
    if (requiredZones == null) {
        return enoughSuccesses;
    }
    return enoughSuccesses && pipelineData.getZoneResponses().size() >= requiredZones + 1;
}
Checks whether every property except preferred is satisfied
24,139
/**
 * Totals the partition-store moves in this batch plan whose donor and
 * stealer live in different zones.
 *
 * @return the number of cross-zone partition-store moves
 */
public int getCrossZonePartitionStoreMoves() {
    int crossZoneMoves = 0;
    for (RebalanceTaskInfo task : batchPlan) {
        Node donor = finalCluster.getNodeById(task.getDonorId());
        Node stealer = finalCluster.getNodeById(task.getStealerId());
        boolean crossesZone = donor.getZoneId() != stealer.getZoneId();
        if (crossesZone) {
            crossZoneMoves += task.getPartitionStoreMoves();
        }
    }
    return crossZoneMoves;
}
Determines total number of partition - stores moved across zones .
24,140
/**
 * Decides which donor node the stealer should take a partition n-ary from.
 *
 * Policy: determine the zone n-ary the stealer will hold in the final plan;
 * if the current plan already hosts that n-ary within the stealer's zone,
 * steal intra-zone, otherwise steal from the zone of the partition's current
 * master. This is one policy implementation — alternatives would override
 * this method or be factored into a policy object.
 *
 * @param currentSRP        routing plan for the current cluster
 * @param finalSRP          routing plan for the target cluster
 * @param stealerZoneId     zone of the stealing node
 * @param stealerNodeId     id of the stealing node
 * @param stealerPartitionId partition being stolen
 * @return the node id to donate the partition replica
 */
protected int getDonorId(StoreRoutingPlan currentSRP, StoreRoutingPlan finalSRP, int stealerZoneId, int stealerNodeId, int stealerPartitionId) {
    int stealerZoneNAry = finalSRP.getZoneNaryForNodesPartition(stealerZoneId, stealerNodeId, stealerPartitionId);
    int donorZoneId;
    if (currentSRP.zoneNAryExists(stealerZoneId, stealerZoneNAry, stealerPartitionId)) {
        // Steal from within the same zone when the n-ary already exists there.
        donorZoneId = stealerZoneId;
    } else {
        // Otherwise steal from the zone of the partition's current master.
        int currentMasterNodeId = currentSRP.getNodeIdForPartitionId(stealerPartitionId);
        donorZoneId = currentCluster.getNodeById(currentMasterNodeId).getZoneId();
    }
    return currentSRP.getNodeIdForZoneNary(donorZoneId, stealerZoneNAry, stealerPartitionId);
}
Decide which donor node to steal from . This is a policy implementation . I . e . in the future additional policies could be considered . At that time this method should be overridden in a sub - class or a policy object ought to implement this algorithm .
24,141
/**
 * Catch-all handler for exceptions not recognized by operation-specific
 * subclasses: logs the exception and answers 500 Internal Server Error.
 *
 * @param messageEvent the Netty event to respond on
 * @param exception    the unrecognized exception
 */
protected void handleExceptions(MessageEvent messageEvent, Exception exception) {
    logger.error("Unknown exception. Internal Server Error.", exception);
    writeErrorResponse(messageEvent, HttpResponseStatus.INTERNAL_SERVER_ERROR, "Internal Server Error");
}
Exceptions specific to each operation is handled in the corresponding subclass . At this point we don t know the reason behind this exception .
24,142
/**
 * Sends a plain-text error response with the given status and message to
 * the client attached to the message event.
 */
public static void writeErrorResponse(MessageEvent messageEvent,
                                      HttpResponseStatus status,
                                      String message) {
    HttpResponse response = new DefaultHttpResponse(HTTP_1_1, status);
    response.setHeader(CONTENT_TYPE, "text/plain; charset=UTF-8");
    String body = "Failure: " + status.toString() + ". " + message + "\r\n";
    response.setContent(ChannelBuffers.copiedBuffer(body, CharsetUtil.UTF_8));
    response.setHeader(CONTENT_LENGTH, response.getContent().readableBytes());
    messageEvent.getChannel().write(response);
}
Writes all error responses to the client .
24,143
/**
 * Runs the PUT-specific validations on top of the common ones: the request
 * must pass the superclass checks and carry a vector clock (unless marked
 * optional), a Content-Length header and a Content-Type header.
 *
 * Simplified from the original redundant result-flag dance into a single
 * short-circuit expression with identical evaluation order.
 *
 * @return true iff every validation passed
 */
@Override
public boolean parseAndValidateRequest() {
    return super.parseAndValidateRequest()
           && hasVectorClock(this.isVectorClockOptional)
           && hasContentLength()
           && hasContentType();
}
Validations specific to PUT
24,144
/**
 * Validates that the request carries a Content-Length header that parses
 * as a long. On a missing or malformed header a BAD_REQUEST response is
 * written to the client.
 *
 * @return true iff the header is present and parseable
 */
protected boolean hasContentLength() {
    String contentLength = this.request.getHeader(RestMessageHeaders.CONTENT_LENGTH);
    if (contentLength == null) {
        logger.error("Error when validating put request. Missing Content-Length header.");
        RestErrorHandler.writeErrorResponse(this.messageEvent,
                                            HttpResponseStatus.BAD_REQUEST,
                                            "Missing Content-Length header");
        return false;
    }
    try {
        Long.parseLong(contentLength);
        return true;
    } catch (NumberFormatException nfe) {
        logger.error("Exception when validating put request. Incorrect content length parameter. Cannot parse this to long: "
                             + contentLength + ". Details: " + nfe.getMessage(),
                     nfe);
        RestErrorHandler.writeErrorResponse(this.messageEvent,
                                            HttpResponseStatus.BAD_REQUEST,
                                            "Incorrect content length parameter. Cannot parse this to long: "
                                                    + contentLength + ". Details: "
                                                    + nfe.getMessage());
        return false;
    }
}
Retrieves and validates the content length from the REST request .
24,145
/**
 * Validates that the request carries a Content-Type header; reports
 * BAD_REQUEST to the client when it is absent.
 *
 * @return true iff the header is present
 */
protected boolean hasContentType() {
    if (this.request.getHeader(RestMessageHeaders.CONTENT_TYPE) != null) {
        return true;
    }
    logger.error("Error when validating put request. Missing Content-Type header.");
    RestErrorHandler.writeErrorResponse(this.messageEvent,
                                        HttpResponseStatus.BAD_REQUEST,
                                        "Missing Content-Type header");
    return false;
}
Retrieves and validates the content type from the REST requests
24,146
/**
 * Copies the REST request body into {@code parsedValue}.
 *
 * Fix: size the destination array by {@code readableBytes()} rather than
 * {@code capacity()}. A Netty buffer's capacity can exceed its readable
 * region, in which case {@code readBytes} on a capacity-sized array would
 * throw an IndexOutOfBoundsException (or copy garbage past the writer
 * index).
 */
private void parseValue() {
    ChannelBuffer content = this.request.getContent();
    this.parsedValue = new byte[content.readableBytes()];
    content.readBytes(parsedValue);
}
Retrieve the value from the REST request body .
24,147
/**
 * Distinguishes unrecoverable IOExceptions (closed filesystem, access
 * control failures) -- which abort immediately -- from transient ones,
 * which are logged and waited on before a retry.
 *
 * Fix: {@code IOException.getMessage()} may be null; guard before calling
 * {@code contains} to avoid masking the original failure with an NPE.
 *
 * @throws VoldemortException when the failure is not recoverable
 * @throws InterruptedException if interrupted while waiting before retry
 */
private void handleIOException(IOException e, String action, int attempt)
        throws VoldemortException, InterruptedException {
    String message = e.getMessage();
    boolean unrecoverable = (message != null && message.contains("Filesystem closed"))
                            || ExceptionUtils.recursiveClassEquals(e, AccessControlException.class);
    if (unrecoverable) {
        throw new VoldemortException("Got an IOException we cannot recover from while trying to "
                                             + action + ". Attempt # " + attempt + "/"
                                             + maxAttempts + ". Will not try again.",
                                     e);
    }
    logFailureAndWait(action, IO_EXCEPTION, attempt, e);
}
This function is intended to detect the subset of IOException which are not considered recoverable in which case we want to bubble up the exception instead of retrying .
24,148
/**
 * Atomically (under the per-node lock) records the node's new availability
 * and the time of this check, returning the previous availability so the
 * caller can tell "newly available" apart from "already available".
 *
 * @return the availability flag as it was before this update
 */
private boolean setAvailable ( NodeStatus nodeStatus , boolean isAvailable ) { synchronized ( nodeStatus ) { boolean previous = nodeStatus . isAvailable ( ) ; nodeStatus . setAvailable ( isAvailable ) ; nodeStatus . setLastChecked ( getConfig ( ) . getTime ( ) . getMilliseconds ( ) ) ; return previous ; } }
We need to distinguish the case where we're newly available and the case where we're already available. So we check the node status before we update it and return it to the caller.
24,149
/** Registers the -d / --dir option (single required string argument: a directory path). */
public static void acceptsDir ( OptionParser parser ) { parser . acceptsAll ( Arrays . asList ( OPT_D , OPT_DIR ) , "directory path for input/output" ) . withRequiredArg ( ) . describedAs ( "dir-path" ) . ofType ( String . class ) ; }
Adds OPT_D | OPT_DIR option to OptionParser with one argument .
24,150
/** Registers the -f / --file option (single required string argument: a file path). */
public static void acceptsFile ( OptionParser parser ) { parser . acceptsAll ( Arrays . asList ( OPT_F , OPT_FILE ) , "file path for input/output" ) . withRequiredArg ( ) . describedAs ( "file-path" ) . ofType ( String . class ) ; }
Adds OPT_F | OPT_FILE option to OptionParser with one argument .
24,151
/** Registers the --format option (single required argument: hex, json or binary). */
public static void acceptsFormat ( OptionParser parser ) { parser . accepts ( OPT_FORMAT , "format of key or entry, could be hex, json or binary" ) . withRequiredArg ( ) . describedAs ( "hex | json | binary" ) . ofType ( String . class ) ; }
Adds OPT_FORMAT option to OptionParser with one argument .
24,152
/** Registers the -n / --node option (single required integer argument: one node id). */
public static void acceptsNodeSingle ( OptionParser parser ) { parser . acceptsAll ( Arrays . asList ( OPT_N , OPT_NODE ) , "node id" ) . withRequiredArg ( ) . describedAs ( "node-id" ) . ofType ( Integer . class ) ; }
Adds OPT_N | OPT_NODE option to OptionParser with one argument .
24,153
/** Registers the -u / --url option (single required string argument: bootstrap url). */
public static void acceptsUrl ( OptionParser parser ) { parser . acceptsAll ( Arrays . asList ( OPT_U , OPT_URL ) , "bootstrap url" ) . withRequiredArg ( ) . describedAs ( "url" ) . ofType ( String . class ) ; }
Adds OPT_U | OPT_URL option to OptionParser with one argument .
24,154
/** Registers the -z / --zone option (single required integer argument: zone id). */
public static void acceptsZone ( OptionParser parser ) { parser . acceptsAll ( Arrays . asList ( OPT_Z , OPT_ZONE ) , "zone id" ) . withRequiredArg ( ) . describedAs ( "zone-id" ) . ofType ( Integer . class ) ; }
Adds OPT_Z | OPT_ZONE option to OptionParser with one argument .
24,155
/** Registers the -x / --hex option (comma-separated list of hex-encoded keys). */
public static void acceptsHex ( OptionParser parser ) { parser . acceptsAll ( Arrays . asList ( OPT_X , OPT_HEX ) , "fetch key/entry by key value of hex type" ) . withRequiredArg ( ) . describedAs ( "key-list" ) . withValuesSeparatedBy ( ',' ) . ofType ( String . class ) ; }
Adds OPT_X | OPT_HEX option to OptionParser with multiple arguments .
24,156
/** Registers the -j / --json option (comma-separated list of json-encoded keys). */
public static void acceptsJson ( OptionParser parser ) { parser . acceptsAll ( Arrays . asList ( OPT_J , OPT_JSON ) , "fetch key/entry by key value of json type" ) . withRequiredArg ( ) . describedAs ( "key-list" ) . withValuesSeparatedBy ( ',' ) . ofType ( String . class ) ; }
Adds OPT_J | OPT_JSON option to OptionParser with multiple arguments .
24,157
/** Registers the -n / --node option (comma-separated list of node ids). */
public static void acceptsNodeMultiple ( OptionParser parser ) { parser . acceptsAll ( Arrays . asList ( OPT_N , OPT_NODE ) , "node id list" ) . withRequiredArg ( ) . describedAs ( "node-id-list" ) . withValuesSeparatedBy ( ',' ) . ofType ( Integer . class ) ; }
Adds OPT_N | OPT_NODE option to OptionParser with multiple arguments .
24,158
/** Registers the -p / --partition option (comma-separated list of partition ids). */
public static void acceptsPartition ( OptionParser parser ) { parser . acceptsAll ( Arrays . asList ( OPT_P , OPT_PARTITION ) , "partition id list" ) . withRequiredArg ( ) . describedAs ( "partition-id-list" ) . withValuesSeparatedBy ( ',' ) . ofType ( Integer . class ) ; }
Adds OPT_P | OPT_PARTITION option to OptionParser with multiple arguments .
24,159
/**
 * Ensures the single required option {@code opt} was supplied; croaks
 * otherwise.
 *
 * @throws VoldemortException when the option is missing
 */
public static void checkRequired(OptionSet options, String opt) throws VoldemortException {
    checkRequired(options, Lists.newArrayList(opt));
}
Checks if the required option exists .
24,160
/**
 * Ensures exactly one of {@code opt1}, {@code opt2} was supplied; croaks
 * when neither or both are present.
 *
 * @throws VoldemortException on missing or conflicting options
 */
public static void checkRequired(OptionSet options, String opt1, String opt2)
        throws VoldemortException {
    checkRequired(options, Lists.newArrayList(opt1, opt2));
}
Checks if there's exactly one option that exists among all possible opts.
24,161
/**
 * Verifies that exactly one of {@code opts} was supplied on the command
 * line. When none (or more than one) is present, the offending option
 * names are printed to stderr and the process croaks.
 *
 * @throws VoldemortException on missing or conflicting options
 */
public static void checkRequired(OptionSet options, List<String> opts) throws VoldemortException {
    List<String> present = Lists.newArrayList();
    for (String opt : opts) {
        if (options.has(opt)) {
            present.add(opt);
        }
    }
    if (present.isEmpty()) {
        System.err.println("Please specify one of the following options:");
        for (String opt : opts) {
            System.err.println("--" + opt);
        }
        Utils.croak("Missing required option.");
    } else if (present.size() > 1) {
        System.err.println("Conflicting options:");
        for (String opt : present) {
            System.err.println("--" + opt);
        }
        Utils.croak("Conflicting options detected.");
    }
}
Checks if there's exactly one option that exists among all opts.
24,162
/**
 * Builds a composite Voldemort request from the already-validated REST
 * request and forwards it down the pipeline wrapped together with the fat
 * client for the target store.
 *
 * For the special schemata store no fat client is looked up and the
 * operation is rewritten to GET_METADATA; for any other store a missing
 * fat client is reported to the caller as BAD_REQUEST.
 *
 * NOTE(review): when constructCompositeVoldemortRequestObject() returns
 * null nothing is sent downstream -- presumably the validator has already
 * written an error response; verify against the validator implementations.
 */
protected void registerRequest ( RestRequestValidator requestValidator , ChannelHandlerContext ctx , MessageEvent messageEvent ) { CompositeVoldemortRequest < ByteArray , byte [ ] > requestObject = requestValidator . constructCompositeVoldemortRequestObject ( ) ; if ( requestObject != null ) { DynamicTimeoutStoreClient < ByteArray , byte [ ] > storeClient = null ; if ( ! requestValidator . getStoreName ( ) . equalsIgnoreCase ( RestMessageHeaders . SCHEMATA_STORE ) ) { storeClient = this . fatClientMap . get ( requestValidator . getStoreName ( ) ) ; if ( storeClient == null ) { logger . error ( "Error when getting store. Non Existing store client." ) ; RestErrorHandler . writeErrorResponse ( messageEvent , HttpResponseStatus . BAD_REQUEST , "Non Existing store client. Critical error." ) ; return ; } } else { requestObject . setOperationType ( VoldemortOpCode . GET_METADATA_OP_CODE ) ; } CoordinatorStoreClientRequest coordinatorRequest = new CoordinatorStoreClientRequest ( requestObject , storeClient ) ; Channels . fireMessageReceived ( ctx , coordinatorRequest ) ; } }
Constructs a valid request and passes it on to the next handler . It also creates the StoreClient object corresponding to the store name specified in the REST request .
24,163
/**
 * Maps storage exceptions raised by a DELETE to HTTP error responses:
 * InvalidMetadataException -> 416, PersistenceFailureException -> 500,
 * UnsupportedOperationException (read-only store) -> 405,
 * StoreTimeoutException -> 408, InsufficientOperationalNodesException
 * -> 500. Anything else is delegated to the parent handler.
 */
public void handleExceptions ( MessageEvent messageEvent , Exception exception ) { if ( exception instanceof InvalidMetadataException ) { logger . error ( "Exception when deleting. The requested key does not exist in this partition" , exception ) ; writeErrorResponse ( messageEvent , HttpResponseStatus . REQUESTED_RANGE_NOT_SATISFIABLE , "The requested key does not exist in this partition" ) ; } else if ( exception instanceof PersistenceFailureException ) { logger . error ( "Exception when deleting. Operation failed" , exception ) ; writeErrorResponse ( messageEvent , HttpResponseStatus . INTERNAL_SERVER_ERROR , "Operation failed" ) ; } else if ( exception instanceof UnsupportedOperationException ) { logger . error ( "Exception when deleting. Operation not supported in read-only store " , exception ) ; writeErrorResponse ( messageEvent , HttpResponseStatus . METHOD_NOT_ALLOWED , "Operation not supported in read-only store" ) ; } else if ( exception instanceof StoreTimeoutException ) { String errorDescription = "DELETE Request timed out: " + exception . getMessage ( ) ; logger . error ( errorDescription ) ; writeErrorResponse ( messageEvent , HttpResponseStatus . REQUEST_TIMEOUT , errorDescription ) ; } else if ( exception instanceof InsufficientOperationalNodesException ) { String errorDescription = "DELETE Request failed: " + exception . getMessage ( ) ; logger . error ( errorDescription ) ; writeErrorResponse ( messageEvent , HttpResponseStatus . INTERNAL_SERVER_ERROR , errorDescription ) ; } else { super . handleExceptions ( messageEvent , exception ) ; } }
Handle exceptions thrown by the storage . Exceptions specific to DELETE go here . Pass other exceptions to the parent class .
24,164
/**
 * Records the duration of a PUT along with the value and key sizes
 * (no empty responses, no get-all aggregation for puts).
 */
public void recordPutTimeAndSize ( long timeNS , long valueSize , long keySize ) { recordTime ( Tracked . PUT , timeNS , 0 , valueSize , keySize , 0 ) ; }
Record the duration of a put operation along with the size of the values returned .
24,165
/**
 * Records the duration of a GET_ALL: the difference between requested and
 * returned keys is counted as empty responses, and the total key/value
 * byte sizes plus the aggregate request count are tracked.
 */
public void recordGetAllTime ( long timeNS , int requested , int returned , long totalValueBytes , long totalKeyBytes ) { recordTime ( Tracked . GET_ALL , timeNS , requested - returned , totalValueBytes , totalKeyBytes , requested ) ; }
Record the duration of a get_all operation along with how many values were requested how may were actually returned and the size of the values returned .
24,166
/**
 * Common recording path for the public timing APIs: updates the per-op
 * counter and, when trace logging is on, logs the request duration for
 * regular stores (aggregate and voldsys$ system stores are excluded).
 */
private void recordTime(Tracked op,
                        long timeNS,
                        long numEmptyResponses,
                        long valueSize,
                        long keySize,
                        long getAllAggregateRequests) {
    counters.get(op).addRequest(timeNS,
                                numEmptyResponses,
                                valueSize,
                                keySize,
                                getAllAggregateRequests);
    boolean shouldTrace = logger.isTraceEnabled()
                          && !storeName.contains("aggregate")
                          && !storeName.contains("voldsys$");
    if (shouldTrace) {
        logger.trace("Store '" + storeName + "' logged a " + op.toString() + " request taking "
                     + ((double) timeNS / voldemort.utils.Time.NS_PER_MS) + " ms");
    }
}
Method to service public recording APIs
24,167
/**
 * Splits a comma-separated property string into its non-blank tokens.
 *
 * @param paramValue the raw comma-separated string
 * @param type a human-readable label used in the error message
 * @return the non-blank tokens, in order
 * @throws RuntimeException when no non-blank token is found
 */
public static List<String> getCommaSeparatedStringValues(String paramValue, String type) {
    List<String> values = Lists.newArrayList();
    for (String token : Utils.COMMA_SEP.split(paramValue.trim())) {
        if (token.trim().length() > 0) {
            values.add(token);
        }
    }
    if (values.isEmpty()) {
        throw new RuntimeException("Number of " + type + " should be greater than zero");
    }
    return values;
}
Given a comma-separated list of properties as a string, splits it into multiple strings.
24,168
/**
 * Creates (and caches in fatClientMap) the fat client for the given store,
 * after refreshing the coordinator metadata so the client is built against
 * the latest cluster/store definitions. Lazily creates the map on first
 * use. Synchronized so concurrent initializations do not race on the map
 * or the metadata refresh.
 */
private synchronized void initializeFatClient ( String storeName , Properties storeClientProps ) { updateCoordinatorMetadataWithLatestState ( ) ; logger . info ( "Creating a Fat client for store: " + storeName ) ; SocketStoreClientFactory fatClientFactory = getFatClientFactory ( this . coordinatorConfig . getBootstrapURLs ( ) , storeClientProps ) ; if ( this . fatClientMap == null ) { this . fatClientMap = new HashMap < String , DynamicTimeoutStoreClient < ByteArray , byte [ ] > > ( ) ; } DynamicTimeoutStoreClient < ByteArray , byte [ ] > fatClient = new DynamicTimeoutStoreClient < ByteArray , byte [ ] > ( storeName , fatClientFactory , 1 , this . coordinatorMetadata . getStoreDefs ( ) , this . coordinatorMetadata . getClusterXmlStr ( ) ) ; this . fatClientMap . put ( storeName , fatClient ) ; }
Initialize the fat client for the given store .
24,169
/**
 * Records completion of one rebalance task: removes it from the in-flight
 * set, bumps the completed-task and migrated-partition-store tallies, and
 * refreshes the progress bar.
 *
 * @param taskId id of the finished task
 * @param partitionStoresMigrated number of partition-stores it moved
 */
synchronized public void completeTask ( int taskId , int partitionStoresMigrated ) { tasksInFlight . remove ( taskId ) ; numTasksCompleted ++ ; numPartitionStoresMigrated += partitionStoresMigrated ; updateProgressBar ( ) ; }
Called whenever a rebalance task completes . This means one task is done and some number of partition stores have been migrated .
24,170
/**
 * Builds a human-readable progress report for this rebalancing batch:
 * tasks in flight, tasks and partition-stores completed, percentage done,
 * and a naive linear-extrapolation ETA for both metrics. While the
 * completion rate is zero the remaining-time estimate stays at
 * Long.MAX_VALUE.
 */
synchronized public String getPrettyProgressBar ( ) { StringBuilder sb = new StringBuilder ( ) ; double taskRate = numTasksCompleted / ( double ) totalTaskCount ; double partitionStoreRate = numPartitionStoresMigrated / ( double ) totalPartitionStoreCount ; long deltaTimeMs = System . currentTimeMillis ( ) - startTimeMs ; long taskTimeRemainingMs = Long . MAX_VALUE ; if ( taskRate > 0 ) { taskTimeRemainingMs = ( long ) ( deltaTimeMs * ( ( 1.0 / taskRate ) - 1.0 ) ) ; } long partitionStoreTimeRemainingMs = Long . MAX_VALUE ; if ( partitionStoreRate > 0 ) { partitionStoreTimeRemainingMs = ( long ) ( deltaTimeMs * ( ( 1.0 / partitionStoreRate ) - 1.0 ) ) ; } sb . append ( "Progress update on rebalancing batch " + batchId ) . append ( Utils . NEWLINE ) ; sb . append ( "There are currently " + tasksInFlight . size ( ) + " rebalance tasks executing: " ) . append ( tasksInFlight ) . append ( "." ) . append ( Utils . NEWLINE ) ; sb . append ( "\t" + numTasksCompleted + " out of " + totalTaskCount + " rebalance tasks complete." ) . append ( Utils . NEWLINE ) . append ( "\t" ) . append ( decimalFormatter . format ( taskRate * 100.0 ) ) . append ( "% done, estimate " ) . append ( taskTimeRemainingMs ) . append ( " ms (" ) . append ( TimeUnit . MILLISECONDS . toMinutes ( taskTimeRemainingMs ) ) . append ( " minutes) remaining." ) . append ( Utils . NEWLINE ) ; sb . append ( "\t" + numPartitionStoresMigrated + " out of " + totalPartitionStoreCount + " partition-stores migrated." ) . append ( Utils . NEWLINE ) . append ( "\t" ) . append ( decimalFormatter . format ( partitionStoreRate * 100.0 ) ) . append ( "% done, estimate " ) . append ( partitionStoreTimeRemainingMs ) . append ( " ms (" ) . append ( TimeUnit . MILLISECONDS . toMinutes ( partitionStoreTimeRemainingMs ) ) . append ( " minutes) remaining." ) . append ( Utils . NEWLINE ) ; return sb . toString ( ) ; }
Construct a pretty string documenting progress for this batch plan thus far .
24,171
/**
 * Logs (at info level) how many entries have been scanned and fetched so
 * far for this store and partition set, with elapsed wall time in seconds.
 *
 * @param tag prefix identifying the caller in the log line
 */
protected void progressInfoMessage(final String tag) {
    if (!logger.isInfoEnabled()) {
        return;
    }
    long totalTimeS = (System.currentTimeMillis() - startTimeMs) / Time.MS_PER_SECOND;
    logger.info(tag + " : scanned " + scanned + " and fetched " + fetched + " for store '"
                + storageEngine.getName() + "' partitionIds:" + partitionIds + " in " + totalTimeS
                + " s");
}
Progress info message
24,172
/**
 * Writes a protobuf message to the stream and, when stream statistics are
 * enabled, charges the elapsed time to the network-time bucket.
 *
 * @throws IOException on write failure
 */
protected void sendMessage(DataOutputStream outputStream, Message message) throws IOException {
    final long startNs = System.nanoTime();
    ProtoUtils.writeMessage(outputStream, message);
    if (streamStats != null) {
        streamStats.reportNetworkTime(operation, Utils.elapsedTimeNs(startNs, System.nanoTime()));
    }
}
Helper method to send message on outputStream and account for network time stats .
24,173
/**
 * When stream statistics are enabled, counts one streaming scan and
 * charges the time elapsed since {@code startNs} to the storage-time
 * bucket for the current operation.
 */
protected void reportStorageOpTime ( long startNs ) { if ( streamStats != null ) { streamStats . reportStreamingScan ( operation ) ; streamStats . reportStorageTime ( operation , Utils . elapsedTimeNs ( startNs , System . nanoTime ( ) ) ) ; } }
Helper method to track storage operations & time via StreamingStats .
24,174
/**
 * Blocks until the server signals startup completion. If the startup
 * routine posted a Throwable on the status queue, it is rethrown wrapped
 * in a VoldemortException.
 *
 * Fix: the original swallowed InterruptedException entirely; we now
 * restore the thread's interrupt status so callers can observe the
 * interruption.
 */
public void awaitStartupCompletion() {
    try {
        Object obj = startedStatusQueue.take();
        if (obj instanceof Throwable)
            throw new VoldemortException((Throwable) obj);
    } catch (InterruptedException e) {
        // Preserve the caller's interrupt status instead of dropping it.
        Thread.currentThread().interrupt();
    }
}
Blocks until the server has started successfully or an exception is thrown .
24,175
/**
 * Streams a hinted-handoff slop for a put that could not reach
 * {@code failedNodeId}: builds the slop record, routes it to an alternate
 * node in the failed node's zone, and writes it on that node's (lazily
 * initialized) slop-store update stream. The first write on a destination
 * carries the full UPDATE_PARTITION_ENTRIES request header; subsequent
 * writes send only the entry. Throttled at one entry per call.
 *
 * Synchronized because it mutates the shared per-destination stream maps.
 *
 * @throws IOException on stream write failure
 */
protected synchronized void streamingSlopPut ( ByteArray key , Versioned < byte [ ] > value , String storeName , int failedNodeId ) throws IOException { Slop slop = new Slop ( storeName , Slop . Operation . PUT , key , value . getValue ( ) , null , failedNodeId , new Date ( ) ) ; ByteArray slopKey = slop . makeKey ( ) ; Versioned < byte [ ] > slopValue = new Versioned < byte [ ] > ( slopSerializer . toBytes ( slop ) , value . getVersion ( ) ) ; Node failedNode = adminClient . getAdminClientCluster ( ) . getNodeById ( failedNodeId ) ; HandoffToAnyStrategy slopRoutingStrategy = new HandoffToAnyStrategy ( adminClient . getAdminClientCluster ( ) , true , failedNode . getZoneId ( ) ) ; int slopDestination = slopRoutingStrategy . routeHint ( failedNode ) . get ( 0 ) . getId ( ) ; VAdminProto . PartitionEntry partitionEntry = VAdminProto . PartitionEntry . newBuilder ( ) . setKey ( ProtoUtils . encodeBytes ( slopKey ) ) . setVersioned ( ProtoUtils . encodeVersioned ( slopValue ) ) . build ( ) ; VAdminProto . UpdatePartitionEntriesRequest . Builder updateRequest = VAdminProto . UpdatePartitionEntriesRequest . newBuilder ( ) . setStore ( SLOP_STORE ) . setPartitionEntry ( partitionEntry ) ; DataOutputStream outputStream = nodeIdStoreToOutputStreamRequest . get ( new Pair < String , Integer > ( SLOP_STORE , slopDestination ) ) ; if ( nodeIdStoreInitialized . get ( new Pair < String , Integer > ( SLOP_STORE , slopDestination ) ) ) { ProtoUtils . writeMessage ( outputStream , updateRequest . build ( ) ) ; } else { ProtoUtils . writeMessage ( outputStream , VAdminProto . VoldemortAdminRequest . newBuilder ( ) . setType ( VAdminProto . AdminRequestType . UPDATE_PARTITION_ENTRIES ) . setUpdatePartitionEntries ( updateRequest ) . build ( ) ) ; outputStream . flush ( ) ; nodeIdStoreInitialized . put ( new Pair < String , Integer > ( SLOP_STORE , slopDestination ) , true ) ; } throttler . maybeThrottle ( 1 ) ; }
This is a method to stream slops to slop store when a node is detected faulty in a streaming session
24,176
/**
 * Submits {@code call} to the slop-results executor and blocks until it
 * finishes, translating failures into VoldemortException.
 *
 * Fixes: the original dropped the underlying exception (only the bare
 * message survived) and swallowed the interrupt status. The cause is now
 * chained onto the VoldemortException and the interrupt flag restored.
 */
@SuppressWarnings("rawtypes")
private void synchronousInvokeCallback(Callable call) {
    Future future = streamingSlopResults.submit(call);
    try {
        future.get();
    } catch (InterruptedException e1) {
        // Restore interrupt status before propagating the failure.
        Thread.currentThread().interrupt();
        logger.error("Callback failed", e1);
        throw new VoldemortException("Callback failed", e1);
    } catch (ExecutionException e1) {
        logger.error("Callback failed during execution", e1);
        throw new VoldemortException("Callback failed during execution", e1);
    }
}
Helper method to synchronously invoke a callback
24,177
/**
 * Reads the hook endpoint's response body and logs it together with the
 * HTTP response code. Override to replace or supplement this debug
 * handling.
 */
protected void handleResponse(int responseCode, InputStream inputStream) {
    BufferedReader reader = null;
    try {
        reader = new BufferedReader(new InputStreamReader(inputStream));
        StringBuilder body = new StringBuilder();
        for (String line = reader.readLine(); line != null; line = reader.readLine()) {
            body.append(line);
        }
        log.info("HttpHook [" + hookName + "] received " + responseCode + " response: " + body);
    } catch (IOException e) {
        log.error("Error while reading response for HttpHook [" + hookName + "]", e);
    } finally {
        if (reader != null) {
            try {
                reader.close();
            } catch (IOException ignored) {
                // Best-effort close; nothing useful to do on failure.
            }
        }
    }
}
Can be overridden if you want to replace or supplement the debug handling for responses .
24,178
/**
 * Adds another store destination to an existing streaming session: for
 * every node being streamed to, checks out a socket, registers its
 * streams in the per-(store, node) maps, and refreshes the remote store
 * definitions. Nodes that fail initialization are recorded as faulty and
 * the last failure is rethrown after the loop. The special "slop" store
 * skips routing-strategy setup; any other store must exist on the cluster
 * or a VoldemortException is thrown.
 *
 * NOTE(review): in the catch block, close(sands.getSocket()) will NPE if
 * the checkout itself failed (sands still null); the inner try swallows
 * that, but the original checkout failure is then what gets rethrown --
 * verify this is the intended behavior.
 */
@ SuppressWarnings ( { "unchecked" , "rawtypes" } ) protected void addStoreToSession ( String store ) { Exception initializationException = null ; storeNames . add ( store ) ; for ( Node node : nodesToStream ) { SocketDestination destination = null ; SocketAndStreams sands = null ; try { destination = new SocketDestination ( node . getHost ( ) , node . getAdminPort ( ) , RequestFormatType . ADMIN_PROTOCOL_BUFFERS ) ; sands = streamingSocketPool . checkout ( destination ) ; DataOutputStream outputStream = sands . getOutputStream ( ) ; DataInputStream inputStream = sands . getInputStream ( ) ; nodeIdStoreToSocketRequest . put ( new Pair ( store , node . getId ( ) ) , destination ) ; nodeIdStoreToOutputStreamRequest . put ( new Pair ( store , node . getId ( ) ) , outputStream ) ; nodeIdStoreToInputStreamRequest . put ( new Pair ( store , node . getId ( ) ) , inputStream ) ; nodeIdStoreToSocketAndStreams . put ( new Pair ( store , node . getId ( ) ) , sands ) ; nodeIdStoreInitialized . put ( new Pair ( store , node . getId ( ) ) , false ) ; remoteStoreDefs = adminClient . metadataMgmtOps . getRemoteStoreDefList ( node . getId ( ) ) . getValue ( ) ; } catch ( Exception e ) { logger . error ( e ) ; try { close ( sands . getSocket ( ) ) ; streamingSocketPool . checkin ( destination , sands ) ; } catch ( Exception ioE ) { logger . error ( ioE ) ; } if ( ! faultyNodes . contains ( node . getId ( ) ) ) faultyNodes . add ( node . getId ( ) ) ; initializationException = e ; } } if ( initializationException != null ) throw new VoldemortException ( initializationException ) ; if ( store . equals ( "slop" ) ) return ; boolean foundStore = false ; for ( StoreDefinition remoteStoreDef : remoteStoreDefs ) { if ( remoteStoreDef . getName ( ) . equals ( store ) ) { RoutingStrategyFactory factory = new RoutingStrategyFactory ( ) ; RoutingStrategy storeRoutingStrategy = factory . updateRoutingStrategy ( remoteStoreDef , adminClient . 
getAdminClientCluster ( ) ) ; storeToRoutingStrategy . put ( store , storeRoutingStrategy ) ; validateSufficientNodesAvailable ( blackListedNodes , remoteStoreDef ) ; foundStore = true ; break ; } } if ( ! foundStore ) { logger . error ( "Store Name not found on the cluster" ) ; throw new VoldemortException ( "Store Name not found on the cluster" ) ; } }
Add another store destination to an existing streaming session
24,179
/**
 * Removes the given stores from the streaming session: flushes and
 * commits their pending entries to the servers, then closes and checks in
 * the sockets associated with them.
 */
@ SuppressWarnings ( { } ) public synchronized void removeStoreFromSession ( List < String > storeNameToRemove ) { logger . info ( "closing the Streaming session for a few stores" ) ; commitToVoldemort ( storeNameToRemove ) ; cleanupSessions ( storeNameToRemove ) ; }
Remove a list of stores from the session
24,180
/**
 * Marks a node as blacklisted: records the id, removes the node from the
 * streaming set, and for every active store closes that node's socket and
 * returns it to the pool. Socket-cleanup failures (including lookups that
 * return null) are caught and logged, not propagated.
 */
@ SuppressWarnings ( { "rawtypes" , "unchecked" } ) public void blacklistNode ( int nodeId ) { Collection < Node > nodesInCluster = adminClient . getAdminClientCluster ( ) . getNodes ( ) ; if ( blackListedNodes == null ) { blackListedNodes = new ArrayList ( ) ; } blackListedNodes . add ( nodeId ) ; for ( Node node : nodesInCluster ) { if ( node . getId ( ) == nodeId ) { nodesToStream . remove ( node ) ; break ; } } for ( String store : storeNames ) { try { SocketAndStreams sands = nodeIdStoreToSocketAndStreams . get ( new Pair ( store , nodeId ) ) ; close ( sands . getSocket ( ) ) ; SocketDestination destination = nodeIdStoreToSocketRequest . get ( new Pair ( store , nodeId ) ) ; streamingSocketPool . checkin ( destination , sands ) ; } catch ( Exception ioE ) { logger . error ( ioE ) ; } } }
mark a node as blacklisted
24,181
/**
 * Flushes the network buffers for the given stores on every streamed node
 * and blocks for each server's ack (an UpdatePartitionEntriesResponse).
 * Invoked on every commit batch and on session close.
 *
 * Per (store, node) pair: skips destinations never initialized, writes the
 * end-of-stream marker, flushes, and reads the response; IO failures or
 * error responses set the error flag and mark the node faulty. Afterwards
 * exactly one of the recovery callback (on any error; failures there mark
 * the session MARKED_BAD and rethrow) or the checkpoint callback (on
 * success; failures only logged) is run synchronously.
 */
@ SuppressWarnings ( { "unchecked" , "rawtypes" , "unused" } ) private void commitToVoldemort ( List < String > storeNamesToCommit ) { if ( logger . isDebugEnabled ( ) ) { logger . debug ( "Trying to commit to Voldemort" ) ; } boolean hasError = false ; if ( nodesToStream == null || nodesToStream . size ( ) == 0 ) { if ( logger . isDebugEnabled ( ) ) { logger . debug ( "No nodes to stream to. Returning." ) ; } return ; } for ( Node node : nodesToStream ) { for ( String store : storeNamesToCommit ) { if ( ! nodeIdStoreInitialized . get ( new Pair ( store , node . getId ( ) ) ) ) continue ; nodeIdStoreInitialized . put ( new Pair ( store , node . getId ( ) ) , false ) ; DataOutputStream outputStream = nodeIdStoreToOutputStreamRequest . get ( new Pair ( store , node . getId ( ) ) ) ; try { ProtoUtils . writeEndOfStream ( outputStream ) ; outputStream . flush ( ) ; DataInputStream inputStream = nodeIdStoreToInputStreamRequest . get ( new Pair ( store , node . getId ( ) ) ) ; VAdminProto . UpdatePartitionEntriesResponse . Builder updateResponse = ProtoUtils . readToBuilder ( inputStream , VAdminProto . UpdatePartitionEntriesResponse . newBuilder ( ) ) ; if ( updateResponse . hasError ( ) ) { hasError = true ; } } catch ( IOException e ) { logger . error ( "Exception during commit" , e ) ; hasError = true ; if ( ! faultyNodes . contains ( node . getId ( ) ) ) faultyNodes . add ( node . getId ( ) ) ; } } } if ( streamingresults == null ) { logger . warn ( "StreamingSession may not have been initialized since Variable streamingresults is null. Skipping callback " ) ; return ; } if ( hasError ) { logger . info ( "Invoking the Recovery Callback" ) ; Future future = streamingresults . submit ( recoveryCallback ) ; try { future . get ( ) ; } catch ( InterruptedException e1 ) { MARKED_BAD = true ; logger . error ( "Recovery Callback failed" , e1 ) ; throw new VoldemortException ( "Recovery Callback failed" ) ; } catch ( ExecutionException e1 ) { MARKED_BAD = true ; logger . 
error ( "Recovery Callback failed during execution" , e1 ) ; throw new VoldemortException ( "Recovery Callback failed during execution" ) ; } } else { if ( logger . isDebugEnabled ( ) ) { logger . debug ( "Commit successful" ) ; logger . debug ( "calling checkpoint callback" ) ; } Future future = streamingresults . submit ( checkpointCallback ) ; try { future . get ( ) ; } catch ( InterruptedException e1 ) { logger . warn ( "Checkpoint callback failed!" , e1 ) ; } catch ( ExecutionException e1 ) { logger . warn ( "Checkpoint callback failed during execution!" , e1 ) ; } } }
Flush the network buffer and write all entries to the server, then wait for an ack from the server. This is a blocking call. It is invoked on every commit-batch-size of entries, and is also called on the close-session call.
24,182
/**
 * Closes every open socket for the given stores across all streamed nodes
 * and returns each connection to the pool; per-connection failures are
 * logged and skipped. Marks the session cleaned up when done.
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
private void cleanupSessions(List<String> storeNamesToCleanUp) {
    logger.info("Performing cleanup");
    for (String store : storeNamesToCleanUp) {
        for (Node node : nodesToStream) {
            try {
                Pair key = new Pair(store, node.getId());
                SocketAndStreams sands = nodeIdStoreToSocketAndStreams.get(key);
                close(sands.getSocket());
                SocketDestination destination = nodeIdStoreToSocketRequest.get(key);
                streamingSocketPool.checkin(destination, sands);
            } catch (Exception ioE) {
                logger.error(ioE);
            }
        }
    }
    cleanedUp = true;
}
Helper method to Close all open socket connections and checkin back to the pool
24,183
/**
 * Extracts the ids of the given nodes, preserving order. A null input
 * yields an empty list.
 */
private static List<Integer> stripNodeIds(List<Node> nodeList) {
    List<Integer> ids = new ArrayList<Integer>();
    if (nodeList == null) {
        return ids;
    }
    for (Node node : nodeList) {
        ids.add(node.getId());
    }
    return ids;
}
Helper method to get a list of node ids .
24,184
/**
 * Computes the set difference A - B by removing every element of listB
 * from listA.
 *
 * NOTE(review): this mutates listA in place and returns the same list
 * reference -- callers expecting listA to remain unchanged will be
 * surprised. Returns listA untouched (possibly null) when either argument
 * is null.
 */
private static List < Node > difference ( List < Node > listA , List < Node > listB ) { if ( listA != null && listB != null ) listA . removeAll ( listB ) ; return listA ; }
Computes A - B
24,185
/**
 * Command-line entry point: compares two Avro schema files for backward
 * compatibility. Usage: SchemaEvolutionValidator pathToOldSchema
 * pathToNewSchema. Prints each compatibility message and a final summary
 * whose severity is the highest level seen (ERROR: not backward
 * compatible; WARN: partially; INFO: fully compatible).
 */
public static void main ( String [ ] args ) { if ( args . length != 2 ) { System . out . println ( "Usage: SchemaEvolutionValidator pathToOldSchema pathToNewSchema" ) ; return ; } Schema oldSchema ; Schema newSchema ; try { oldSchema = Schema . parse ( new File ( args [ 0 ] ) ) ; } catch ( Exception ex ) { oldSchema = null ; System . out . println ( "Could not open or parse the old schema (" + args [ 0 ] + ") due to " + ex ) ; } try { newSchema = Schema . parse ( new File ( args [ 1 ] ) ) ; } catch ( Exception ex ) { newSchema = null ; System . out . println ( "Could not open or parse the new schema (" + args [ 1 ] + ") due to " + ex ) ; } if ( oldSchema == null || newSchema == null ) { return ; } System . out . println ( "Comparing: " ) ; System . out . println ( "\t" + args [ 0 ] ) ; System . out . println ( "\t" + args [ 1 ] ) ; List < Message > messages = SchemaEvolutionValidator . checkBackwardCompatibility ( oldSchema , newSchema , oldSchema . getName ( ) ) ; Level maxLevel = Level . ALL ; for ( Message message : messages ) { System . out . println ( message . getLevel ( ) + ": " + message . getMessage ( ) ) ; if ( message . getLevel ( ) . isGreaterOrEqual ( maxLevel ) ) { maxLevel = message . getLevel ( ) ; } } if ( maxLevel . isGreaterOrEqual ( Level . ERROR ) ) { System . out . println ( Level . ERROR + ": The schema is not backward compatible. New clients will not be able to read existing data." ) ; } else if ( maxLevel . isGreaterOrEqual ( Level . WARN ) ) { System . out . println ( Level . WARN + ": The schema is partially backward compatible, but old clients will not be able to read data serialized in the new format." ) ; } else { System . out . println ( Level . INFO + ": The schema is backward compatible. Old and new clients will be able to read records serialized by one another." ) ; } }
This main method provides an easy command line tool to compare two schemas .
24,186
/**
 * Validates that every schema version in the given Avro serializer
 * definition is parseable.
 *
 * Fix: the original discarded the parse exception; it is now chained as
 * the cause of the thrown VoldemortException so the underlying Avro error
 * is not lost.
 *
 * @param avroSerDef the serializer definition whose schemas to check
 * @throws VoldemortException when no schema is present or any version
 *         fails to parse
 */
public static void validateAllAvroSchemas(SerializerDefinition avroSerDef) {
    Map<Integer, String> schemaVersions = avroSerDef.getAllSchemaInfoVersions();
    if (schemaVersions.size() < 1) {
        throw new VoldemortException("No schema specified");
    }
    for (Map.Entry<Integer, String> entry : schemaVersions.entrySet()) {
        Integer schemaVersionNumber = entry.getKey();
        String schemaStr = entry.getValue();
        try {
            Schema.parse(schemaStr);
        } catch (Exception e) {
            // Chain the parse failure so the root cause survives.
            throw new VoldemortException("Unable to parse Avro schema version :"
                                                 + schemaVersionNumber + ", schema string :"
                                                 + schemaStr,
                                         e);
        }
    }
}
Given an AVRO serializer definition validates if all the avro schemas are valid i . e parseable .
24,187
// Routes a record to a reduce task. A "magic number" is accumulated from the
// partition id, then optionally the replica type, then optionally the chunk id,
// and finally reduced modulo numReduceTasks:
// - partitionId is read from the value at byte offset SIZE_OF_INT; presumably
//   the first int of the value is the node id -- TODO confirm against the
//   mapper that emits these values.
// - When keys are saved and we are not building primary replicas only, the
//   replica type (a single byte at offset 2 * SIZE_OF_INT) is folded in,
//   scaled by the store's replication factor.
// - When there is no dedicated reducer per bucket, the chunk id derived from
//   the key is folded in, scaled by the total number of chunks.
// Any failure is wrapped in a VoldemortException carrying the hex-encoded key
// and value to aid debugging.
public int getPartition ( byte [ ] key , byte [ ] value , int numReduceTasks ) { try { int partitionId = ByteUtils . readInt ( value , ByteUtils . SIZE_OF_INT ) ; int magicNumber = partitionId ; if ( getSaveKeys ( ) && ! buildPrimaryReplicasOnly ) { int replicaType = ( int ) ByteUtils . readBytes ( value , 2 * ByteUtils . SIZE_OF_INT , ByteUtils . SIZE_OF_BYTE ) ; magicNumber = magicNumber * getStoreDef ( ) . getReplicationFactor ( ) + replicaType ; } if ( ! getReducerPerBucket ( ) ) { int chunkId = ReadOnlyUtils . chunk ( key , getNumChunks ( ) ) ; magicNumber = magicNumber * getNumChunks ( ) + chunkId ; } return magicNumber % numReduceTasks ; } catch ( Exception e ) { throw new VoldemortException ( "Caught exception in getPartition()!" + " key: " + ByteUtils . toHexString ( key ) + ", value: " + ByteUtils . toHexString ( value ) + ", numReduceTasks: " + numReduceTasks , e ) ; } }
This function computes which reduce task to shuffle a record to .
24,188
// Lazily opens the per-chunk index and value output streams the first time a
// given chunkId is seen (chunksHandled.add() returns false on repeats, making
// this idempotent). For the chunk it:
// - zeroes the size/position counters and creates fresh checksum digests,
// - builds task-scoped file names "<store>.<chunkId>_<taskId>(.index|.data)"
//   under the job's output path,
// - opens the streams, optionally GZIP-compressed when valid compression is
//   enabled, and sets 755 permissions on both files.
// Any IOException is rethrown as an unchecked RuntimeException (with cause),
// since the MapReduce framework invokes this writer sequentially and there is
// no meaningful recovery here.
// NOTE(review): this assumes the various per-chunk arrays (position,
// checkSumDigest*, task*FileName, *FileStream) are sized >= numChunks by the
// constructor -- confirm against the enclosing writer's setup code.
private void initFileStreams ( int chunkId ) { if ( chunksHandled . add ( chunkId ) ) { try { this . indexFileSizeInBytes [ chunkId ] = 0L ; this . valueFileSizeInBytes [ chunkId ] = 0L ; this . checkSumDigestIndex [ chunkId ] = CheckSum . getInstance ( checkSumType ) ; this . checkSumDigestValue [ chunkId ] = CheckSum . getInstance ( checkSumType ) ; this . position [ chunkId ] = 0 ; this . taskIndexFileName [ chunkId ] = new Path ( FileOutputFormat . getOutputPath ( conf ) , getStoreName ( ) + "." + Integer . toString ( chunkId ) + "_" + this . taskId + INDEX_FILE_EXTENSION + fileExtension ) ; this . taskValueFileName [ chunkId ] = new Path ( FileOutputFormat . getOutputPath ( conf ) , getStoreName ( ) + "." + Integer . toString ( chunkId ) + "_" + this . taskId + DATA_FILE_EXTENSION + fileExtension ) ; if ( this . fs == null ) this . fs = this . taskIndexFileName [ chunkId ] . getFileSystem ( conf ) ; if ( isValidCompressionEnabled ) { this . indexFileStream [ chunkId ] = new DataOutputStream ( new BufferedOutputStream ( new GZIPOutputStream ( fs . create ( this . taskIndexFileName [ chunkId ] ) , DEFAULT_BUFFER_SIZE ) ) ) ; this . valueFileStream [ chunkId ] = new DataOutputStream ( new BufferedOutputStream ( new GZIPOutputStream ( fs . create ( this . taskValueFileName [ chunkId ] ) , DEFAULT_BUFFER_SIZE ) ) ) ; } else { this . indexFileStream [ chunkId ] = fs . create ( this . taskIndexFileName [ chunkId ] ) ; this . valueFileStream [ chunkId ] = fs . create ( this . taskValueFileName [ chunkId ] ) ; } fs . setPermission ( this . taskIndexFileName [ chunkId ] , new FsPermission ( HadoopStoreBuilder . HADOOP_FILE_PERMISSION ) ) ; logger . info ( "Setting permission to 755 for " + this . taskIndexFileName [ chunkId ] ) ; fs . setPermission ( this . taskValueFileName [ chunkId ] , new FsPermission ( HadoopStoreBuilder . HADOOP_FILE_PERMISSION ) ) ; logger . info ( "Setting permission to 755 for " + this . taskValueFileName [ chunkId ] ) ; logger . 
info ( "Opening " + this . taskIndexFileName [ chunkId ] + " and " + this . taskValueFileName [ chunkId ] + " for writing." ) ; } catch ( IOException e ) { throw new RuntimeException ( "Failed to open Input/OutputStream" , e ) ; } } }
The MapReduce framework should operate sequentially, so thread safety shouldn't be a problem.
24,189
/**
 * Runs the common REST request validation and then applies the checks
 * specific to GET / GET ALL: a "get version" request may carry at most one
 * key.
 *
 * @return true if the request is valid; false otherwise (an error response
 *         has already been written to the message event in that case)
 */
public boolean parseAndValidateRequest() {
    if(!super.parseAndValidateRequest()) {
        return false;
    }

    // Record whether this is a "get version" request before validating it.
    isGetVersionRequest = hasGetVersionRequestHeader();
    if(!isGetVersionRequest) {
        return true;
    }

    if(this.parsedKeys.size() > 1) {
        RestErrorHandler.writeErrorResponse(messageEvent,
                                            HttpResponseStatus.BAD_REQUEST,
                                            "Get version request cannot have multiple keys");
        return false;
    }
    return true;
}
Validations specific to GET and GET ALL
24,190
/**
 * Read the metadata from a Hadoop SequenceFile.
 *
 * @param fs the filesystem holding the file
 * @param path the path of the SequenceFile
 * @return the file's metadata as a String -> String map
 * @throws RuntimeException wrapping any IOException raised while reading
 */
public static Map<String, String> getMetadataFromSequenceFile(FileSystem fs, Path path) {
    try {
        Configuration conf = new Configuration();
        conf.setInt("io.file.buffer.size", 4096);
        // Bug fix: pass the configured Configuration to the reader; previously
        // a fresh Configuration() was passed here, silently discarding the
        // buffer-size setting above.
        SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
        SequenceFile.Metadata meta;
        try {
            meta = reader.getMetadata();
        } finally {
            // Bug fix: close even when getMetadata() throws, so the stream
            // never leaks.
            reader.close();
        }
        TreeMap<Text, Text> map = meta.getMetadata();
        Map<String, String> values = new HashMap<String, String>();
        for(Map.Entry<Text, Text> entry: map.entrySet())
            values.put(entry.getKey().toString(), entry.getValue().toString());
        return values;
    } catch(IOException e) {
        throw new RuntimeException(e);
    }
}
Read the metadata from a hadoop SequenceFile
24,191
/**
 * Records the list of backed-up files into a text file named
 * "backupset-&lt;timestamp&gt;" inside the backup directory. An existing record
 * file of the same name is first renamed to "&lt;name&gt;.old". The first line
 * records the last file in the backup set; subsequent lines list every BDB
 * (*.jdb) file currently in the environment home.
 *
 * @param backupDir directory the backup-set record is written to
 * @throws IOException if the record file cannot be created
 */
private void recordBackupSet(File backupDir) throws IOException {
    String[] filesInEnv = env.getHome().list();
    SimpleDateFormat format = new SimpleDateFormat("yyyy_MM_dd_kk_mm_ss");
    String recordFileName = "backupset-" + format.format(new Date());
    File recordFile = new File(backupDir, recordFileName);
    if(recordFile.exists()) {
        // Best effort: if the rename fails the old record is simply overwritten.
        recordFile.renameTo(new File(backupDir, recordFileName + ".old"));
    }

    PrintStream backupRecord = new PrintStream(new FileOutputStream(recordFile));
    try {
        backupRecord.println("Lastfile:" + Long.toHexString(backupHelper.getLastFileInBackupSet()));
        if(filesInEnv != null) {
            for(String file: filesInEnv) {
                if(file.endsWith(BDB_EXT))
                    backupRecord.println(file);
            }
        }
    } finally {
        // Bug fix: the stream is now closed even if a write fails part-way.
        backupRecord.close();
    }
}
Records the list of backed-up files into a text file.
24,192
/**
 * For recovery from the latest consistent snapshot, removes files in the
 * backup directory that are no longer part of the environment's backup set;
 * otherwise stale *.jdb log files from previous backups would accumulate and
 * fill the disk.
 *
 * @param backupDir the backup directory being pruned
 * @param status operation status, updated as each stale file is deleted
 */
private void cleanStaleFiles(File backupDir, AsyncOperationStatus status) {
    String[] currentEnvFiles = env.getHome().list();
    String[] backedUpFiles = backupDir.list();
    if(currentEnvFiles == null || backedUpFiles == null) {
        // Either directory listing failed; nothing can be pruned safely.
        return;
    }

    HashSet<String> liveFiles = new HashSet<String>(java.util.Arrays.asList(currentEnvFiles));
    for(String candidate: backedUpFiles) {
        // Only prune BDB log files that the environment no longer contains.
        if(candidate.endsWith(BDB_EXT) && !liveFiles.contains(candidate)) {
            status.setStatus("Deleting stale jdb file :" + candidate);
            new File(backupDir, candidate).delete();
        }
    }
}
For recovery from the latest consistent snapshot we should clean up the old files from the previous backup set else we will fill the disk with useless log files
24,193
/**
 * Copies one jdb log file while verifying its checksums via
 * LogVerificationInputStream; a corrupt log entry makes the read (and hence
 * the copy) fail with an exception.
 *
 * @param sourceFile the jdb file to copy
 * @param destFile the destination file (created if missing)
 * @throws IOException on any read, write, or verification failure
 */
private void verifiedCopyFile(File sourceFile, File destFile) throws IOException {
    if(!destFile.exists()) {
        destFile.createNewFile();
    }
    FileInputStream source = null;
    FileOutputStream destination = null;
    LogVerificationInputStream verifyStream = null;
    try {
        source = new FileInputStream(sourceFile);
        destination = new FileOutputStream(destFile);
        verifyStream = new LogVerificationInputStream(env, source, sourceFile.getName());

        final byte[] buf = new byte[LOGVERIFY_BUFSIZE];
        while(true) {
            final int len = verifyStream.read(buf);
            if(len < 0) {
                break;
            }
            destination.write(buf, 0, len);
        }
    } finally {
        if(verifyStream != null) {
            // Presumably also closes the wrapped source (FilterInputStream
            // semantics) -- TODO confirm against the JE javadoc.
            verifyStream.close();
        } else if(source != null) {
            // Bug fix: if the verification stream was never constructed the
            // raw source stream used to leak; close it explicitly.
            source.close();
        }
        if(destination != null) {
            destination.close();
        }
    }
}
Copies the jdb log files with additional verification of the checksums .
24,194
// Get the replication partition list for the given partition, walking the
// partition ring clockwise from `index` and selecting partitions whose owning
// nodes satisfy the per-zone replication requirements:
// - requiredRepFactor is a mutable working copy of zoneReplicationFactor; each
//   accepted node decrements its zone's remaining count (via
//   checkZoneRequirement).
// - A node is considered at most once even if it owns several of the visited
//   partitions (preferenceNodesList tracks nodes already seen).
// - Fails fast if the per-zone counts do not sum to the total replication
//   factor; returns an empty list when there are no partitions at all.
// The walk stops as soon as getNumReplicas() partitions are collected, or
// after one full trip around the ring (the loop counter i only bounds the
// number of steps; `index` is the actual ring position being advanced).
public List < Integer > getReplicatingPartitionList ( int index ) { List < Node > preferenceNodesList = new ArrayList < Node > ( getNumReplicas ( ) ) ; List < Integer > replicationPartitionsList = new ArrayList < Integer > ( getNumReplicas ( ) ) ; HashMap < Integer , Integer > requiredRepFactor = new HashMap < Integer , Integer > ( ) ; requiredRepFactor . putAll ( zoneReplicationFactor ) ; int sum = 0 ; for ( Integer zoneRepFactor : requiredRepFactor . values ( ) ) { sum += zoneRepFactor ; } if ( sum != getNumReplicas ( ) ) throw new IllegalArgumentException ( "Number of zone replicas is not equal to the total replication factor" ) ; if ( getPartitionToNode ( ) . length == 0 ) { return new ArrayList < Integer > ( 0 ) ; } for ( int i = 0 ; i < getPartitionToNode ( ) . length ; i ++ ) { Node currentNode = getNodeByPartition ( index ) ; if ( ! preferenceNodesList . contains ( currentNode ) ) { preferenceNodesList . add ( currentNode ) ; if ( checkZoneRequirement ( requiredRepFactor , currentNode . getZoneId ( ) ) ) replicationPartitionsList . add ( index ) ; } if ( replicationPartitionsList . size ( ) >= getNumReplicas ( ) ) return replicationPartitionsList ; index = ( index + 1 ) % getPartitionToNode ( ) . length ; } return replicationPartitionsList ; }
Get the replication partitions list for the given partition .
24,195
/**
 * Check if we still need more nodes from the given zone, decrementing the
 * remaining per-zone replication count when a node from that zone is used.
 *
 * @param requiredRepFactor map of zone id -> replicas still required (mutated
 *        in place when this method returns true)
 * @param zoneId zone of the candidate node
 * @return true if a replica from this zone was still needed (and the count
 *         was decremented); false if the zone is unknown or already satisfied
 */
private boolean checkZoneRequirement(HashMap<Integer, Integer> requiredRepFactor, int zoneId) {
    // Single lookup instead of containsKey + two gets; also tolerates a null
    // mapped value (treated as "no replicas required") instead of throwing an
    // NPE on unboxing.
    Integer remaining = requiredRepFactor.get(zoneId);
    if(remaining == null || remaining == 0) {
        return false;
    }
    requiredRepFactor.put(zoneId, remaining - 1);
    return true;
}
Check if we still need more nodes from the given zone and reduce the zoneReplicationFactor count accordingly .
24,196
/**
 * Adds the OPT_U | OPT_URL option to the given OptionParser: a required,
 * comma-separated list of coordinator bootstrap urls.
 *
 * @param parser the parser to register the option on
 */
public static void acceptsUrlMultiple(OptionParser parser) {
    parser.acceptsAll(Arrays.asList(OPT_U, OPT_URL), "coordinator bootstrap urls")
          .withRequiredArg()
          .describedAs("url-list")
          .withValuesSeparatedBy(',')
          .ofType(String.class);
}
Adds OPT_U | OPT_URL option to OptionParser with multiple arguments .
24,197
/**
 * Utility function that copies a string array except for the first element.
 * Arrays of length 0 or 1 yield an empty array; the input is never modified.
 *
 * @param arr the source array
 * @return a new array containing arr[1..arr.length-1]
 */
public static String[] copyArrayCutFirst(String[] arr) {
    if(arr.length <= 1) {
        // Covers both the empty and the single-element case.
        return new String[0];
    }
    // Idiomatic stdlib call replacing the manual new-array + System.arraycopy.
    return java.util.Arrays.copyOfRange(arr, 1, arr.length);
}
Utility function that copies a string array except for the first element
24,198
/**
 * Utility function that copies a string array, prepending the given string as
 * the new first element. The input array is never modified.
 *
 * @param arr the source array
 * @param add the element to place at index 0 of the result
 * @return a new array of length arr.length + 1
 */
public static String[] copyArrayAddFirst(String[] arr, String add) {
    String[] result = new String[arr.length + 1];
    result[0] = add;
    // Shift every original element one slot to the right.
    for(int i = 0; i < arr.length; i++) {
        result[i + 1] = arr[i];
    }
    return result;
}
Utility function that copies a string array and add another string to first
24,199
// Writes a metadata entry under the store's write lock. Three key families are
// handled:
// - A store name or STORES_KEY: the value is a List<StoreDefinition>. Schemas
//   are validated, each definition is serialized and written to the
//   store-definitions engine and the in-memory cache. For STORES_KEY the list
//   is authoritative, so stores present before but absent from the new list
//   are removed (resetStoreDefinitions on the leftover names). Definitions and
//   routing strategies are then re-initialized.
// - A key in METADATA_KEYS: the value is persisted via putInner and cached;
//   CLUSTER_KEY additionally refreshes routing strategies, NODE_ID_KEY
//   re-initializes the node id, and SYSTEM_STORES_KEY is rejected outright.
// - Anything else is an error.
// The exact ordering (persist, cache, then re-init) appears deliberate; do not
// reorder without understanding the recovery path.
@ SuppressWarnings ( "unchecked" ) public void put ( String key , Versioned < Object > value ) { writeLock . lock ( ) ; try { if ( this . storeNames . contains ( key ) || key . equals ( STORES_KEY ) ) { List < StoreDefinition > storeDefinitions = ( List < StoreDefinition > ) value . getValue ( ) ; StoreDefinitionUtils . validateSchemasAsNeeded ( storeDefinitions ) ; Set < String > storeNamesToDelete = new HashSet < String > ( ) ; for ( String storeName : this . storeNames ) { storeNamesToDelete . add ( storeName ) ; } StoreDefinitionsMapper mapper = new StoreDefinitionsMapper ( ) ; Set < String > specifiedStoreNames = new HashSet < String > ( ) ; for ( StoreDefinition storeDef : storeDefinitions ) { specifiedStoreNames . add ( storeDef . getName ( ) ) ; String storeDefStr = mapper . writeStore ( storeDef ) ; Versioned < String > versionedValueStr = new Versioned < String > ( storeDefStr , value . getVersion ( ) ) ; this . storeDefinitionsStorageEngine . put ( storeDef . getName ( ) , versionedValueStr , "" ) ; this . metadataCache . put ( storeDef . getName ( ) , new Versioned < Object > ( storeDefStr , value . getVersion ( ) ) ) ; } if ( key . equals ( STORES_KEY ) ) { storeNamesToDelete . removeAll ( specifiedStoreNames ) ; resetStoreDefinitions ( storeNamesToDelete ) ; } initStoreDefinitions ( value . getVersion ( ) ) ; updateRoutingStrategies ( getCluster ( ) , getStoreDefList ( ) ) ; } else if ( METADATA_KEYS . contains ( key ) ) { putInner ( key , convertObjectToString ( key , value ) ) ; metadataCache . put ( key , value ) ; if ( CLUSTER_KEY . equals ( key ) ) { updateRoutingStrategies ( ( Cluster ) value . getValue ( ) , getStoreDefList ( ) ) ; } else if ( NODE_ID_KEY . equals ( key ) ) { initNodeId ( getNodeIdNoLock ( ) ) ; } else if ( SYSTEM_STORES_KEY . 
equals ( key ) ) throw new VoldemortException ( "Cannot overwrite system store definitions" ) ; } else { throw new VoldemortException ( "Unhandled Key:" + key + " for MetadataStore put()" ) ; } } finally { writeLock . unlock ( ) ; } }
helper function to convert strings to bytes as needed .