idx
int64 0
165k
| question
stringlengths 73
4.15k
| target
stringlengths 5
918
| len_question
int64 21
890
| len_target
int64 3
255
|
|---|---|---|---|---|
154,800
|
private static Object convertStringToPrimitiveOrPrimitiveWrapper ( String value , final Class < ? > expectedClz ) throws VoltTypeException { value = value . trim ( ) ; // detect CSV null if ( value . equals ( Constants . CSV_NULL ) ) return nullValueForType ( expectedClz ) ; // Remove commas. Doing this seems kind of dubious since it lets strings like // ,,,3.1,4,,e,+,,16 // be parsed as a valid double value (for example). String commaFreeValue = thousandSeparator . matcher ( value ) . replaceAll ( "" ) ; try { // autoboxing converts to boxed types since this method returns a java Object if ( isLongClass ( expectedClz ) ) { return Long . parseLong ( commaFreeValue ) ; } if ( isIntClass ( expectedClz ) ) { return Integer . parseInt ( commaFreeValue ) ; } if ( isShortClass ( expectedClz ) ) { return Short . parseShort ( commaFreeValue ) ; } if ( isByteClass ( expectedClz ) ) { return Byte . parseByte ( commaFreeValue ) ; } if ( isDoubleClass ( expectedClz ) ) { return Double . parseDouble ( commaFreeValue ) ; } } // ignore the exception and fail through below catch ( NumberFormatException nfe ) { // If we failed to parse the string in decimal form it could still // be a numeric value specified as X'....' // // Do this only after trying to parse a decimal literal, which is the // most common case. if ( expectedClz != double . class ) { String hexDigits = SQLParser . getDigitsFromHexLiteral ( value ) ; if ( hexDigits != null ) { try { return SQLParser . hexDigitsToLong ( hexDigits ) ; } catch ( SQLParser . Exception spe ) { } } } } throw new VoltTypeException ( "Unable to convert string " + value + " to " + expectedClz . getName ( ) + " value for target parameter." ) ; }
|
Given a string covert it to a primitive type or boxed type of the primitive type or return null .
| 444
| 20
|
154,801
|
private static Object tryToMakeCompatibleArray ( final Class < ? > expectedComponentClz , final Class < ? > inputComponentClz , Object param ) throws VoltTypeException { int inputLength = Array . getLength ( param ) ; if ( inputComponentClz == expectedComponentClz ) { return param ; } // if it's an empty array, let it through // this is a bit ugly as it might hide passing // arrays of the wrong type, but it "does the right thing" // more often that not I guess... else if ( inputLength == 0 ) { return Array . newInstance ( expectedComponentClz , 0 ) ; } // hack to make strings work with input as bytes else if ( isByteArrayClass ( inputComponentClz ) && ( expectedComponentClz == String . class ) ) { String [ ] values = new String [ inputLength ] ; for ( int i = 0 ; i < inputLength ; i ++ ) { try { values [ i ] = new String ( ( byte [ ] ) Array . get ( param , i ) , "UTF-8" ) ; } catch ( UnsupportedEncodingException ex ) { throw new VoltTypeException ( "tryScalarMakeCompatible: Unsupported encoding:" + expectedComponentClz . getName ( ) + " to provided " + inputComponentClz . getName ( ) ) ; } } return values ; } // hack to make varbinary work with input as hex string else if ( ( inputComponentClz == String . class ) && ( expectedComponentClz == byte [ ] . class ) ) { byte [ ] [ ] values = new byte [ inputLength ] [ ] ; for ( int i = 0 ; i < inputLength ; i ++ ) { values [ i ] = Encoder . hexDecode ( ( String ) Array . get ( param , i ) ) ; } return values ; } else if ( ( inputComponentClz == String . class ) && ( expectedComponentClz == Byte [ ] . class ) ) { Byte [ ] [ ] boxvalues = new Byte [ inputLength ] [ ] ; for ( int i = 0 ; i < inputLength ; i ++ ) { boxvalues [ i ] = ArrayUtils . toObject ( Encoder . hexDecode ( ( String ) Array . get ( param , i ) ) ) ; } return boxvalues ; } else { /* * Arrays can be quite large so it doesn't make sense to silently do the conversion * and incur the performance hit. The client should serialize the correct invocation * parameters */ throw new VoltTypeException ( "tryScalarMakeCompatible: Unable to match parameter array:" + expectedComponentClz . getName ( ) + " to provided " + inputComponentClz . getName ( ) ) ; } }
|
Factored out code to handle array parameter types .
| 586
| 10
|
154,802
|
final static public VoltTable [ ] getResultsFromRawResults ( String procedureName , Object result ) throws InvocationTargetException { if ( result == null ) { return new VoltTable [ 0 ] ; } if ( result instanceof VoltTable [ ] ) { VoltTable [ ] retval = ( VoltTable [ ] ) result ; for ( VoltTable table : retval ) { if ( table == null ) { Exception e = new RuntimeException ( "VoltTable arrays with non-zero length cannot contain null values." ) ; throw new InvocationTargetException ( e ) ; } // Make sure this table does not use an ee cache buffer table . convertToHeapBuffer ( ) ; } return retval ; } if ( result instanceof VoltTable ) { VoltTable vt = ( VoltTable ) result ; // Make sure this table does not use an ee cache buffer vt . convertToHeapBuffer ( ) ; return new VoltTable [ ] { vt } ; } if ( result instanceof Long ) { VoltTable t = new VoltTable ( new VoltTable . ColumnInfo ( "" , VoltType . BIGINT ) ) ; t . addRow ( result ) ; return new VoltTable [ ] { t } ; } throw new RuntimeException ( String . format ( "Procedure %s unsupported procedure return type %s." , procedureName , result . getClass ( ) . getSimpleName ( ) ) ) ; }
|
Given the results of a procedure convert it into a sensible array of VoltTables .
| 299
| 17
|
154,803
|
public static void main ( String [ ] args ) { if ( args . length > 1 ) { printUsage ( ) ; } if ( args . length == 0 || ( args . length == 1 && args [ 0 ] . equals ( "--full" ) ) ) { System . out . println ( getFullVersion ( ) ) ; System . exit ( 0 ) ; } if ( args [ 0 ] . equals ( "--short" ) ) System . out . println ( getVersion ( ) ) ; else if ( args [ 0 ] . equals ( "--revision" ) ) System . out . println ( getVersionRevision ( ) ) ; else printUsage ( ) ; System . exit ( 0 ) ; }
|
Prints the current version revision and build date to the standard out .
| 149
| 14
|
154,804
|
void setFinal ( boolean isFinal ) throws IOException { if ( isFinal != m_isFinal ) { if ( PBDSegment . setFinal ( m_file , isFinal ) ) { if ( ! isFinal ) { // It is dangerous to leave final on a segment so make sure the metadata is flushed m_fc . force ( true ) ; } } else if ( PBDSegment . isFinal ( m_file ) && ! isFinal ) { throw new IOException ( "Could not remove the final attribute from " + m_file . getName ( ) ) ; } // It is OK for m_isFinal to be true when isFinal(File) returns false but not the other way m_isFinal = isFinal ; } }
|
Set or clear segment as final i . e . whether segment is complete and logically immutable .
| 159
| 18
|
154,805
|
public static String createParticipantNode ( ZooKeeper zk , String dir , String prefix , byte [ ] data ) throws KeeperException , InterruptedException { createRootIfNotExist ( zk , dir ) ; String node = zk . create ( ZKUtil . joinZKPath ( dir , prefix + "_" ) , data , Ids . OPEN_ACL_UNSAFE , CreateMode . EPHEMERAL_SEQUENTIAL ) ; // Unlock the dir as initialized zk . setData ( dir , new byte [ ] { INITIALIZED } , - 1 ) ; return node ; }
|
Provide a way for clients to create nodes which comply with the leader election format without participating in a leader election
| 134
| 22
|
154,806
|
synchronized public void shutdown ( ) throws InterruptedException , KeeperException { m_shutdown = true ; es . shutdown ( ) ; es . awaitTermination ( 365 , TimeUnit . DAYS ) ; }
|
Deletes the ephemeral node . Make sure that no future watches will fire .
| 45
| 17
|
154,807
|
private boolean watchNextLowerNode ( ) throws KeeperException , InterruptedException { /* * Iterate through the sorted list of children and find the given node, * then setup a electionWatcher on the previous node if it exists, otherwise the * previous of the previous...until we reach the beginning, then we are * the lowest node. */ List < String > children = zk . getChildren ( dir , false ) ; Collections . sort ( children ) ; ListIterator < String > iter = children . listIterator ( ) ; String me = null ; //Go till I find myself. while ( iter . hasNext ( ) ) { me = ZKUtil . joinZKPath ( dir , iter . next ( ) ) ; if ( me . equals ( node ) ) { break ; } } assert ( me != null ) ; //Back on me iter . previous ( ) ; //Until we have previous nodes and we set a watch on previous node. while ( iter . hasPrevious ( ) ) { //Proess my lower nodes and put a watch on whats live String previous = ZKUtil . joinZKPath ( dir , iter . previous ( ) ) ; if ( zk . exists ( previous , electionWatcher ) != null ) { return false ; } } return true ; }
|
Set a watch on the node that comes before the specified node in the directory .
| 268
| 16
|
154,808
|
@ CanIgnoreReturnValue public CharEscaperBuilder addEscape ( char c , String r ) { map . put ( c , checkNotNull ( r ) ) ; if ( c > max ) { max = c ; } return this ; }
|
Add a new mapping from an index to an object to the escaping .
| 52
| 14
|
154,809
|
@ CanIgnoreReturnValue public CharEscaperBuilder addEscapes ( char [ ] cs , String r ) { checkNotNull ( r ) ; for ( char c : cs ) { addEscape ( c , r ) ; } return this ; }
|
Add multiple mappings at once for a particular index .
| 53
| 11
|
154,810
|
private void deliverReadyTxns ( ) { // First, pull all the sequenced messages, if any. VoltMessage m = m_replaySequencer . poll ( ) ; while ( m != null ) { deliver ( m ) ; m = m_replaySequencer . poll ( ) ; } // Then, try to pull all the drainable messages, if any. m = m_replaySequencer . drain ( ) ; while ( m != null ) { if ( m instanceof Iv2InitiateTaskMessage ) { // Send IGNORED response for all SPs Iv2InitiateTaskMessage task = ( Iv2InitiateTaskMessage ) m ; final InitiateResponseMessage response = new InitiateResponseMessage ( task ) ; response . setResults ( new ClientResponseImpl ( ClientResponse . UNEXPECTED_FAILURE , new VoltTable [ 0 ] , ClientResponseImpl . IGNORED_TRANSACTION ) ) ; m_mailbox . send ( response . getInitiatorHSId ( ) , response ) ; } m = m_replaySequencer . drain ( ) ; } }
|
Poll the replay sequencer and process the messages until it returns null
| 236
| 13
|
154,811
|
@ Override public boolean sequenceForReplay ( VoltMessage message ) { boolean canDeliver = false ; long sequenceWithUniqueId = Long . MIN_VALUE ; boolean commandLog = ( message instanceof TransactionInfoBaseMessage && ( ( ( TransactionInfoBaseMessage ) message ) . isForReplay ( ) ) ) ; boolean sentinel = message instanceof MultiPartitionParticipantMessage ; boolean replay = commandLog || sentinel ; boolean sequenceForReplay = m_isLeader && replay ; if ( replay ) { sequenceWithUniqueId = ( ( TransactionInfoBaseMessage ) message ) . getUniqueId ( ) ; } if ( sequenceForReplay ) { InitiateResponseMessage dupe = m_replaySequencer . dedupe ( sequenceWithUniqueId , ( TransactionInfoBaseMessage ) message ) ; if ( dupe != null ) { // Duplicate initiate task message, send response m_mailbox . send ( dupe . getInitiatorHSId ( ) , dupe ) ; } else if ( ! m_replaySequencer . offer ( sequenceWithUniqueId , ( TransactionInfoBaseMessage ) message ) ) { canDeliver = true ; } else { deliverReadyTxns ( ) ; } // If it's a DR sentinel, send an acknowledgement if ( sentinel && ! commandLog ) { MultiPartitionParticipantMessage mppm = ( MultiPartitionParticipantMessage ) message ; final InitiateResponseMessage response = new InitiateResponseMessage ( mppm ) ; ClientResponseImpl clientResponse = new ClientResponseImpl ( ClientResponseImpl . UNEXPECTED_FAILURE , new VoltTable [ 0 ] , ClientResponseImpl . IGNORED_TRANSACTION ) ; response . setResults ( clientResponse ) ; m_mailbox . send ( response . getInitiatorHSId ( ) , response ) ; } } else { if ( replay ) { // Update last seen and last polled uniqueId for replicas m_replaySequencer . updateLastSeenUniqueId ( sequenceWithUniqueId , ( TransactionInfoBaseMessage ) message ) ; m_replaySequencer . updateLastPolledUniqueId ( sequenceWithUniqueId , ( TransactionInfoBaseMessage ) message ) ; } canDeliver = true ; } return canDeliver ; }
|
Sequence the message for replay if it s for CL or DR .
| 479
| 14
|
154,812
|
private void doLocalInitiateOffer ( Iv2InitiateTaskMessage msg ) { final VoltTrace . TraceEventBatch traceLog = VoltTrace . log ( VoltTrace . Category . SPI ) ; if ( traceLog != null ) { final String threadName = Thread . currentThread ( ) . getName ( ) ; // Thread name has to be materialized here traceLog . add ( ( ) -> VoltTrace . meta ( "process_name" , "name" , CoreUtils . getHostnameOrAddress ( ) ) ) . add ( ( ) -> VoltTrace . meta ( "thread_name" , "name" , threadName ) ) . add ( ( ) -> VoltTrace . meta ( "thread_sort_index" , "sort_index" , Integer . toString ( 10000 ) ) ) . add ( ( ) -> VoltTrace . beginAsync ( "initsp" , MiscUtils . hsIdPairTxnIdToString ( m_mailbox . getHSId ( ) , m_mailbox . getHSId ( ) , msg . getSpHandle ( ) , msg . getClientInterfaceHandle ( ) ) , "ciHandle" , msg . getClientInterfaceHandle ( ) , "txnId" , TxnEgo . txnIdToString ( msg . getTxnId ( ) ) , "partition" , m_partitionId , "read" , msg . isReadOnly ( ) , "name" , msg . getStoredProcedureName ( ) , "hsId" , CoreUtils . hsIdToString ( m_mailbox . getHSId ( ) ) ) ) ; } final String procedureName = msg . getStoredProcedureName ( ) ; final SpProcedureTask task = new SpProcedureTask ( m_mailbox , procedureName , m_pendingTasks , msg ) ; ListenableFuture < Object > durabilityBackpressureFuture = m_cl . log ( msg , msg . getSpHandle ( ) , null , m_durabilityListener , task ) ; if ( traceLog != null && durabilityBackpressureFuture != null ) { traceLog . add ( ( ) -> VoltTrace . beginAsync ( "durability" , MiscUtils . hsIdTxnIdToString ( m_mailbox . getHSId ( ) , msg . getSpHandle ( ) ) , "txnId" , TxnEgo . txnIdToString ( msg . getTxnId ( ) ) , "partition" , Integer . toString ( m_partitionId ) ) ) ; } //Durability future is always null for sync command logging //the transaction will be delivered again by the CL for execution once durable //Async command logging has to offer the task immediately with a Future for backpressure if ( m_cl . canOfferTask ( ) ) { m_pendingTasks . offer ( task . setDurabilityBackpressureFuture ( durabilityBackpressureFuture ) ) ; } }
|
Do the work necessary to turn the Iv2InitiateTaskMessage into a TransactionTask which can be queued to the TransactionTaskQueue . This is reused by both the normal message handling path and the repair path and assumes that the caller has dealt with or ensured that the necessary ID SpHandles and replication issues are resolved .
| 652
| 66
|
154,813
|
private void handleBorrowTaskMessage ( BorrowTaskMessage message ) { // borrows do not advance the sp handle. The handle would // move backwards anyway once the next message is received // from the SP leader. long newSpHandle = getMaxScheduledTxnSpHandle ( ) ; Iv2Trace . logFragmentTaskMessage ( message . getFragmentTaskMessage ( ) , m_mailbox . getHSId ( ) , newSpHandle , true ) ; final VoltTrace . TraceEventBatch traceLog = VoltTrace . log ( VoltTrace . Category . SPI ) ; if ( traceLog != null ) { traceLog . add ( ( ) -> VoltTrace . beginAsync ( "recvfragment" , MiscUtils . hsIdPairTxnIdToString ( m_mailbox . getHSId ( ) , m_mailbox . getHSId ( ) , newSpHandle , 0 ) , "txnId" , TxnEgo . txnIdToString ( message . getTxnId ( ) ) , "partition" , m_partitionId , "hsId" , CoreUtils . hsIdToString ( m_mailbox . getHSId ( ) ) ) ) ; } TransactionState txn = m_outstandingTxns . get ( message . getTxnId ( ) ) ; if ( txn == null ) { // If the borrow is the first fragment for a transaction, run it as // a single partition fragment; Must not engage/pause this // site on a MP transaction before the SP instructs to do so. // Do not track the borrow task as outstanding - it completes // immediately and is not a valid transaction state for // full MP participation (it claims everything can run as SP). txn = new BorrowTransactionState ( newSpHandle , message ) ; } // BorrowTask is a read only task embedded in a MP transaction // and its response (FragmentResponseMessage) should not be buffered if ( message . getFragmentTaskMessage ( ) . isSysProcTask ( ) ) { final SysprocBorrowedTask task = new SysprocBorrowedTask ( m_mailbox , ( ParticipantTransactionState ) txn , m_pendingTasks , message . getFragmentTaskMessage ( ) , message . getInputDepMap ( ) ) ; task . setResponseNotBufferable ( ) ; m_pendingTasks . offer ( task ) ; } else { final BorrowedTask task = new BorrowedTask ( m_mailbox , ( ParticipantTransactionState ) txn , m_pendingTasks , message . getFragmentTaskMessage ( ) , message . getInputDepMap ( ) ) ; task . setResponseNotBufferable ( ) ; m_pendingTasks . offer ( task ) ; } }
|
to perform replicated reads or aggregation fragment work .
| 608
| 9
|
154,814
|
public void offerPendingMPTasks ( long txnId ) { Queue < TransactionTask > pendingTasks = m_mpsPendingDurability . get ( txnId ) ; if ( pendingTasks != null ) { for ( TransactionTask task : pendingTasks ) { if ( task instanceof SpProcedureTask ) { final VoltTrace . TraceEventBatch traceLog = VoltTrace . log ( VoltTrace . Category . SPI ) ; if ( traceLog != null ) { traceLog . add ( ( ) -> VoltTrace . endAsync ( "durability" , MiscUtils . hsIdTxnIdToString ( m_mailbox . getHSId ( ) , task . getSpHandle ( ) ) ) ) ; } } else if ( task instanceof FragmentTask ) { final VoltTrace . TraceEventBatch traceLog = VoltTrace . log ( VoltTrace . Category . SPI ) ; if ( traceLog != null ) { traceLog . add ( ( ) -> VoltTrace . endAsync ( "durability" , MiscUtils . hsIdTxnIdToString ( m_mailbox . getHSId ( ) , ( ( FragmentTask ) task ) . m_fragmentMsg . getSpHandle ( ) ) ) ) ; } } m_pendingTasks . offer ( task ) ; } m_mpsPendingDurability . remove ( txnId ) ; } }
|
Offer all fragment tasks and complete transaction tasks queued for durability for the given MP transaction and remove the entry from the pending map so that future ones won t be queued .
| 315
| 36
|
154,815
|
private void queueOrOfferMPTask ( TransactionTask task ) { // The pending map will only have an entry for the transaction if the first fragment is // still pending durability. Queue < TransactionTask > pendingTasks = m_mpsPendingDurability . get ( task . getTxnId ( ) ) ; if ( pendingTasks != null ) { pendingTasks . offer ( task ) ; } else { m_pendingTasks . offer ( task ) ; } }
|
Check if the MP task has to be queued because the first fragment is still being logged synchronously to the command log . If not offer it to the transaction task queue .
| 102
| 35
|
154,816
|
private void handleIv2LogFaultMessage ( Iv2LogFaultMessage message ) { //call the internal log write with the provided SP handle and wait for the fault log IO to complete SettableFuture < Boolean > written = writeIv2ViableReplayEntryInternal ( message . getSpHandle ( ) ) ; // Get the Fault Log Status here to ensure the replica completes the log fault task is finished before // it starts processing transactions again blockFaultLogWriteStatus ( written ) ; setMaxSeenTxnId ( message . getSpHandle ( ) ) ; // Also initialize the unique ID generator and the last durable unique ID using // the value sent by the master m_uniqueIdGenerator . updateMostRecentlyGeneratedUniqueId ( message . getSpUniqueId ( ) ) ; m_cl . initializeLastDurableUniqueId ( m_durabilityListener , m_uniqueIdGenerator . getLastUniqueId ( ) ) ; }
|
Should only receive these messages at replicas when told by the leader
| 198
| 13
|
154,817
|
private void blockFaultLogWriteStatus ( SettableFuture < Boolean > written ) { boolean logWritten = false ; if ( written != null ) { try { logWritten = written . get ( ) ; } catch ( InterruptedException e ) { } catch ( ExecutionException e ) { if ( tmLog . isDebugEnabled ( ) ) { tmLog . debug ( "Could not determine fault log state for partition: " + m_partitionId , e ) ; } } if ( ! logWritten ) { tmLog . warn ( "Attempted fault log not written for partition: " + m_partitionId ) ; } } }
|
Wait to get the status of a fault log write
| 136
| 10
|
154,818
|
SettableFuture < Boolean > writeIv2ViableReplayEntryInternal ( long spHandle ) { SettableFuture < Boolean > written = null ; if ( m_replayComplete ) { written = m_cl . logIv2Fault ( m_mailbox . getHSId ( ) , new HashSet < Long > ( m_replicaHSIds ) , m_partitionId , spHandle ) ; } return written ; }
|
Write the viable replay set to the command log with the provided SP Handle . Pass back the future that is set after the fault log is written to disk .
| 95
| 31
|
154,819
|
public void updateReplicasFromMigrationLeaderFailedHost ( int failedHostId ) { List < Long > replicas = new ArrayList <> ( ) ; for ( long hsid : m_replicaHSIds ) { if ( failedHostId != CoreUtils . getHostIdFromHSId ( hsid ) ) { replicas . add ( hsid ) ; } } ( ( InitiatorMailbox ) m_mailbox ) . updateReplicas ( replicas , null ) ; }
|
update the duplicated counters after the host failure .
| 106
| 10
|
154,820
|
public void forwardPendingTaskToRejoinNode ( long [ ] replicasAdded , long snapshotSpHandle ) { if ( tmLog . isDebugEnabled ( ) ) { tmLog . debug ( "Forward pending tasks in backlog to rejoin node: " + Arrays . toString ( replicasAdded ) ) ; } if ( replicasAdded . length == 0 ) { return ; } boolean sentAny = false ; for ( Map . Entry < DuplicateCounterKey , DuplicateCounter > entry : m_duplicateCounters . entrySet ( ) ) { if ( snapshotSpHandle < entry . getKey ( ) . m_spHandle ) { if ( ! sentAny ) { sentAny = true ; if ( tmLog . isDebugEnabled ( ) ) { tmLog . debug ( "Start forwarding pending tasks to rejoin node." ) ; } } // Then forward any message after the MP txn, I expect them are all Iv2InitiateMessages if ( tmLog . isDebugEnabled ( ) ) { tmLog . debug ( entry . getValue ( ) . getOpenMessage ( ) . getMessageInfo ( ) ) ; } m_mailbox . send ( replicasAdded , entry . getValue ( ) . getOpenMessage ( ) ) ; } } if ( sentAny && tmLog . isDebugEnabled ( ) ) { tmLog . debug ( "Finish forwarding pending tasks to rejoin node." ) ; } }
|
first fragment of stream snapshot and site runs the first fragment .
| 310
| 12
|
154,821
|
@ Override public void cleanupTransactionBacklogOnRepair ( ) { if ( m_isLeader && m_sendToHSIds . length > 0 ) { m_mailbox . send ( m_sendToHSIds , new MPBacklogFlushMessage ( ) ) ; } Iterator < Entry < Long , TransactionState > > iter = m_outstandingTxns . entrySet ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { Entry < Long , TransactionState > entry = iter . next ( ) ; TransactionState txnState = entry . getValue ( ) ; if ( TxnEgo . getPartitionId ( entry . getKey ( ) ) == MpInitiator . MP_INIT_PID ) { if ( txnState . isReadOnly ( ) ) { txnState . setDone ( ) ; m_duplicateCounters . entrySet ( ) . removeIf ( ( e ) - > e . getKey ( ) . m_txnId == entry . getKey ( ) ) ; iter . remove ( ) ; } } } // flush all RO transactions out of backlog m_pendingTasks . removeMPReadTransactions ( ) ; }
|
site leaders also forward the message to its replicas .
| 263
| 11
|
154,822
|
synchronized void reset ( ) { schemaMap . clear ( ) ; sqlLookup . clear ( ) ; csidMap . clear ( ) ; sessionUseMap . clear ( ) ; useMap . clear ( ) ; next_cs_id = 0 ; }
|
Clears all internal data structures removing any references to compiled statements .
| 55
| 13
|
154,823
|
synchronized void resetStatements ( ) { Iterator it = csidMap . values ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { Statement cs = ( Statement ) it . next ( ) ; cs . clearVariables ( ) ; } }
|
Used after a DDL change that could impact the compiled statements . Clears references to CompiledStatement objects while keeping the counts and references to the sql strings .
| 58
| 32
|
154,824
|
private long getStatementID ( HsqlName schema , String sql ) { LongValueHashMap sqlMap = ( LongValueHashMap ) schemaMap . get ( schema . hashCode ( ) ) ; if ( sqlMap == null ) { return - 1 ; } return sqlMap . get ( sql , - 1 ) ; }
|
Retrieves the registered compiled statement identifier associated with the specified SQL String or a value less than zero if no such statement has been registered .
| 67
| 28
|
154,825
|
public synchronized Statement getStatement ( Session session , long csid ) { Statement cs = ( Statement ) csidMap . get ( csid ) ; if ( cs == null ) { return null ; } if ( ! cs . isValid ( ) ) { String sql = ( String ) sqlLookup . get ( csid ) ; // revalidate with the original schema try { Session sys = database . sessionManager . getSysSession ( session . currentSchema . name , session . getUser ( ) ) ; cs = sys . compileStatement ( sql ) ; cs . setID ( csid ) ; csidMap . put ( csid , cs ) ; } catch ( Throwable t ) { freeStatement ( csid , session . getId ( ) , true ) ; return null ; } } return cs ; }
|
Returns an existing CompiledStatement object with the given statement identifier . Returns null if the CompiledStatement object has been invalidated and cannot be recompiled
| 170
| 30
|
154,826
|
private void linkSession ( long csid , long sessionID ) { LongKeyIntValueHashMap scsMap ; scsMap = ( LongKeyIntValueHashMap ) sessionUseMap . get ( sessionID ) ; if ( scsMap == null ) { scsMap = new LongKeyIntValueHashMap ( ) ; sessionUseMap . put ( sessionID , scsMap ) ; } int count = scsMap . get ( csid , 0 ) ; scsMap . put ( csid , count + 1 ) ; if ( count == 0 ) { useMap . put ( csid , useMap . get ( csid , 0 ) + 1 ) ; } }
|
Links a session with a registered compiled statement . If this session has not already been linked with the given statement then the statement use count is incremented .
| 144
| 30
|
154,827
|
private long registerStatement ( long csid , Statement cs ) { if ( csid < 0 ) { csid = nextID ( ) ; int schemaid = cs . getSchemaName ( ) . hashCode ( ) ; LongValueHashMap sqlMap = ( LongValueHashMap ) schemaMap . get ( schemaid ) ; if ( sqlMap == null ) { sqlMap = new LongValueHashMap ( ) ; schemaMap . put ( schemaid , sqlMap ) ; } sqlMap . put ( cs . getSQL ( ) , csid ) ; sqlLookup . put ( csid , cs . getSQL ( ) ) ; } cs . setID ( csid ) ; csidMap . put ( csid , cs ) ; return csid ; }
|
Registers a compiled statement to be managed .
| 161
| 9
|
154,828
|
synchronized void removeSession ( long sessionID ) { LongKeyIntValueHashMap scsMap ; long csid ; Iterator i ; scsMap = ( LongKeyIntValueHashMap ) sessionUseMap . remove ( sessionID ) ; if ( scsMap == null ) { return ; } i = scsMap . keySet ( ) . iterator ( ) ; while ( i . hasNext ( ) ) { csid = i . nextLong ( ) ; int usecount = useMap . get ( csid , 1 ) - 1 ; if ( usecount == 0 ) { Statement cs = ( Statement ) csidMap . remove ( csid ) ; if ( cs != null ) { int schemaid = cs . getSchemaName ( ) . hashCode ( ) ; LongValueHashMap sqlMap = ( LongValueHashMap ) schemaMap . get ( schemaid ) ; String sql = ( String ) sqlLookup . remove ( csid ) ; sqlMap . remove ( sql ) ; } useMap . remove ( csid ) ; } else { useMap . put ( csid , usecount ) ; } } }
|
Releases the link betwen the session and all compiled statement objects it is linked to . If any such statement is not linked with any other session it is removed from management .
| 239
| 35
|
154,829
|
synchronized Statement compile ( Session session , Result cmd ) throws Throwable { String sql = cmd . getMainString ( ) ; long csid = getStatementID ( session . currentSchema , sql ) ; Statement cs = ( Statement ) csidMap . get ( csid ) ; if ( cs == null || ! cs . isValid ( ) || ! session . isAdmin ( ) ) { Session sys = database . sessionManager . getSysSession ( session . currentSchema . name , session . getUser ( ) ) ; cs = sys . compileStatement ( sql ) ; csid = registerStatement ( csid , cs ) ; } linkSession ( csid , session . getId ( ) ) ; return cs ; }
|
Compiles an SQL statement and returns a CompiledStatement Object
| 152
| 12
|
154,830
|
private void startupInstance ( ) throws IOException { assert ( m_blockPathMap . isEmpty ( ) ) ; try { clearSwapDir ( ) ; } catch ( Exception e ) { throw new IOException ( "Unable to clear large query swap directory: " + e . getMessage ( ) ) ; } }
|
On startup clear out the large query swap directory .
| 67
| 10
|
154,831
|
void storeBlock ( BlockId blockId , ByteBuffer block ) throws IOException { synchronized ( m_accessLock ) { if ( m_blockPathMap . containsKey ( blockId ) ) { throw new IllegalArgumentException ( "Request to store block that is already stored: " + blockId . toString ( ) ) ; } int origPosition = block . position ( ) ; block . position ( 0 ) ; Path blockPath = makeBlockPath ( blockId ) ; try ( SeekableByteChannel channel = Files . newByteChannel ( blockPath , OPEN_OPTIONS , PERMISSIONS ) ) { channel . write ( block ) ; } finally { block . position ( origPosition ) ; } m_blockPathMap . put ( blockId , blockPath ) ; } }
|
Store the given block with the given ID to disk .
| 165
| 11
|
154,832
|
void loadBlock ( BlockId blockId , ByteBuffer block ) throws IOException { synchronized ( m_accessLock ) { if ( ! m_blockPathMap . containsKey ( blockId ) ) { throw new IllegalArgumentException ( "Request to load block that is not stored: " + blockId ) ; } int origPosition = block . position ( ) ; block . position ( 0 ) ; Path blockPath = m_blockPathMap . get ( blockId ) ; try ( SeekableByteChannel channel = Files . newByteChannel ( blockPath ) ) { channel . read ( block ) ; } finally { block . position ( origPosition ) ; } } }
|
Read the block with the given ID into the given byte buffer .
| 139
| 13
|
154,833
|
void releaseBlock ( BlockId blockId ) throws IOException { synchronized ( m_accessLock ) { if ( ! m_blockPathMap . containsKey ( blockId ) ) { throw new IllegalArgumentException ( "Request to release block that is not stored: " + blockId ) ; } Path blockPath = m_blockPathMap . get ( blockId ) ; Files . delete ( blockPath ) ; m_blockPathMap . remove ( blockId ) ; } }
|
The block with the given site id and block counter is no longer needed so delete it from disk .
| 100
| 20
|
154,834
|
private void releaseAllBlocks ( ) throws IOException { synchronized ( m_accessLock ) { Set < Map . Entry < BlockId , Path > > entries = m_blockPathMap . entrySet ( ) ; while ( ! entries . isEmpty ( ) ) { Map . Entry < BlockId , Path > entry = entries . iterator ( ) . next ( ) ; Files . delete ( entry . getValue ( ) ) ; m_blockPathMap . remove ( entry . getKey ( ) ) ; entries = m_blockPathMap . entrySet ( ) ; } } }
|
Release all the blocks that are on disk and delete them from the map that tracks them .
| 120
| 18
|
154,835
|
Path makeBlockPath ( BlockId id ) { String filename = id . fileNameString ( ) ; return m_largeQuerySwapPath . resolve ( filename ) ; }
|
Given package visibility for unit testing purposes .
| 36
| 8
|
154,836
|
public static List < Shard > discoverShards ( String regionName , String streamName , String accessKey , String secretKey , String appName ) { try { Region region = RegionUtils . getRegion ( regionName ) ; if ( region != null ) { final AWSCredentials credentials = new BasicAWSCredentials ( accessKey , secretKey ) ; AmazonKinesis kinesisClient = new AmazonKinesisClient ( credentials , getClientConfigWithUserAgent ( appName ) ) ; kinesisClient . setRegion ( region ) ; DescribeStreamResult result = kinesisClient . describeStream ( streamName ) ; if ( ! "ACTIVE" . equals ( result . getStreamDescription ( ) . getStreamStatus ( ) ) ) { throw new IllegalArgumentException ( "Kinesis stream " + streamName + " is not active." ) ; } return result . getStreamDescription ( ) . getShards ( ) ; } } catch ( ResourceNotFoundException e ) { LOGGER . warn ( "Kinesis stream " + streamName + " does not exist." , e ) ; } catch ( Exception e ) { LOGGER . warn ( "Error found while describing the kinesis stream " + streamName , e ) ; } return null ; }
|
connect to kinesis stream to discover the shards on the stream
| 270
| 13
|
154,837
|
public static String getProperty ( Properties props , String propertyName , String defaultValue ) { String value = props . getProperty ( propertyName , defaultValue ) . trim ( ) ; if ( value . isEmpty ( ) ) { throw new IllegalArgumentException ( "Property " + propertyName + " is missing in Kinesis importer configuration." ) ; } return value ; }
|
get property value . If no value is available throw IllegalArgumentException
| 79
| 14
|
154,838
|
public static long getPropertyAsLong ( Properties props , String propertyName , long defaultValue ) { String value = props . getProperty ( propertyName , "" ) . trim ( ) ; if ( value . isEmpty ( ) ) { return defaultValue ; } try { long val = Long . parseLong ( value ) ; if ( val <= 0 ) { throw new IllegalArgumentException ( "Value of " + propertyName + " should be positive, but current value is " + val ) ; } return val ; } catch ( NumberFormatException e ) { throw new IllegalArgumentException ( "Property " + propertyName + " must be a number in Kinesis importer configuration." ) ; } }
|
get property value as long .
| 146
| 6
|
154,839
|
public synchronized boolean addChild ( String child ) { if ( children == null ) { // let's be conservative on the typical number of children children = new HashSet < String > ( 8 ) ; } return children . add ( child ) ; }
|
Method that inserts a child into the children set
| 50
| 9
|
154,840
|
protected String getHostHeader ( ) { if ( m_hostHeader != null ) { return m_hostHeader ; } if ( ! httpAdminListener . m_publicIntf . isEmpty ( ) ) { m_hostHeader = httpAdminListener . m_publicIntf ; return m_hostHeader ; } InetAddress addr = null ; int httpPort = VoltDB . DEFAULT_HTTP_PORT ; try { String localMetadata = VoltDB . instance ( ) . getLocalMetadata ( ) ; JSONObject jsObj = new JSONObject ( localMetadata ) ; JSONArray interfaces = jsObj . getJSONArray ( "interfaces" ) ; //The first interface is external interface if specified. String iface = interfaces . getString ( 0 ) ; addr = InetAddress . getByName ( iface ) ; httpPort = jsObj . getInt ( "httpPort" ) ; } catch ( Exception e ) { m_log . warn ( "Failed to get HTTP interface information." , e ) ; } if ( addr == null ) { addr = org . voltcore . utils . CoreUtils . getLocalAddress ( ) ; } //Make the header string. m_hostHeader = addr . getHostAddress ( ) + ":" + httpPort ; return m_hostHeader ; }
|
like behind a NATed network .
| 277
| 7
|
154,841
|
void handleReportPage ( HttpServletRequest request , HttpServletResponse response ) { try { String report = ReportMaker . liveReport ( ) ; response . setContentType ( HTML_CONTENT_TYPE ) ; response . setStatus ( HttpServletResponse . SC_OK ) ; response . getWriter ( ) . print ( report ) ; } catch ( IOException ex ) { m_log . warn ( "Failed to get catalog report." , ex ) ; } }
|
Draw the catalog report page mostly by pulling it from the JAR .
| 103
| 14
|
154,842
|
public int getIntegerProperty ( String key , int defaultValue , int [ ] values ) { String prop = getProperty ( key ) ; int value = defaultValue ; try { if ( prop != null ) { value = Integer . parseInt ( prop ) ; } } catch ( NumberFormatException e ) { } if ( ArrayUtil . find ( values , value ) == - 1 ) { return defaultValue ; } return value ; }
|
Choice limited to values list defaultValue must be in the values list .
| 90
| 14
|
154,843
|
public void save ( ) throws Exception { if ( fileName == null || fileName . length ( ) == 0 ) { throw new java . io . FileNotFoundException ( Error . getMessage ( ErrorCode . M_HsqlProperties_load ) ) ; } String filestring = fileName + ".properties" ; save ( filestring ) ; }
|
Saves the properties .
| 76
| 5
|
154,844
|
public void save ( String fileString ) throws Exception { // oj@openoffice.org fa . createParentDirs ( fileString ) ; OutputStream fos = fa . openOutputStreamElement ( fileString ) ; FileAccess . FileSync outDescriptor = fa . getFileSync ( fos ) ; JavaSystem . saveProperties ( stringProps , HsqlDatabaseProperties . PRODUCT_NAME + " " + HsqlDatabaseProperties . THIS_FULL_VERSION , fos ) ; fos . flush ( ) ; outDescriptor . sync ( ) ; fos . close ( ) ; return ; }
|
Saves the properties using JDK2 method if present otherwise JDK1 .
| 133
| 16
|
154,845
|
private void addError ( int code , String key ) { errorCodes = ( int [ ] ) ArrayUtil . resizeArray ( errorCodes , errorCodes . length + 1 ) ; errorKeys = ( String [ ] ) ArrayUtil . resizeArray ( errorKeys , errorKeys . length + 1 ) ; errorCodes [ errorCodes . length - 1 ] = code ; errorKeys [ errorKeys . length - 1 ] = key ; }
|
Adds the error code and the key to the list of errors . This list is populated during construction or addition of elements and is used outside this class to act upon the errors .
| 96
| 35
|
154,846
|
public void checkPassword ( String value ) { if ( ! value . equals ( password ) ) { throw Error . error ( ErrorCode . X_28000 ) ; } }
|
Checks if this object s password attibute equals specified argument else throws .
| 36
| 16
|
154,847
|
public String getCreateUserSQL ( ) { StringBuffer sb = new StringBuffer ( 64 ) ; sb . append ( Tokens . T_CREATE ) . append ( ' ' ) ; sb . append ( Tokens . T_USER ) . append ( ' ' ) ; sb . append ( getStatementName ( ) ) . append ( ' ' ) ; sb . append ( Tokens . T_PASSWORD ) . append ( ' ' ) ; sb . append ( StringConverter . toQuotedString ( password , ' ' , true ) ) ; return sb . toString ( ) ; }
|
Returns the DDL string sequence that creates this user .
| 130
| 11
|
154,848
|
public String getConnectUserSQL ( ) { StringBuffer sb = new StringBuffer ( ) ; sb . append ( Tokens . T_SET ) . append ( ' ' ) ; sb . append ( Tokens . T_SESSION ) . append ( ' ' ) ; sb . append ( Tokens . T_AUTHORIZATION ) . append ( ' ' ) ; sb . append ( StringConverter . toQuotedString ( getNameString ( ) , ' ' , true ) ) ; return sb . toString ( ) ; }
|
Retrieves the redo log character sequence for connecting this user
| 117
| 13
|
154,849
|
static ImmutableMap < String , PublicSuffixType > parseTrie ( CharSequence encoded ) { ImmutableMap . Builder < String , PublicSuffixType > builder = ImmutableMap . builder ( ) ; int encodedLen = encoded . length ( ) ; int idx = 0 ; while ( idx < encodedLen ) { idx += doParseTrieToBuilder ( Lists . < CharSequence > newLinkedList ( ) , encoded . subSequence ( idx , encodedLen ) , builder ) ; } return builder . build ( ) ; }
|
Parses a serialized trie representation of a map of reversed public suffixes into an immutable map of public suffixes .
| 122
| 26
|
154,850
|
private static int doParseTrieToBuilder ( List < CharSequence > stack , CharSequence encoded , ImmutableMap . Builder < String , PublicSuffixType > builder ) { int encodedLen = encoded . length ( ) ; int idx = 0 ; char c = ' ' ; // Read all of the characters for this node. for ( ; idx < encodedLen ; idx ++ ) { c = encoded . charAt ( idx ) ; if ( c == ' ' || c == ' ' || c == ' ' || c == ' ' || c == ' ' ) { break ; } } stack . add ( 0 , reverse ( encoded . subSequence ( 0 , idx ) ) ) ; if ( c == ' ' || c == ' ' || c == ' ' || c == ' ' ) { // '!' represents an interior node that represents an ICANN entry in the map. // '?' represents a leaf node, which represents an ICANN entry in map. // ':' represents an interior node that represents a private entry in the map // ',' represents a leaf node, which represents a private entry in the map. String domain = PREFIX_JOINER . join ( stack ) ; if ( domain . length ( ) > 0 ) { builder . put ( domain , PublicSuffixType . fromCode ( c ) ) ; } } idx ++ ; if ( c != ' ' && c != ' ' ) { while ( idx < encodedLen ) { // Read all the children idx += doParseTrieToBuilder ( stack , encoded . subSequence ( idx , encodedLen ) , builder ) ; if ( encoded . charAt ( idx ) == ' ' || encoded . charAt ( idx ) == ' ' ) { // An extra '?' or ',' after a child node indicates the end of all children of this node. idx ++ ; break ; } } } stack . remove ( 0 ) ; return idx ; }
|
Parses a trie node and returns the number of characters consumed .
| 418
| 15
|
154,851
|
public static synchronized void initialize ( int myHostId , CatalogContext catalogContext , boolean isRejoin , boolean forceCreate , HostMessenger messenger , List < Pair < Integer , Integer > > partitions ) throws ExportManager . SetupException { ExportManager em = new ExportManager ( myHostId , catalogContext , messenger ) ; m_self = em ; if ( forceCreate ) { em . clearOverflowData ( ) ; } em . initialize ( catalogContext , partitions , isRejoin ) ; RealVoltDB db = ( RealVoltDB ) VoltDB . instance ( ) ; db . getStatsAgent ( ) . registerStatsSource ( StatsSelector . EXPORT , myHostId , // m_siteId, em . getExportStats ( ) ) ; }
|
FIXME - this synchronizes on the ExportManager class but everyone else synchronizes on the instance .
| 160
| 20
|
154,852
|
private void initialize ( CatalogContext catalogContext , List < Pair < Integer , Integer > > localPartitionsToSites , boolean isRejoin ) { try { CatalogMap < Connector > connectors = CatalogUtil . getConnectors ( catalogContext ) ; if ( exportLog . isDebugEnabled ( ) ) { exportLog . debug ( "initialize for " + connectors . size ( ) + " connectors." ) ; CatalogUtil . dumpConnectors ( exportLog , connectors ) ; } if ( ! CatalogUtil . hasExportedTables ( connectors ) ) { return ; } if ( exportLog . isDebugEnabled ( ) ) { exportLog . debug ( "Creating processor " + m_loaderClass ) ; } ExportDataProcessor newProcessor = getNewProcessorWithProcessConfigSet ( m_processorConfig ) ; m_processor . set ( newProcessor ) ; File exportOverflowDirectory = new File ( VoltDB . instance ( ) . getExportOverflowPath ( ) ) ; ExportGeneration generation = new ExportGeneration ( exportOverflowDirectory , m_messenger ) ; generation . initialize ( m_hostId , catalogContext , connectors , newProcessor , localPartitionsToSites , exportOverflowDirectory ) ; m_generation . set ( generation ) ; newProcessor . setExportGeneration ( generation ) ; newProcessor . readyForData ( ) ; } catch ( final ClassNotFoundException e ) { exportLog . l7dlog ( Level . ERROR , LogKeys . export_ExportManager_NoLoaderExtensions . name ( ) , e ) ; throw new RuntimeException ( e ) ; } catch ( final Exception e ) { exportLog . error ( "Initialize failed with:" , e ) ; throw new RuntimeException ( e ) ; } }
|
Creates the initial export processor if export is enabled
| 377
| 10
|
154,853
|
private void swapWithNewProcessor ( final CatalogContext catalogContext , ExportGeneration generation , CatalogMap < Connector > connectors , List < Pair < Integer , Integer > > partitions , Map < String , Pair < Properties , Set < String > > > config ) { ExportDataProcessor oldProcessor = m_processor . get ( ) ; if ( exportLog . isDebugEnabled ( ) ) { exportLog . debug ( "Shutdown guestprocessor" ) ; } oldProcessor . shutdown ( ) ; if ( exportLog . isDebugEnabled ( ) ) { exportLog . debug ( "Processor shutdown completed, install new export processor" ) ; } generation . unacceptMastership ( ) ; if ( exportLog . isDebugEnabled ( ) ) { exportLog . debug ( "Existing export datasources unassigned." ) ; } try { ExportDataProcessor newProcessor = getNewProcessorWithProcessConfigSet ( config ) ; //Load any missing tables. generation . initializeGenerationFromCatalog ( catalogContext , connectors , newProcessor , m_hostId , partitions , true ) ; for ( Pair < Integer , Integer > partition : partitions ) { generation . updateAckMailboxes ( partition . getFirst ( ) , null ) ; } //We create processor even if we dont have any streams. newProcessor . setExportGeneration ( generation ) ; if ( m_startPolling && ! config . isEmpty ( ) ) { newProcessor . startPolling ( ) ; } m_processor . getAndSet ( newProcessor ) ; newProcessor . readyForData ( ) ; } catch ( Exception crash ) { VoltDB . crashLocalVoltDB ( "Error creating next export processor" , true , crash ) ; } for ( int partitionId : m_masterOfPartitions ) { generation . acceptMastership ( partitionId ) ; } }
|
remove and install new processor
| 395
| 5
|
154,854
|
@ Override public String calculateContentDeterminismMessage ( ) { String ans = getContentDeterminismMessage ( ) ; if ( ans != null ) { return ans ; } if ( m_subquery != null ) { updateContentDeterminismMessage ( m_subquery . calculateContentDeterminismMessage ( ) ) ; return getContentDeterminismMessage ( ) ; } if ( m_columns != null ) { for ( AbstractExpression expr : m_columns . values ( ) ) { String emsg = expr . getContentDeterminismMessage ( ) ; if ( emsg != null ) { updateContentDeterminismMessage ( emsg ) ; return emsg ; } } } return null ; }
|
Return the content determinism string of the subquery if there is one .
| 152
| 15
|
154,855
|
public int getSortIndexOfOrderByExpression ( AbstractExpression partitionByExpression ) { for ( int idx = 0 ; idx < m_orderByExpressions . size ( ) ; ++ idx ) { if ( m_orderByExpressions . get ( idx ) . equals ( partitionByExpression ) ) { return idx ; } } return - 1 ; }
|
Return the index of the given partition by expression in the order by list . This is used when trying to rationalize partition by and order by expressions .
| 82
| 30
|
154,856
|
private boolean rewriteSelectStmt ( ) { if ( m_mvi != null ) { final Table view = m_mvi . getDest ( ) ; final String viewName = view . getTypeName ( ) ; // Get the map of select stmt's display column index -> view table (column name, column index) m_selectStmt . getFinalProjectionSchema ( ) . resetTableName ( viewName , viewName ) . toTVEAndFixColumns ( m_QueryColumnNameAndIndx_to_MVColumnNameAndIndx . entrySet ( ) . stream ( ) . collect ( Collectors . toMap ( kv -> kv . getKey ( ) . getFirst ( ) , Map . Entry :: getValue ) ) ) ; // change to display column index-keyed map final Map < Integer , Pair < String , Integer > > colSubIndx = m_QueryColumnNameAndIndx_to_MVColumnNameAndIndx . entrySet ( ) . stream ( ) . collect ( Collectors . toMap ( kv -> kv . getKey ( ) . getSecond ( ) , Map . Entry :: getValue ) ) ; ParsedSelectStmt . updateTableNames ( m_selectStmt . m_aggResultColumns , viewName ) ; ParsedSelectStmt . fixColumns ( m_selectStmt . m_aggResultColumns , colSubIndx ) ; ParsedSelectStmt . updateTableNames ( m_selectStmt . m_displayColumns , viewName ) ; ParsedSelectStmt . fixColumns ( m_selectStmt . m_displayColumns , colSubIndx ) ; m_selectStmt . rewriteAsMV ( view ) ; m_mvi = null ; // makes this method re-entrant safe return true ; } else { // scans all sub-queries for rewriting opportunities return m_selectStmt . allScans ( ) . stream ( ) . map ( scan -> scan instanceof StmtSubqueryScan && rewriteTableAlias ( ( StmtSubqueryScan ) scan ) ) . reduce ( Boolean :: logicalOr ) . get ( ) ; } }
|
Try to rewrite SELECT stmt if there is a matching materialized view .
| 467
| 15
|
154,857
|
private static boolean rewriteTableAlias ( StmtSubqueryScan scan ) { final AbstractParsedStmt stmt = scan . getSubqueryStmt ( ) ; return stmt instanceof ParsedSelectStmt && ( new MVQueryRewriter ( ( ParsedSelectStmt ) stmt ) ) . rewrite ( ) ; }
|
Checks for any opportunity to rewrite sub - queries
| 70
| 10
|
154,858
|
private static List < Integer > extractTVEIndices ( AbstractExpression e , List < Integer > accum ) { if ( e != null ) { if ( e instanceof TupleValueExpression ) { accum . add ( ( ( TupleValueExpression ) e ) . getColumnIndex ( ) ) ; } else { extractTVEIndices ( e . getRight ( ) , extractTVEIndices ( e . getLeft ( ) , accum ) ) ; if ( e . getArgs ( ) != null ) { e . getArgs ( ) . forEach ( ex -> extractTVEIndices ( ex , accum ) ) ; } } } return accum ; }
|
Helper method to extract all TVE column indices from an expression .
| 141
| 13
|
154,859
|
private Map < Pair < String , Integer > , Pair < String , Integer > > gbyMatches ( MaterializedViewInfo mv ) { final FilterMatcher filter = new FilterMatcher ( m_selectStmt . m_joinTree . getJoinExpression ( ) , predicate_of ( mv ) ) ; // *** Matching criteria/order: *** // 1. Filters match; // 2. Group-by-columns' table is same as MV's source table; // 3. Those group-by-column's columns are same as MV's group-by columns; // 4. Select stmt's group-by column names match with MV's // 5. Each column's aggregation type match, in the sense of set equality; if ( filter . match ( ) && gbyTablesEqual ( mv ) && gbyColumnsMatch ( mv ) ) { return getViewColumnMaps ( mv ) ; } else { return null ; } }
|
Apply matching rules of SELECT stmt against a materialized view and gives back column relationship between the two .
| 205
| 21
|
154,860
|
private static Map < MaterializedViewInfo , Table > getMviAndViews ( List < Table > tbls ) { return tbls . stream ( ) . flatMap ( tbl -> StreamSupport . stream ( ( ( Iterable < MaterializedViewInfo > ) ( ) -> tbl . getViews ( ) . iterator ( ) ) . spliterator ( ) , false ) . map ( mv -> Pair . of ( mv , mv . getDest ( ) ) ) ) . collect ( Collectors . toMap ( Pair :: getFirst , Pair :: getSecond ) ) ; }
|
returns all materialized view info = > view table from table list
| 127
| 14
|
154,861
|
private static AbstractExpression transformExpressionRidofPVE ( AbstractExpression src ) { AbstractExpression left = src . getLeft ( ) , right = src . getRight ( ) ; if ( left != null ) { left = transformExpressionRidofPVE ( left ) ; } if ( right != null ) { right = transformExpressionRidofPVE ( right ) ; } final AbstractExpression dst ; if ( src instanceof ParameterValueExpression ) { // assert ( ( ( ParameterValueExpression ) src ) . getOriginalValue ( ) != null ) ; dst = ( ( ParameterValueExpression ) src ) . getOriginalValue ( ) . clone ( ) ; } else { dst = src . clone ( ) ; } dst . setLeft ( left ) ; dst . setRight ( right ) ; return dst ; }
|
For scope of ENG - 2878 caching would not cause this trouble because parameter
| 181
| 15
|
154,862
|
private static List < AbstractExpression > getGbyExpressions ( MaterializedViewInfo mv ) { try { return AbstractExpression . fromJSONArrayString ( mv . getGroupbyexpressionsjson ( ) , null ) ; } catch ( JSONException e ) { return new ArrayList <> ( ) ; } }
|
Get group - by expression
| 68
| 5
|
154,863
|
private boolean isNpTxn ( Iv2InitiateTaskMessage msg ) { return msg . getStoredProcedureName ( ) . startsWith ( "@" ) && msg . getStoredProcedureName ( ) . equalsIgnoreCase ( "@BalancePartitions" ) && ( byte ) msg . getParameters ( ) [ 1 ] != 1 ; // clearIndex is MP, normal rebalance is NP }
|
Hacky way to only run
| 90
| 6
|
154,864
|
private Set < Integer > getBalancePartitions ( Iv2InitiateTaskMessage msg ) { try { JSONObject jsObj = new JSONObject ( ( String ) msg . getParameters ( ) [ 0 ] ) ; BalancePartitionsRequest request = new BalancePartitionsRequest ( jsObj ) ; return Sets . newHashSet ( request . partitionPairs . get ( 0 ) . srcPartition , request . partitionPairs . get ( 0 ) . destPartition ) ; } catch ( JSONException e ) { hostLog . warn ( "Unable to determine partitions for @BalancePartitions" , e ) ; return null ; } }
|
Extract the two involved partitions from the
| 133
| 8
|
154,865
|
public void handleEOLMessage ( ) { Iv2EndOfLogMessage msg = new Iv2EndOfLogMessage ( m_partitionId ) ; MPIEndOfLogTransactionState txnState = new MPIEndOfLogTransactionState ( msg ) ; MPIEndOfLogTask task = new MPIEndOfLogTask ( m_mailbox , m_pendingTasks , txnState , m_iv2Masters ) ; m_pendingTasks . offer ( task ) ; }
|
Inject a task into the transaction task queue to flush it . When it executes it will send out MPI end of log messages to all partition initiators .
| 108
| 32
|
154,866
|
private static ProClass < MpProcedureTask > loadNpProcedureTaskClass ( ) { return ProClass . < MpProcedureTask > load ( "org.voltdb.iv2.NpProcedureTask" , "N-Partition" , MiscUtils . isPro ( ) ? ProClass . HANDLER_LOG : ProClass . HANDLER_IGNORE ) . errorHandler ( tmLog :: error ) . useConstructorFor ( Mailbox . class , String . class , TransactionTaskQueue . class , Iv2InitiateTaskMessage . class , Map . class , long . class , boolean . class , int . class ) ; }
|
Load the pro class for n - partition transactions .
| 148
| 10
|
154,867
|
void safeAddToDuplicateCounterMap ( long dpKey , DuplicateCounter counter ) { DuplicateCounter existingDC = m_duplicateCounters . get ( dpKey ) ; if ( existingDC != null ) { // this is a collision and is bad existingDC . logWithCollidingDuplicateCounters ( counter ) ; VoltDB . crashGlobalVoltDB ( "DUPLICATE COUNTER MISMATCH: two duplicate counter keys collided." , true , null ) ; } else { m_duplicateCounters . put ( dpKey , counter ) ; } }
|
Just using put on the dup counter map is unsafe . It won t detect the case where keys collide from two different transactions .
| 128
| 25
|
154,868
|
Table ADMINISTRABLE_ROLE_AUTHORIZATIONS ( ) { Table t = sysTables [ ADMINISTRABLE_ROLE_AUTHORIZATIONS ] ; if ( t == null ) { t = createBlankTable ( sysTableHsqlNames [ ADMINISTRABLE_ROLE_AUTHORIZATIONS ] ) ; addColumn ( t , "GRANTEE" , SQL_IDENTIFIER ) ; addColumn ( t , "ROLE_NAME" , SQL_IDENTIFIER ) ; addColumn ( t , "IS_GRANTABLE" , SQL_IDENTIFIER ) ; HsqlName name = HsqlNameManager . newInfoSchemaObjectName ( sysTableHsqlNames [ ADMINISTRABLE_ROLE_AUTHORIZATIONS ] . name , false , SchemaObject . INDEX ) ; t . createPrimaryKey ( name , new int [ ] { 0 , 1 , 2 } , false ) ; return t ; } if ( session . isAdmin ( ) ) { insertRoles ( t , session . getGrantee ( ) , true ) ; } return t ; }
|
Returns roles that are grantable by an admin user which means all the roles
| 245
| 15
|
154,869
|
Table ROUTINE_ROUTINE_USAGE ( ) { Table t = sysTables [ ROUTINE_ROUTINE_USAGE ] ; if ( t == null ) { t = createBlankTable ( sysTableHsqlNames [ ROUTINE_ROUTINE_USAGE ] ) ; addColumn ( t , "SPECIFIC_CATALOG" , SQL_IDENTIFIER ) ; addColumn ( t , "SPECIFIC_SCHEMA" , SQL_IDENTIFIER ) ; addColumn ( t , "SPECIFIC_NAME" , SQL_IDENTIFIER ) ; addColumn ( t , "ROUTINE_CATALOG" , SQL_IDENTIFIER ) ; addColumn ( t , "ROUTINE_SCHEMA" , SQL_IDENTIFIER ) ; addColumn ( t , "ROUTINE_NAME" , SQL_IDENTIFIER ) ; HsqlName name = HsqlNameManager . newInfoSchemaObjectName ( sysTableHsqlNames [ ROUTINE_ROUTINE_USAGE ] . name , false , SchemaObject . INDEX ) ; t . createPrimaryKey ( name , new int [ ] { 0 , 1 , 2 , 3 , 4 , 5 } , false ) ; return t ; } // column number mappings final int specific_catalog = 0 ; final int specific_schema = 1 ; final int specific_name = 2 ; final int routine_catalog = 3 ; final int routine_schema = 4 ; final int routine_name = 5 ; // PersistentStore store = database . persistentStoreCollection . getStore ( t ) ; Iterator it ; Object [ ] row ; it = database . schemaManager . databaseObjectIterator ( SchemaObject . ROUTINE ) ; while ( it . hasNext ( ) ) { RoutineSchema routine = ( RoutineSchema ) it . next ( ) ; if ( ! session . getGrantee ( ) . isAccessible ( routine ) ) { continue ; } Routine [ ] specifics = routine . getSpecificRoutines ( ) ; for ( int m = 0 ; m < specifics . length ; m ++ ) { OrderedHashSet set = specifics [ m ] . getReferences ( ) ; for ( int i = 0 ; i < set . size ( ) ; i ++ ) { HsqlName refName = ( HsqlName ) set . get ( i ) ; if ( refName . type != SchemaObject . FUNCTION && refName . type != SchemaObject . PROCEDURE ) { continue ; } if ( ! session . getGrantee ( ) . isAccessible ( refName ) ) { continue ; } row = t . getEmptyRowData ( ) ; row [ specific_catalog ] = database . getCatalogName ( ) . name ; row [ specific_schema ] = specifics [ m ] . getSchemaName ( ) . name ; row [ specific_name ] = specifics [ m ] . getName ( ) . name ; row [ routine_catalog ] = database . getCatalogName ( ) . name ; row [ routine_schema ] = refName . schema . name ; row [ routine_name ] = refName . name ; try { t . insertSys ( store , row ) ; } catch ( HsqlException e ) { } } } } return t ; }
|
needs to provide list of specific referenced routines
| 712
| 8
|
154,870
|
public ListenableFuture < ? > closeAndDelete ( ) { // We're going away, so shut ourselves from the external world m_closed = true ; m_ackMailboxRefs . set ( null ) ; // Export mastership should have been released: force it. m_mastershipAccepted . set ( false ) ; // FIXME: necessary? Old processor should have been shut down. // Returning null indicates end of stream try { if ( m_pollTask != null ) { m_pollTask . setFuture ( null ) ; } } catch ( RejectedExecutionException reex ) { // Ignore, {@code GuestProcessor} was closed } m_pollTask = null ; return m_es . submit ( new Runnable ( ) { @ Override public void run ( ) { try { // Discard the pending container, shortcutting the standard discard logic AckingContainer ack = m_pendingContainer . getAndSet ( null ) ; if ( ack != null ) { if ( exportLog . isDebugEnabled ( ) ) { exportLog . debug ( "Discard pending container, lastSeqNo: " + ack . getLastSeqNo ( ) ) ; } ack . internalDiscard ( ) ; } m_committedBuffers . closeAndDelete ( ) ; m_adFile . delete ( ) ; } catch ( IOException e ) { exportLog . rateLimitedLog ( 60 , Level . WARN , e , "Error closing commit buffers" ) ; } finally { m_es . shutdown ( ) ; } } } ) ; }
|
This is called on updateCatalog when an exporting stream is dropped .
| 334
| 13
|
154,871
|
public void setPendingContainer ( AckingContainer container ) { Preconditions . checkNotNull ( m_pendingContainer . get ( ) != null , "Pending container must be null." ) ; if ( m_closed ) { // A very slow export decoder must have noticed the export processor shutting down exportLog . info ( "Discarding stale pending container" ) ; container . internalDiscard ( ) ; } else { m_pendingContainer . set ( container ) ; } }
|
Needs to be thread - safe EDS executor export decoder and site thread both touch m_pendingContainer .
| 103
| 25
|
154,872
|
public void remoteAck ( final long seq ) { //In replicated only master will be doing this. m_es . execute ( new Runnable ( ) { @ Override public void run ( ) { try { // ENG-12282: A race condition between export data source // master promotion and getting acks from the previous // failed master can occur. The failed master could have // sent out an ack with Long.MIN and fails immediately after // that, which causes a new master to be elected. The // election and the receiving of this ack message happens on // two different threads on the new master. If it's promoted // while processing the ack, the ack may call `m_onDrain` // while the other thread is polling buffers, which may // never get discarded. // // Now that we are on the same thread, check to see if we // are already promoted to be the master. If so, ignore the // ack. if ( ! m_es . isShutdown ( ) && ! m_mastershipAccepted . get ( ) ) { setCommittedSeqNo ( seq ) ; ackImpl ( seq ) ; } } catch ( Exception e ) { exportLog . error ( "Error acking export buffer" , e ) ; } catch ( Error e ) { VoltDB . crashLocalVoltDB ( "Error acking export buffer" , true , e ) ; } } } ) ; }
|
Entry point for receiving acknowledgments from remote entities .
| 302
| 10
|
154,873
|
private void handleDrainedSource ( ) throws IOException { if ( ! inCatalog ( ) && m_committedBuffers . isEmpty ( ) ) { //Returning null indicates end of stream try { if ( m_pollTask != null ) { m_pollTask . setFuture ( null ) ; } } catch ( RejectedExecutionException reex ) { // Ignore, {@code GuestProcessor} was closed } m_pollTask = null ; m_generation . onSourceDrained ( m_partitionId , m_tableName ) ; return ; } }
|
Notify the generation when source is drained on an unused partition .
| 122
| 13
|
154,874
|
public synchronized void acceptMastership ( ) { if ( m_onMastership == null ) { if ( exportLog . isDebugEnabled ( ) ) { exportLog . debug ( "Mastership Runnable not yet set for table " + getTableName ( ) + " partition " + getPartitionId ( ) ) ; } return ; } if ( m_mastershipAccepted . get ( ) ) { if ( exportLog . isDebugEnabled ( ) ) { exportLog . debug ( "Export table " + getTableName ( ) + " mastership already accepted for partition " + getPartitionId ( ) ) ; } return ; } m_es . execute ( new Runnable ( ) { @ Override public void run ( ) { try { if ( ! m_es . isShutdown ( ) || ! m_closed ) { if ( exportLog . isDebugEnabled ( ) ) { exportLog . debug ( "Export table " + getTableName ( ) + " accepting mastership for partition " + getPartitionId ( ) ) ; } if ( m_mastershipAccepted . compareAndSet ( false , true ) ) { // Either get enough responses or have received TRANSFER_MASTER event, clear the response sender HSids. m_queryResponses . clear ( ) ; m_onMastership . run ( ) ; } } } catch ( Exception e ) { exportLog . error ( "Error in accepting mastership" , e ) ; } } } ) ; }
|
Trigger an execution of the mastership runnable by the associated executor service
| 320
| 16
|
154,875
|
public void setOnMastership ( Runnable toBeRunOnMastership ) { Preconditions . checkNotNull ( toBeRunOnMastership , "mastership runnable is null" ) ; m_onMastership = toBeRunOnMastership ; // If connector "replicated" property is set to true then every // replicated export stream is its own master if ( m_runEveryWhere ) { //export stream for run-everywhere clients doesn't need ack mailbox m_ackMailboxRefs . set ( null ) ; acceptMastership ( ) ; } }
|
set the runnable task that is to be executed on mastership designation
| 130
| 15
|
154,876
|
public void handleQueryMessage ( final long senderHSId , long requestId , long gapStart ) { m_es . execute ( new Runnable ( ) { @ Override public void run ( ) { long lastSeq = Long . MIN_VALUE ; Pair < Long , Long > range = m_gapTracker . getRangeContaining ( gapStart ) ; if ( range != null ) { lastSeq = range . getSecond ( ) ; } sendQueryResponse ( senderHSId , requestId , lastSeq ) ; } } ) ; }
|
Query whether a master exists for the given partition if not try to promote the local data source .
| 116
| 19
|
154,877
|
private void resetStateInRejoinOrRecover ( long initialSequenceNumber , boolean isRejoin ) { if ( isRejoin ) { if ( ! m_gapTracker . isEmpty ( ) ) { m_lastReleasedSeqNo = Math . max ( m_lastReleasedSeqNo , m_gapTracker . getFirstSeqNo ( ) - 1 ) ; } } else { m_lastReleasedSeqNo = Math . max ( m_lastReleasedSeqNo , initialSequenceNumber ) ; } // Rejoin or recovery should be on a transaction boundary (except maybe in a gap situation) m_committedSeqNo = m_lastReleasedSeqNo ; m_firstUnpolledSeqNo = m_lastReleasedSeqNo + 1 ; m_tuplesPending . set ( m_gapTracker . sizeInSequence ( ) ) ; }
|
current master to tell us where to poll next buffer .
| 189
| 11
|
154,878
|
public static Date getDateFromTransactionId ( long txnId ) { long time = txnId >> ( COUNTER_BITS + INITIATORID_BITS ) ; time += VOLT_EPOCH ; return new Date ( time ) ; }
|
Given a transaction id return the time of its creation by examining the embedded timestamp .
| 56
| 16
|
154,879
|
private AbstractTopology recoverPartitions ( AbstractTopology topology , String haGroup , Set < Integer > recoverPartitions ) { long version = topology . version ; if ( ! recoverPartitions . isEmpty ( ) ) { // In rejoin case, partition list from the rejoining node could be out of range if the rejoining // host is a previously elastic removed node or some other used nodes, if out of range, do not restore if ( Collections . max ( recoverPartitions ) > Collections . max ( m_cartographer . getPartitions ( ) ) ) { recoverPartitions . clear ( ) ; } } AbstractTopology recoveredTopo = AbstractTopology . mutateRecoverTopology ( topology , m_messenger . getLiveHostIds ( ) , m_messenger . getHostId ( ) , haGroup , recoverPartitions ) ; if ( recoveredTopo == null ) { return null ; } List < Integer > partitions = Lists . newArrayList ( recoveredTopo . getPartitionIdList ( m_messenger . getHostId ( ) ) ) ; if ( partitions != null && partitions . size ( ) == m_catalogContext . getNodeSettings ( ) . getLocalSitesCount ( ) ) { TopologyZKUtils . updateTopologyToZK ( m_messenger . getZK ( ) , recoveredTopo ) ; } if ( version < recoveredTopo . version && ! recoverPartitions . isEmpty ( ) ) { consoleLog . info ( "Partition placement layout has been restored for rejoining." ) ; } return recoveredTopo ; }
|
recover the partition assignment from one of lost hosts in the same placement group for rejoin Use the placement group of the recovering host to find a matched host from the lost nodes in the topology If the partition count from the lost node is the same as the site count of the recovering host The partitions on the lost node will be placed on the recovering host . Partition group layout will be maintained . Topology will be updated on ZK if successful
| 342
| 90
|
154,880
|
private boolean stopRejoiningHost ( ) { // The host failure notification could come before mesh determination, wait for the determination try { m_meshDeterminationLatch . await ( ) ; } catch ( InterruptedException e ) { } if ( m_rejoining ) { VoltDB . crashLocalVoltDB ( "Another node failed before this node could finish rejoining. " + "As a result, the rejoin operation has been canceled. Please try again." ) ; return true ; } return false ; }
|
If the current node hasn t finished rejoin when another node fails fail this node to prevent locking up .
| 107
| 21
|
154,881
|
private void checkExportStreamMastership ( ) { for ( Initiator initiator : m_iv2Initiators . values ( ) ) { if ( initiator . getPartitionId ( ) != MpInitiator . MP_INIT_PID ) { SpInitiator spInitiator = ( SpInitiator ) initiator ; if ( spInitiator . isLeader ( ) ) { ExportManager . instance ( ) . takeMastership ( spInitiator . getPartitionId ( ) ) ; } } } }
|
move back to partition leader s node .
| 118
| 8
|
154,882
|
void scheduleDailyLoggingWorkInNextCheckTime ( ) { DailyRollingFileAppender dailyAppender = null ; Enumeration < ? > appenders = Logger . getRootLogger ( ) . getAllAppenders ( ) ; while ( appenders . hasMoreElements ( ) ) { Appender appender = ( Appender ) appenders . nextElement ( ) ; if ( appender instanceof DailyRollingFileAppender ) { dailyAppender = ( DailyRollingFileAppender ) appender ; } } final DailyRollingFileAppender dailyRollingFileAppender = dailyAppender ; Field field = null ; if ( dailyRollingFileAppender != null ) { try { field = dailyRollingFileAppender . getClass ( ) . getDeclaredField ( "nextCheck" ) ; field . setAccessible ( true ) ; } catch ( NoSuchFieldException e ) { hostLog . error ( "Failed to set daily system info logging: " + e . getMessage ( ) ) ; } } final Field nextCheckField = field ; long nextCheck = System . currentTimeMillis ( ) ; // the next part may throw exception, current time is the default value if ( dailyRollingFileAppender != null && nextCheckField != null ) { try { nextCheck = nextCheckField . getLong ( dailyRollingFileAppender ) ; scheduleWork ( new DailyLogTask ( ) , nextCheck - System . currentTimeMillis ( ) + 30 * 1000 , 0 , TimeUnit . MILLISECONDS ) ; } catch ( Exception e ) { hostLog . error ( "Failed to set daily system info logging: " + e . getMessage ( ) ) ; } } }
|
Get the next check time for a private member in log4j library which is not a reliable idea . It adds 30 seconds for the initial delay and uses a periodical thread to schedule the daily logging work with this delay .
| 366
| 45
|
154,883
|
private void schedulePeriodicWorks ( ) { // JMX stats broadcast m_periodicWorks . add ( scheduleWork ( new Runnable ( ) { @ Override public void run ( ) { // A null here was causing a steady stream of annoying but apparently inconsequential // NPEs during a debug session of an unrelated unit test. if ( m_statsManager != null ) { m_statsManager . sendNotification ( ) ; } } } , 0 , StatsManager . POLL_INTERVAL , TimeUnit . MILLISECONDS ) ) ; // clear login count m_periodicWorks . add ( scheduleWork ( new Runnable ( ) { @ Override public void run ( ) { ScheduledExecutorService es = VoltDB . instance ( ) . getSES ( false ) ; if ( es != null && ! es . isShutdown ( ) ) { es . submit ( new Runnable ( ) { @ Override public void run ( ) { long timestamp = System . currentTimeMillis ( ) ; m_flc . checkCounter ( timestamp ) ; } } ) ; } } } , 0 , 10 , TimeUnit . SECONDS ) ) ; // small stats samples m_periodicWorks . add ( scheduleWork ( new Runnable ( ) { @ Override public void run ( ) { SystemStatsCollector . asyncSampleSystemNow ( false , false ) ; } } , 0 , 5 , TimeUnit . SECONDS ) ) ; // medium stats samples m_periodicWorks . add ( scheduleWork ( new Runnable ( ) { @ Override public void run ( ) { SystemStatsCollector . asyncSampleSystemNow ( true , false ) ; } } , 0 , 1 , TimeUnit . MINUTES ) ) ; // large stats samples m_periodicWorks . add ( scheduleWork ( new Runnable ( ) { @ Override public void run ( ) { SystemStatsCollector . asyncSampleSystemNow ( true , true ) ; } } , 0 , 6 , TimeUnit . MINUTES ) ) ; // export stream master check m_periodicWorks . add ( scheduleWork ( new Runnable ( ) { @ Override public void run ( ) { checkExportStreamMastership ( ) ; } } , 0 , 1 , TimeUnit . MINUTES ) ) ; // other enterprise setup EnterpriseMaintenance em = EnterpriseMaintenance . get ( ) ; if ( em != null ) { em . setupMaintenaceTasks ( ) ; } GCInspector . instance . start ( m_periodicPriorityWorkThread , m_gcStats ) ; }
|
Schedule all the periodic works
| 555
| 6
|
154,884
|
private boolean determineIfEligibleAsLeader ( Collection < Integer > partitions , Set < Integer > partitionGroupPeers , AbstractTopology topology ) { if ( partitions . contains ( Integer . valueOf ( 0 ) ) ) { return true ; } for ( Integer host : topology . getHostIdList ( 0 ) ) { if ( partitionGroupPeers . contains ( host ) ) { return true ; } } return false ; }
|
This host can be a leader if partition 0 is on it or it is in the same partition group as a node which has partition 0 . This is because the partition group with partition 0 can never be removed by elastic remove .
| 91
| 45
|
154,885
|
@ Override public void run ( ) { if ( m_restoreAgent != null ) { // start restore process m_restoreAgent . restore ( ) ; } else { onSnapshotRestoreCompletion ( ) ; onReplayCompletion ( Long . MIN_VALUE , m_iv2InitiatorStartingTxnIds ) ; } // Start the rejoin coordinator if ( m_joinCoordinator != null ) { try { m_statusTracker . set ( NodeState . REJOINING ) ; if ( ! m_joinCoordinator . startJoin ( m_catalogContext . database ) ) { VoltDB . crashLocalVoltDB ( "Failed to join the cluster" , true , null ) ; } } catch ( Exception e ) { VoltDB . crashLocalVoltDB ( "Failed to join the cluster" , true , e ) ; } } m_isRunning = true ; }
|
Start all the site s event loops . That s it .
| 197
| 12
|
154,886
|
@ Override public void cleanUpTempCatalogJar ( ) { File configInfoDir = getConfigDirectory ( ) ; if ( ! configInfoDir . exists ( ) ) { return ; } File tempJar = new VoltFile ( configInfoDir . getPath ( ) , InMemoryJarfile . TMP_CATALOG_JAR_FILENAME ) ; if ( tempJar . exists ( ) ) { tempJar . delete ( ) ; } }
|
Clean up the temporary jar file
| 96
| 6
|
154,887
|
private void shutdownInitiators ( ) { if ( m_iv2Initiators == null ) { return ; } m_iv2Initiators . descendingMap ( ) . values ( ) . stream ( ) . forEach ( p -> p . shutdown ( ) ) ; }
|
to be done on SP sites kill SP sites first may risk MP site to wait forever .
| 59
| 18
|
154,888
|
public void createRuntimeReport ( PrintStream out ) { // This function may be running in its own thread. out . print ( "MIME-Version: 1.0\n" ) ; out . print ( "Content-type: multipart/mixed; boundary=\"reportsection\"" ) ; out . print ( "\n\n--reportsection\nContent-Type: text/plain\n\nClientInterface Report\n" ) ; if ( m_clientInterface != null ) { out . print ( m_clientInterface . toString ( ) + "\n" ) ; } }
|
Debugging function - creates a record of the current state of the system .
| 126
| 15
|
154,889
|
private void initializeDRProducer ( ) { try { if ( m_producerDRGateway != null ) { m_producerDRGateway . startAndWaitForGlobalAgreement ( ) ; for ( Initiator iv2init : m_iv2Initiators . values ( ) ) { iv2init . initDRGateway ( m_config . m_startAction , m_producerDRGateway , isLowestSiteId ( iv2init ) ) ; } m_producerDRGateway . completeInitialization ( ) ; } } catch ( Exception ex ) { CoreUtils . printPortsInUse ( hostLog ) ; VoltDB . crashLocalVoltDB ( "Failed to initialize DR producer" , false , ex ) ; } }
|
Initialize the DR producer so that any binary log generated on recover will be queued . This does NOT open the DR port . That will happen after command log replay finishes .
| 163
| 35
|
154,890
|
static public long computeMinimumHeapRqt ( int tableCount , int sitesPerHost , int kfactor ) { long baseRqt = 384 ; long tableRqt = 10 * tableCount ; // K-safety Heap consumption drop to 8 MB (per node) // Snapshot cost 32 MB (per node) // Theoretically, 40 MB (per node) should be enough long rejoinRqt = ( kfactor > 0 ) ? 128 * sitesPerHost : 0 ; return baseRqt + tableRqt + rejoinRqt ; }
|
Any changes there should get reflected here and vice versa .
| 116
| 11
|
154,891
|
synchronized void prepareCommit ( Session session ) { RowActionBase action = this ; do { if ( action . session == session && action . commitTimestamp == 0 ) { action . prepared = true ; } action = action . next ; } while ( action != null ) ; }
|
for two - phased pre - commit
| 60
| 7
|
154,892
|
synchronized void rollback ( Session session , long timestamp ) { RowActionBase action = this ; do { if ( action . session == session && action . commitTimestamp == 0 ) { if ( action . actionTimestamp >= timestamp || action . actionTimestamp == 0 ) { action . commitTimestamp = session . actionTimestamp ; action . rolledback = true ; action . prepared = false ; } } action = action . next ; } while ( action != null ) ; }
|
Rollback actions for a session including and after the given timestamp
| 101
| 12
|
154,893
|
synchronized int getCommitType ( long timestamp ) { RowActionBase action = this ; int type = ACTION_NONE ; do { if ( action . commitTimestamp == timestamp ) { type = action . type ; } action = action . next ; } while ( action != null ) ; return type ; }
|
returns type of commit performed on timestamp . ACTION_NONE if none .
| 66
| 16
|
154,894
|
synchronized boolean canCommit ( Session session , OrderedHashSet set ) { RowActionBase action ; long timestamp = session . transactionTimestamp ; long commitTimestamp = 0 ; final boolean readCommitted = session . isolationMode == SessionInterface . TX_READ_COMMITTED ; action = this ; if ( readCommitted ) { do { if ( action . session == session ) { // for READ_COMMITTED, use action timestamp for later conflicts if ( action . commitTimestamp == 0 ) { timestamp = action . actionTimestamp ; } } action = action . next ; } while ( action != null ) ; action = this ; } do { if ( action . rolledback || action . type == ACTION_NONE ) { action = action . next ; continue ; } if ( action . session != session ) { if ( action . prepared ) { return false ; } if ( action . commitTimestamp == 0 && action . actionTimestamp != 0 ) { set . add ( action . session ) ; } else if ( action . commitTimestamp > commitTimestamp ) { commitTimestamp = action . commitTimestamp ; } } action = action . next ; } while ( action != null ) ; return commitTimestamp < timestamp ; }
|
returns false if another committed session has altered the same row
| 264
| 12
|
154,895
|
synchronized void mergeRollback ( Row row ) { RowActionBase action = this ; RowActionBase head = null ; RowActionBase tail = null ; if ( type == RowActionBase . ACTION_DELETE_FINAL || type == RowActionBase . ACTION_NONE ) { return ; } do { if ( action . rolledback ) { if ( tail != null ) { tail . next = null ; } } else { if ( head == null ) { head = tail = action ; } else { tail . next = action ; tail = action ; } } action = action . next ; } while ( action != null ) ; if ( head == null ) { boolean exists = ( type == RowActionBase . ACTION_DELETE ) ; if ( exists ) { setAsNoOp ( row ) ; } else { setAsDeleteFinal ( ) ; } } else { if ( head != this ) { setAsAction ( head ) ; } } }
|
merge rolled back actions
| 202
| 5
|
154,896
|
private void adjustReplicationFactorForURI ( HttpPut httpPut ) throws URISyntaxException { String queryString = httpPut . getURI ( ) . getQuery ( ) ; if ( ! StringUtils . isEmpty ( queryString ) && queryString . contains ( "op=CREATE" ) && ( queryString . contains ( "replication=" ) || ! StringUtils . isEmpty ( m_blockReplication ) ) ) { rateLimitedLogWarn ( m_logger , "Set block replication factor in the target system." ) ; if ( ! StringUtils . isEmpty ( m_blockReplication ) && ! queryString . contains ( "replication=" ) ) { StringBuilder builder = new StringBuilder ( 128 ) ; builder . append ( queryString ) . append ( "&replication=" ) . append ( m_blockReplication ) ; URI oldUri = httpPut . getURI ( ) ; URI newUri = new URI ( oldUri . getScheme ( ) , oldUri . getAuthority ( ) , oldUri . getPath ( ) , builder . toString ( ) , oldUri . getFragment ( ) ) ; httpPut . setURI ( newUri ) ; } } }
|
append replication factor to the URI for CREATE operation if the factor is not in URI
| 267
| 17
|
154,897
|
private List < NameValuePair > sign ( URI uri , final List < NameValuePair > params ) { Preconditions . checkNotNull ( m_secret ) ; final List < NameValuePair > sortedParams = Lists . newArrayList ( params ) ; Collections . sort ( sortedParams , new Comparator < NameValuePair > ( ) { @ Override public int compare ( NameValuePair left , NameValuePair right ) { return left . getName ( ) . compareTo ( right . getName ( ) ) ; } } ) ; final StringBuilder paramSb = new StringBuilder ( ) ; String separator = "" ; for ( NameValuePair param : sortedParams ) { paramSb . append ( separator ) . append ( param . getName ( ) ) ; if ( param . getValue ( ) != null ) { paramSb . append ( "=" ) . append ( param . getValue ( ) ) ; } separator = "&" ; } final StringBuilder baseSb = new StringBuilder ( ) ; baseSb . append ( m_method ) . append ( ' ' ) ; baseSb . append ( uri . getHost ( ) ) . append ( ' ' ) ; baseSb . append ( uri . getPath ( ) . isEmpty ( ) ? ' ' : uri . getPath ( ) ) . append ( ' ' ) ; baseSb . append ( paramSb . toString ( ) ) ; final Mac hmac ; final Key key ; try { hmac = Mac . getInstance ( m_signatureMethod ) ; key = new SecretKeySpec ( m_secret . getBytes ( Charsets . UTF_8 ) , m_signatureMethod ) ; hmac . init ( key ) ; } catch ( NoSuchAlgorithmException e ) { // should never happen rateLimitedLogError ( m_logger , "Fail to get HMAC instance %s" , Throwables . getStackTraceAsString ( e ) ) ; return null ; } catch ( InvalidKeyException e ) { rateLimitedLogError ( m_logger , "Fail to sign the message %s" , Throwables . getStackTraceAsString ( e ) ) ; return null ; } sortedParams . add ( new BasicNameValuePair ( m_signatureName , NVPairsDecoder . percentEncode ( Encoder . base64Encode ( hmac . doFinal ( baseSb . toString ( ) . getBytes ( Charsets . UTF_8 ) ) ) ) ) ) ; return sortedParams ; }
|
Calculate the signature of the request using the specified secret key .
| 558
| 14
|
154,898
|
public final FluentIterable < T > preOrderTraversal ( final T root ) { checkNotNull ( root ) ; return new FluentIterable < T > ( ) { @ Override public UnmodifiableIterator < T > iterator ( ) { return preOrderIterator ( root ) ; } } ; }
|
Returns an unmodifiable iterable over the nodes in a tree structure using pre - order traversal . That is each node s subtrees are traversed after the node itself is returned .
| 66
| 38
|
154,899
|
public final FluentIterable < T > breadthFirstTraversal ( final T root ) { checkNotNull ( root ) ; return new FluentIterable < T > ( ) { @ Override public UnmodifiableIterator < T > iterator ( ) { return new BreadthFirstIterator ( root ) ; } } ; }
|
Returns an unmodifiable iterable over the nodes in a tree structure using breadth - first traversal . That is all the nodes of depth 0 are returned then depth 1 then 2 and so on .
| 68
| 40
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.