idx : int64 , range 0 to 165k
question : string , lengths 73 to 4.15k
target : string , lengths 5 to 918
len_question : int64 , range 21 to 890
len_target : int64 , range 3 to 255
155,700
public static int compareRows ( Object [ ] a , Object [ ] b , int [ ] cols , Type [ ] coltypes ) { int fieldcount = cols . length ; for ( int j = 0 ; j < fieldcount ; j ++ ) { int i = coltypes [ cols [ j ] ] . compare ( a [ cols [ j ] ] , b [ cols [ j ] ] ) ; if ( i != 0 ) { return i ; } } return 0 ; }
compares two full table rows based on a set of columns
105
12
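The row above shows the common lexicographic, column-subset comparison idiom; below is a minimal self-contained sketch of the same idea (the class and parameter names are invented for illustration, not taken from the source):

import java.util.Comparator;

// Compares two rows on a chosen subset of columns, in order:
// the first column whose values differ decides the result.
final class RowComparator implements Comparator<Object[]> {
    private final int[] cols;                    // column positions to compare
    private final Comparator<Object>[] coltypes; // per-column comparators

    RowComparator(int[] cols, Comparator<Object>[] coltypes) {
        this.cols = cols;
        this.coltypes = coltypes;
    }

    @Override
    public int compare(Object[] a, Object[] b) {
        for (int c : cols) {
            int r = coltypes[c].compare(a[c], b[c]);
            if (r != 0) {
                return r; // first differing column decides the order
            }
        }
        return 0; // rows are equal on every selected column
    }
}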
155,701
@ Override public int size ( PersistentStore store ) { int count = 0 ; readLock . lock ( ) ; try { RowIterator it = firstRow ( null , store ) ; while ( it . hasNext ( ) ) { it . getNextRow ( ) ; count ++ ; } return count ; } finally { readLock . unlock ( ) ; } }
Returns the node count .
76
5
155,702
@ Override public void insert ( Session session , PersistentStore store , Row row ) { NodeAVL n ; NodeAVL x ; boolean isleft = true ; int compare = - 1 ; writeLock . lock ( ) ; try { n = getAccessor ( store ) ; x = n ; if ( n == null ) { store . setAccessor ( this , ( ( RowAVL ) row ) . getNode ( position ) ) ; return ; } while ( true ) { Row currentRow = n . getRow ( store ) ; compare = compareRowForInsertOrDelete ( session , row , currentRow ) ; if ( compare == 0 ) { throw Error . error ( ErrorCode . X_23505 ) ; } isleft = compare < 0 ; x = n ; n = child ( store , x , isleft ) ; if ( n == null ) { break ; } } x = set ( store , x , isleft , ( ( RowAVL ) row ) . getNode ( position ) ) ; balance ( store , x , isleft ) ; } finally { writeLock . unlock ( ) ; } }
Insert a node into the index
236
6
155,703
@ Override public RowIterator findFirstRow ( Session session , PersistentStore store , Object [ ] rowdata , int match ) { NodeAVL node = findNode ( session , store , rowdata , defaultColMap , match ) ; return getIterator ( session , store , node ) ; }
Return the first node equal to the indexdata object . The rowdata has the same column mapping as this index .
62
23
155,704
@ Override public RowIterator findFirstRow ( Session session , PersistentStore store , Object [ ] rowdata ) { NodeAVL node = findNode ( session , store , rowdata , colIndex , colIndex . length ) ; return getIterator ( session , store , node ) ; }
Return the first node equal to the rowdata object . The rowdata has the same column mapping as this table .
61
23
155,705
@ Override public RowIterator findFirstRow ( Session session , PersistentStore store , Object value , int compare ) { readLock . lock ( ) ; try { if ( compare == OpTypes . SMALLER || compare == OpTypes . SMALLER_EQUAL ) { return findFirstRowNotNull ( session , store ) ; } boolean isEqual = compare == OpTypes . EQUAL || compare == OpTypes . IS_NULL ; NodeAVL x = getAccessor ( store ) ; int iTest = 1 ; if ( compare == OpTypes . GREATER ) { iTest = 0 ; } if ( value == null && ! isEqual ) { return emptyIterator ; } // this method returns the correct node only with the following conditions boolean check = compare == OpTypes . GREATER || compare == OpTypes . EQUAL || compare == OpTypes . GREATER_EQUAL ; if ( ! check ) { Error . runtimeError ( ErrorCode . U_S0500 , "Index.findFirst" ) ; } while ( x != null ) { boolean t = colTypes [ 0 ] . compare ( value , x . getRow ( store ) . getData ( ) [ colIndex [ 0 ] ] ) >= iTest ; if ( t ) { NodeAVL r = x . getRight ( store ) ; if ( r == null ) { break ; } x = r ; } else { NodeAVL l = x . getLeft ( store ) ; if ( l == null ) { break ; } x = l ; } } /* while (x != null && Column.compare(value, x.getData()[colIndex_0], colType_0) >= iTest) { x = next(x); } */ while ( x != null ) { Object colvalue = x . getRow ( store ) . getData ( ) [ colIndex [ 0 ] ] ; int result = colTypes [ 0 ] . compare ( value , colvalue ) ; if ( result >= iTest ) { x = next ( store , x ) ; } else { if ( isEqual ) { if ( result != 0 ) { x = null ; } } else if ( colvalue == null ) { x = next ( store , x ) ; continue ; } break ; } } // MVCC if ( session == null || x == null ) { return getIterator ( session , store , x ) ; } while ( x != null ) { Row row = x . getRow ( store ) ; if ( compare == OpTypes . EQUAL && colTypes [ 0 ] . compare ( value , row . getData ( ) [ colIndex [ 0 ] ] ) != 0 ) { x = null ; break ; } if ( session . database . txManager . canRead ( session , row ) ) { break ; } x = next ( store , x ) ; } return getIterator ( session , store , x ) ; } finally { readLock . unlock ( ) ; } }
Finds the first node that is larger than or equal to the given one , based on the first column of the index only .
625
24
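The descent loop in findFirstRow above is the classic binary-search-tree lower-bound pattern; here is a hedged, self-contained sketch of that pattern on a plain integer tree (IntNode and lowerBound are invented names, not the source API):

final class IntNode {
    int key;
    IntNode left, right;
    IntNode(int key) { this.key = key; }
}

final class LowerBound {
    // Returns the node holding the smallest key >= target, or null if none exists.
    static IntNode lowerBound(IntNode root, int target) {
        IntNode best = null;
        IntNode x = root;
        while (x != null) {
            if (x.key >= target) {
                best = x;    // candidate; an even smaller qualifying key may sit to the left
                x = x.left;
            } else {
                x = x.right; // everything in this subtree is too small
            }
        }
        return best;
    }
}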
155,706
@ Override public RowIterator findFirstRowNotNull ( Session session , PersistentStore store ) { readLock . lock ( ) ; try { NodeAVL x = getAccessor ( store ) ; while ( x != null ) { boolean t = colTypes [ 0 ] . compare ( null , x . getRow ( store ) . getData ( ) [ colIndex [ 0 ] ] ) >= 0 ; if ( t ) { NodeAVL r = x . getRight ( store ) ; if ( r == null ) { break ; } x = r ; } else { NodeAVL l = x . getLeft ( store ) ; if ( l == null ) { break ; } x = l ; } } while ( x != null ) { Object colvalue = x . getRow ( store ) . getData ( ) [ colIndex [ 0 ] ] ; if ( colvalue == null ) { x = next ( store , x ) ; } else { break ; } } // MVCC while ( session != null && x != null ) { Row row = x . getRow ( store ) ; if ( session . database . txManager . canRead ( session , row ) ) { break ; } x = next ( store , x ) ; } return getIterator ( session , store , x ) ; } finally { readLock . unlock ( ) ; } }
Finds the first node where the data is not null .
283
12
155,707
@ Override public RowIterator firstRow ( Session session , PersistentStore store ) { int tempDepth = 0 ; readLock . lock ( ) ; try { NodeAVL x = getAccessor ( store ) ; NodeAVL l = x ; while ( l != null ) { x = l ; l = x . getLeft ( store ) ; tempDepth ++ ; } while ( session != null && x != null ) { Row row = x . getRow ( store ) ; if ( session . database . txManager . canRead ( session , row ) ) { break ; } x = next ( store , x ) ; } return getIterator ( session , store , x ) ; } finally { depth = tempDepth ; readLock . unlock ( ) ; } }
Returns the row for the first node of the index
159
10
155,708
@ Override public Row lastRow ( Session session , PersistentStore store ) { readLock . lock ( ) ; try { NodeAVL x = getAccessor ( store ) ; NodeAVL l = x ; while ( l != null ) { x = l ; l = x . getRight ( store ) ; } while ( session != null && x != null ) { Row row = x . getRow ( store ) ; if ( session . database . txManager . canRead ( session , row ) ) { break ; } x = last ( store , x ) ; } return x == null ? null : x . getRow ( store ) ; } finally { readLock . unlock ( ) ; } }
Returns the row for the last node of the index
147
10
155,709
private NodeAVL next ( Session session , PersistentStore store , NodeAVL x ) { if ( x == null ) { return null ; } readLock . lock ( ) ; try { while ( true ) { x = next ( store , x ) ; if ( x == null ) { return x ; } if ( session == null ) { return x ; } Row row = x . getRow ( store ) ; if ( session . database . txManager . canRead ( session , row ) ) { return x ; } } } finally { readLock . unlock ( ) ; } }
Returns the node after the given one
122
7
155,710
private void replace ( PersistentStore store , NodeAVL x , NodeAVL n ) { if ( x . isRoot ( ) ) { if ( n != null ) { n = n . setParent ( store , null ) ; } store . setAccessor ( this , n ) ; } else { set ( store , x . getParent ( store ) , x . isFromLeft ( store ) , n ) ; } }
Replace x with n
90
5
155,711
@ Override public int compareRowNonUnique ( Object [ ] a , Object [ ] b , int fieldcount ) { for ( int j = 0 ; j < fieldcount ; j ++ ) { int i = colTypes [ j ] . compare ( a [ j ] , b [ colIndex [ j ] ] ) ; if ( i != 0 ) { return i ; } } return 0 ; }
As above but use the index column data
83
8
155,712
private int compareRowForInsertOrDelete ( Session session , Row newRow , Row existingRow ) { Object [ ] a = newRow . getData ( ) ; Object [ ] b = existingRow . getData ( ) ; int j = 0 ; boolean hasNull = false ; for ( ; j < colIndex . length ; j ++ ) { Object currentvalue = a [ colIndex [ j ] ] ; Object othervalue = b [ colIndex [ j ] ] ; int i = colTypes [ j ] . compare ( currentvalue , othervalue ) ; boolean nulls = currentvalue == null || othervalue == null ; if ( i != 0 ) { if ( colDesc [ j ] && ! nulls ) { i = - i ; } if ( nullsLast [ j ] && nulls ) { i = - i ; } return i ; } if ( currentvalue == null ) { hasNull = true ; } } if ( isUnique && ! useRowId && ! hasNull ) { if ( session == null || session . database . txManager . canRead ( session , existingRow ) ) { //* debug 190 // session.database.txManager.canRead(session, existingRow); return 0 ; } else { int difference = newRow . getPos ( ) - existingRow . getPos ( ) ; return difference ; } } for ( j = 0 ; j < pkCols . length ; j ++ ) { Object currentvalue = a [ pkCols [ j ] ] ; int i = pkTypes [ j ] . compare ( currentvalue , b [ pkCols [ j ] ] ) ; if ( i != 0 ) { return i ; } } if ( useRowId ) { int difference = newRow . getPos ( ) - existingRow . getPos ( ) ; if ( difference < 0 ) { difference = - 1 ; } else if ( difference > 0 ) { difference = 1 ; } return difference ; } if ( session == null || session . database . txManager . canRead ( session , existingRow ) ) { return 0 ; } else { int difference = newRow . getPos ( ) - existingRow . getPos ( ) ; if ( difference < 0 ) { difference = - 1 ; } else if ( difference > 0 ) { difference = 1 ; } return difference ; } }
Compare two rows of the table for inserting rows into unique indexes . Supports descending columns .
490
16
155,713
private NodeAVL findNode ( Session session , PersistentStore store , Object [ ] rowdata , int [ ] rowColMap , int fieldCount ) { readLock . lock ( ) ; try { NodeAVL x = getAccessor ( store ) ; NodeAVL n ; NodeAVL result = null ; while ( x != null ) { int i = this . compareRowNonUnique ( rowdata , rowColMap , x . getRow ( store ) . getData ( ) , fieldCount ) ; if ( i == 0 ) { result = x ; n = x . getLeft ( store ) ; } else if ( i > 0 ) { n = x . getRight ( store ) ; } else { n = x . getLeft ( store ) ; } if ( n == null ) { break ; } x = n ; } // MVCC 190 if ( session == null ) { return result ; } while ( result != null ) { Row row = result . getRow ( store ) ; if ( compareRowNonUnique ( rowdata , rowColMap , row . getData ( ) , fieldCount ) != 0 ) { result = null ; break ; } if ( session . database . txManager . canRead ( session , row ) ) { break ; } result = next ( store , result ) ; } return result ; } finally { readLock . unlock ( ) ; } }
Finds a match with a row from a different table
290
11
155,714
private void balance ( PersistentStore store , NodeAVL x , boolean isleft ) { while ( true ) { int sign = isleft ? 1 : - 1 ; switch ( x . getBalance ( ) * sign ) { case 1 : x = x . setBalance ( store , 0 ) ; return ; case 0 : x = x . setBalance ( store , - sign ) ; break ; case - 1 : NodeAVL l = child ( store , x , isleft ) ; if ( l . getBalance ( ) == - sign ) { replace ( store , x , l ) ; x = set ( store , x , isleft , child ( store , l , ! isleft ) ) ; l = set ( store , l , ! isleft , x ) ; x = x . setBalance ( store , 0 ) ; l = l . setBalance ( store , 0 ) ; } else { NodeAVL r = child ( store , l , ! isleft ) ; replace ( store , x , r ) ; l = set ( store , l , ! isleft , child ( store , r , isleft ) ) ; r = set ( store , r , isleft , l ) ; x = set ( store , x , isleft , child ( store , r , ! isleft ) ) ; r = set ( store , r , ! isleft , x ) ; int rb = r . getBalance ( ) ; x = x . setBalance ( store , ( rb == - sign ) ? sign : 0 ) ; l = l . setBalance ( store , ( rb == sign ) ? - sign : 0 ) ; r = r . setBalance ( store , 0 ) ; } return ; } if ( x . isRoot ( ) ) { return ; } isleft = x . isFromLeft ( store ) ; x = x . getParent ( store ) ; } }
Balances part of the tree after an alteration to the index .
396
13
155,715
public AbstractExpression resolveTVE ( TupleValueExpression tve ) { AbstractExpression resolvedExpr = processTVE ( tve , tve . getColumnName ( ) ) ; List < TupleValueExpression > tves = ExpressionUtil . getTupleValueExpressions ( resolvedExpr ) ; for ( TupleValueExpression subqTve : tves ) { resolveLeafTve ( subqTve ) ; } return resolvedExpr ; }
The parameter tve is a column reference obtained by parsing a column ref VoltXML element . We need to find out to which column in the current table scan the name of the TVE refers and transfer metadata from the schema's column to the tve . The function processTVE does the transfer .
103
61
155,716
void resolveColumnIndexesUsingSchema ( NodeSchema inputSchema ) { // get all the TVEs in the output columns int difftor = 0 ; for ( SchemaColumn col : m_outputSchema ) { col . setDifferentiator ( difftor ) ; ++ difftor ; Collection < TupleValueExpression > allTves = ExpressionUtil . getTupleValueExpressions ( col . getExpression ( ) ) ; // and update their indexes against the table schema for ( TupleValueExpression tve : allTves ) { tve . setColumnIndexUsingSchema ( inputSchema ) ; assert ( tve . getColumnIndex ( ) >= 0 && tve . getColumnIndex ( ) < inputSchema . size ( ) ) ; } } // DON'T RE-SORT HERE }
Given an input schema , resolve all the TVEs in all the output column expressions . This method is necessary to be able to do this for inlined projection nodes that don't have a child from which they can get an output schema .
181
46
155,717
public boolean isIdentity ( AbstractPlanNode childNode ) throws PlanningErrorException { assert ( childNode != null ) ; // Find the output schema. // If the child node has an inline projection node, // then the output schema is the inline projection // node's output schema. Otherwise it's the output // schema of the childNode itself. NodeSchema childSchema = childNode . getTrueOutputSchema ( false ) ; assert ( childSchema != null ) ; NodeSchema outputSchema = getOutputSchema ( ) ; if ( outputSchema . size ( ) != childSchema . size ( ) ) { return false ; } for ( int idx = 0 ; idx < outputSchema . size ( ) ; idx += 1 ) { SchemaColumn col = outputSchema . getColumn ( idx ) ; SchemaColumn childCol = childSchema . getColumn ( idx ) ; if ( col . getValueType ( ) != childCol . getValueType ( ) ) { return false ; } if ( ! ( col . getExpression ( ) instanceof TupleValueExpression ) ) { return false ; } if ( ! ( childCol . getExpression ( ) instanceof TupleValueExpression ) ) { return false ; } TupleValueExpression tve = ( TupleValueExpression ) col . getExpression ( ) ; if ( tve . getColumnIndex ( ) != idx ) { return false ; } } return true ; }
Return true if this node is unneeded when its input schema is the given one .
315
16
155,718
public void replaceChildOutputSchemaNames ( AbstractPlanNode child ) { NodeSchema childSchema = child . getTrueOutputSchema ( false ) ; NodeSchema mySchema = getOutputSchema ( ) ; assert ( childSchema . size ( ) == mySchema . size ( ) ) ; for ( int idx = 0 ; idx < childSchema . size ( ) ; idx += 1 ) { SchemaColumn cCol = childSchema . getColumn ( idx ) ; SchemaColumn myCol = mySchema . getColumn ( idx ) ; assert ( cCol . getValueType ( ) == myCol . getValueType ( ) ) ; assert ( cCol . getExpression ( ) instanceof TupleValueExpression ) ; assert ( myCol . getExpression ( ) instanceof TupleValueExpression ) ; cCol . reset ( myCol . getTableName ( ) , myCol . getTableAlias ( ) , myCol . getColumnName ( ) , myCol . getColumnAlias ( ) ) ; } }
Replace the column names in the output schema of the child node with the output schema column names of this node . We use this when we delete an unnecessary projection node . We only need to make sure the column names are changed , since we will have checked carefully that everything else is the same .
228
56
155,719
void deliverToRepairLog ( VoltMessage msg ) { assert ( Thread . currentThread ( ) . getId ( ) == m_taskThreadId ) ; m_repairLog . deliver ( msg ) ; }
when the MpScheduler needs to log the completion of a transaction to its local repair log
44
20
155,720
private void sendInternal ( long destHSId , VoltMessage message ) { message . m_sourceHSId = getHSId ( ) ; m_messenger . send ( destHSId , message ) ; }
have a serialized order to all hosts .
44
9
155,721
public static ClientAffinityStats diff ( ClientAffinityStats newer , ClientAffinityStats older ) { if ( newer . m_partitionId != older . m_partitionId ) { throw new IllegalArgumentException ( "Can't diff these ClientAffinityStats instances." ) ; } ClientAffinityStats retval = new ClientAffinityStats ( older . m_partitionId , newer . m_affinityWrites - older . m_affinityWrites , newer . m_rrWrites - older . m_rrWrites , newer . m_affinityReads - older . m_affinityReads , newer . m_rrReads - older . m_rrReads ) ; return retval ; }
Subtract one ClientAffinityStats instance from another to produce a third .
156
16
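The diff above follows the usual cumulative-counter pattern: statistics are monotonically increasing totals, and an interval reading is newer minus older. A minimal hedged sketch of that pattern (CounterSnapshot is an invented stand-in, not the real ClientAffinityStats API):

final class CounterSnapshot {
    final int partitionId;
    final long reads;
    final long writes;

    CounterSnapshot(int partitionId, long reads, long writes) {
        this.partitionId = partitionId;
        this.reads = reads;
        this.writes = writes;
    }

    // Interval statistics from two cumulative snapshots: delta = newer - older.
    static CounterSnapshot diff(CounterSnapshot newer, CounterSnapshot older) {
        if (newer.partitionId != older.partitionId) {
            throw new IllegalArgumentException("snapshots cover different partitions");
        }
        return new CounterSnapshot(older.partitionId,
                newer.reads - older.reads,
                newer.writes - older.writes);
    }
}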
155,722
private int addFramesForCompleteMessage ( ) { boolean added = false ; EncryptFrame frame = null ; int delta = 0 ; while ( ! added && ( frame = m_encryptedFrames . poll ( ) ) != null ) { if ( ! frame . isLast ( ) ) { //TODO: Review - I don't think this synchronized(m_partialMessages) is required. // This is the only method with synchronized(m_partialMessages) and // it doesn't look like this method will be called from multiple threads concurrently. // Take this out 8.0 release. synchronized ( m_partialMessages ) { m_partialMessages . add ( frame ) ; ++ m_partialSize ; } continue ; } final int partialSize = m_partialSize ; if ( partialSize > 0 ) { assert frame . chunks == partialSize + 1 : "partial frame buildup has wrong number of preceding pieces" ; //TODO: Review - I don't think this synchronized(m_partialMessages) is required. // See comment above. // Take this out 8.0 release. synchronized ( m_partialMessages ) { for ( EncryptFrame frm : m_partialMessages ) { m_encryptedMessages . addComponent ( true , frm . frame ) ; delta += frm . delta ; } m_partialMessages . clear ( ) ; m_partialSize = 0 ; } } m_encryptedMessages . addComponent ( true , frame . frame ) ; delta += frame . delta ; m_numEncryptedMessages += frame . msgs ; added = true ; } return added ? delta : - 1 ; }
Gather all the frames that comprise a whole Volt Message . Returns the delta between the original message byte count and encrypted message byte count .
347
26
155,723
void shutdown ( ) { m_isShutdown = true ; try { int waitFor = 1 - Math . min ( m_inFlight . availablePermits ( ) , - 4 ) ; for ( int i = 0 ; i < waitFor ; ++ i ) { try { if ( m_inFlight . tryAcquire ( 1 , TimeUnit . SECONDS ) ) { m_inFlight . release ( ) ; break ; } } catch ( InterruptedException e ) { break ; } } m_ecryptgw . die ( ) ; EncryptFrame frame = null ; while ( ( frame = m_encryptedFrames . poll ( ) ) != null ) { frame . frame . release ( ) ; } for ( EncryptFrame ef : m_partialMessages ) { ef . frame . release ( ) ; } m_partialMessages . clear ( ) ; if ( m_encryptedMessages . refCnt ( ) > 0 ) m_encryptedMessages . release ( ) ; } finally { m_inFlight . drainPermits ( ) ; m_inFlight . release ( ) ; } }
Called from synchronized block only
236
6
155,724
private Runnable createCompletionTask ( final Mailbox mb ) { return new Runnable ( ) { @ Override public void run ( ) { VoltDB . instance ( ) . getHostMessenger ( ) . removeMailbox ( mb . getHSId ( ) ) ; } } ; }
Remove the mailbox from the host messenger after all data targets are done .
65
14
155,725
private Callable < Boolean > coalesceTruncationSnapshotPlan ( String file_path , String pathType , String file_nonce , long txnId , Map < Integer , Long > partitionTransactionIds , SystemProcedureExecutionContext context , VoltTable result , ExtensibleSnapshotDigestData extraSnapshotData , SiteTracker tracker , HashinatorSnapshotData hashinatorData , long timestamp , int newPartitionCount ) { final NativeSnapshotWritePlan plan = new NativeSnapshotWritePlan ( ) ; final Callable < Boolean > deferredTruncationSetup = plan . createSetupInternal ( file_path , pathType , file_nonce , txnId , partitionTransactionIds , new SnapshotRequestConfig ( newPartitionCount , context . getDatabase ( ) ) , context , result , extraSnapshotData , tracker , hashinatorData , timestamp ) ; m_taskListsForHSIds . putAll ( plan . m_taskListsForHSIds ) ; return new Callable < Boolean > ( ) { @ Override public Boolean call ( ) throws Exception { final Boolean retval = deferredTruncationSetup . call ( ) ; m_targets . addAll ( plan . m_targets ) ; return retval ; } } ; }
NativeSnapshotWritePlan to include all tables .
275
10
155,726
void killSocket ( ) { try { m_closing = true ; m_socket . setKeepAlive ( false ) ; m_socket . setSoLinger ( false , 0 ) ; Thread . sleep ( 25 ) ; m_socket . close ( ) ; Thread . sleep ( 25 ) ; System . gc ( ) ; Thread . sleep ( 25 ) ; } catch ( Exception e ) { // don't REALLY care if this fails e . printStackTrace ( ) ; } }
Used only for test code to kill this FH
103
10
155,727
void send ( final long destinations [ ] , final VoltMessage message ) { if ( ! m_isUp ) { hostLog . warn ( "Failed to send VoltMessage because connection to host " + CoreUtils . getHostIdFromHSId ( destinations [ 0 ] ) + " is closed" ) ; return ; } if ( destinations . length == 0 ) { return ; } // if this link is "gone silent" for partition tests, just drop the message on the floor if ( ! m_linkCutForTest . get ( ) ) { m_network . enqueue ( new DeferredSerialization ( ) { @ Override public final void serialize ( final ByteBuffer buf ) throws IOException { buf . putInt ( buf . capacity ( ) - 4 ) ; buf . putLong ( message . m_sourceHSId ) ; buf . putInt ( destinations . length ) ; for ( int ii = 0 ; ii < destinations . length ; ii ++ ) { buf . putLong ( destinations [ ii ] ) ; } message . flattenToBuffer ( buf ) ; buf . flip ( ) ; } @ Override public final void cancel ( ) { /* * Can this be removed? */ } @ Override public String toString ( ) { return message . getClass ( ) . getName ( ) ; } @ Override public int getSerializedSize ( ) { final int len = 4 /* length prefix */ + 8 /* source hsid */ + 4 /* destinationCount */ + 8 * destinations . length /* destination list */ + message . getSerializedSize ( ) ; return len ; } } ) ; } long current_time = EstTime . currentTimeMillis ( ) ; long current_delta = current_time - m_lastMessageMillis . get ( ) ; /* * Try and give some warning when a connection is timing out. * Allows you to observe the liveness of the host receiving the heartbeats */ if ( isPrimary ( ) && current_delta > m_logRate ) { rateLimitedLogger . log ( "Have not received a message from host " + hostnameAndIPAndPort ( ) + " for " + ( current_delta / 1000.0 ) + " seconds" , current_time ) ; } // NodeFailureFault no longer immediately trips FHInputHandler to // set m_isUp to false, so use both that and m_closing to // avoid repeat reports of a single node failure if ( ( ! m_closing && m_isUp ) && isPrimary ( ) && current_delta > m_deadHostTimeout ) { if ( m_deadReportsCount . getAndIncrement ( ) == 0 ) { hostLog . error ( "DEAD HOST DETECTED, hostname: " + hostnameAndIPAndPort ( ) ) ; hostLog . info ( "\tcurrent time: " + current_time ) ; hostLog . info ( "\tlast message: " + m_lastMessageMillis ) ; hostLog . info ( "\tdelta (millis): " + current_delta ) ; hostLog . info ( "\ttimeout value (millis): " + m_deadHostTimeout ) ; VoltDB . dropStackTrace ( "Timed out foreign host " + hostnameAndIPAndPort ( ) + " with delta " + current_delta ) ; } m_hostMessenger . reportForeignHostFailed ( m_hostId ) ; } }
Send a message to the network . This public method is re - entrant .
731
16
155,728
private void deliverMessage ( long destinationHSId , VoltMessage message ) { if ( ! m_hostMessenger . validateForeignHostId ( m_hostId ) ) { hostLog . warn ( String . format ( "Message (%s) sent to site id: %s @ (%s) at %d from %s " + "which is a known failed host. The message will be dropped\n" , message . getClass ( ) . getSimpleName ( ) , CoreUtils . hsIdToString ( destinationHSId ) , m_socket . getRemoteSocketAddress ( ) . toString ( ) , m_hostMessenger . getHostId ( ) , CoreUtils . hsIdToString ( message . m_sourceHSId ) ) ) ; return ; } Mailbox mailbox = m_hostMessenger . getMailbox ( destinationHSId ) ; /* * At this point we are OK with messages going to sites that don't exist * because we are saying that things can come and go */ if ( mailbox == null ) { hostLog . info ( String . format ( "Message (%s) sent to unknown site id: %s @ (%s) at %d from %s \n" , message . getClass ( ) . getSimpleName ( ) , CoreUtils . hsIdToString ( destinationHSId ) , m_socket . getRemoteSocketAddress ( ) . toString ( ) , m_hostMessenger . getHostId ( ) , CoreUtils . hsIdToString ( message . m_sourceHSId ) ) ) ; /* * If it is for the wrong host, that definitely isn't cool */ if ( m_hostMessenger . getHostId ( ) != ( int ) destinationHSId ) { VoltDB . crashLocalVoltDB ( "Received a message at wrong host" , false , null ) ; } return ; } // deliver the message to the mailbox mailbox . deliver ( message ) ; }
Deliver a deserialized message from the network to a local mailbox
415
14
155,729
private void handleRead ( ByteBuffer in , Connection c ) throws IOException { // port is locked by VoltNetwork when in valid use. // assert(m_port.m_lock.tryLock() == true); long recvDests [ ] = null ; final long sourceHSId = in . getLong ( ) ; final int destCount = in . getInt ( ) ; if ( destCount == POISON_PILL ) { //This is a poison pill //Ignore poison pill during shutdown, in tests we receive crash messages from //leader appointer during shutdown if ( VoltDB . instance ( ) . getMode ( ) == OperationMode . SHUTTINGDOWN ) { return ; } byte messageBytes [ ] = new byte [ in . getInt ( ) ] ; in . get ( messageBytes ) ; String message = new String ( messageBytes , "UTF-8" ) ; message = String . format ( "Fatal error from id,hostname(%d,%s): %s" , m_hostId , hostnameAndIPAndPort ( ) , message ) ; //if poison pill with particular cause handle it. int cause = in . getInt ( ) ; if ( cause == ForeignHost . CRASH_ME ) { int hid = VoltDB . instance ( ) . getHostMessenger ( ) . getHostId ( ) ; hostLog . debug ( "Poison Pill with target me was sent.: " + hid ) ; //Killing myself. VoltDB . instance ( ) . halt ( ) ; } else if ( cause == ForeignHost . CRASH_ALL || cause == ForeignHost . CRASH_SPECIFIED ) { org . voltdb . VoltDB . crashLocalVoltDB ( message , false , null ) ; } else if ( cause == ForeignHost . PRINT_STACKTRACE ) { //collect thread dumps String dumpDir = new File ( VoltDB . instance ( ) . getVoltDBRootPath ( ) , "thread_dumps" ) . getAbsolutePath ( ) ; String fileName = m_hostMessenger . getHostname ( ) + "_host-" + m_hostId + "_" + System . currentTimeMillis ( ) + ".jstack" ; VoltDB . dumpThreadTraceToFile ( dumpDir , fileName ) ; } else { //Should never come here. hostLog . error ( "Invalid Cause in poison pill: " + cause ) ; } return ; } else if ( destCount == STOPNODE_NOTICE ) { int targetHostId = in . getInt ( ) ; hostLog . info ( "Receive StopNode notice for host " + targetHostId ) ; m_hostMessenger . addStopNodeNotice ( targetHostId ) ; return ; } recvDests = new long [ destCount ] ; for ( int i = 0 ; i < destCount ; i ++ ) { recvDests [ i ] = in . getLong ( ) ; } final VoltMessage message = m_hostMessenger . getMessageFactory ( ) . createMessageFromBuffer ( in , sourceHSId ) ; // ENG-1608. We sniff for SiteFailureMessage here so // that a node will participate in the failure resolution protocol // even if it hasn't directly witnessed a node fault. if ( message instanceof SiteFailureMessage && ! ( message instanceof SiteFailureForwardMessage ) ) { SiteFailureMessage sfm = ( SiteFailureMessage ) message ; for ( FaultMessage fm : sfm . asFaultMessages ( ) ) { m_hostMessenger . relayForeignHostFailed ( fm ) ; } } for ( int i = 0 ; i < destCount ; i ++ ) { deliverMessage ( recvDests [ i ] , message ) ; } //m_lastMessageMillis = System.currentTimeMillis(); m_lastMessageMillis . lazySet ( EstTime . currentTimeMillis ( ) ) ; }
Read data from the network . Runs in the context of PicoNetwork thread when data is available .
833
20
155,730
@ Nullable private AvlNode < E > firstNode ( ) { AvlNode < E > root = rootReference . get ( ) ; if ( root == null ) { return null ; } AvlNode < E > node ; if ( range . hasLowerBound ( ) ) { E endpoint = range . getLowerEndpoint ( ) ; node = rootReference . get ( ) . ceiling ( comparator ( ) , endpoint ) ; if ( node == null ) { return null ; } if ( range . getLowerBoundType ( ) == BoundType . OPEN && comparator ( ) . compare ( endpoint , node . getElement ( ) ) == 0 ) { node = node . succ ; } } else { node = header . succ ; } return ( node == header || ! range . contains ( node . getElement ( ) ) ) ? null : node ; }
Returns the first node in the tree that is in range .
180
12
155,731
public static MediaType create ( String type , String subtype ) { return create ( type , subtype , ImmutableListMultimap . < String , String > of ( ) ) ; }
Creates a new media type with the given type and subtype .
40
14
155,732
private static int getSerializedParamSizeForApplyBinaryLog ( int streamCount , int remotePartitionCount , int concatLogSize ) { int serializedParamSize = 2 + 1 + 4 // placeholder byte[0] + 1 + 4 // producerClusterId Integer + 1 + 4 + 4 + ( 4 + 8 * remotePartitionCount ) * streamCount // concatLogIds byte[] + 1 + 4 + 4 + ( 4 + 8 + 8 + 4 + 4 + 16 ) * streamCount // concatTrackerBufs (DRConsumerDrIdTracker) byte[] + 1 + 4 + 4 + 4 * streamCount + concatLogSize // concatLogs byte[] + 1 + 1 // extraOption Byte + 1 + 4 ; // extraParameters byte[0] return serializedParamSize ; }
calculate based on BinaryLogHelper and ParameterSet . fromArrayNoCopy
177
17
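To make the size arithmetic above concrete, here is a worked instance with hypothetical inputs (2 streams, 3 remote partitions, 100 bytes of concatenated log; the values are illustrative only):

final class SizeExample {
    public static void main(String[] args) {
        int streamCount = 2, remotePartitionCount = 3, concatLogSize = 100;
        int size = 2 + 1 + 4                                                 // placeholder byte[0]      ->   7
                 + 1 + 4                                                     // producerClusterId        ->   5
                 + 1 + 4 + 4 + (4 + 8 * remotePartitionCount) * streamCount  // concatLogIds             ->  65
                 + 1 + 4 + 4 + (4 + 8 + 8 + 4 + 4 + 16) * streamCount        // concatTrackerBufs        ->  97
                 + 1 + 4 + 4 + 4 * streamCount + concatLogSize               // concatLogs               -> 117
                 + 1 + 1                                                     // extraOption              ->   2
                 + 1 + 4;                                                    // extraParameters byte[0]  ->   5
        System.out.println(size); // prints 298 for these sample inputs
    }
}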
155,733
void restart ( ) { // The poisoning path will, unfortunately, set this to true. Need to undo that. setNeedsRollback ( false ) ; // Also need to make sure that we get the original invocation in the first fragment // since some masters may not have seen it. m_haveDistributedInitTask = false ; m_isRestart = true ; m_haveSentfragment = false ; m_drBufferChangedAgg = 0 ; }
Used to reset the internal state of this transaction so it can be successfully restarted
96
16
155,734
@ Override public void setupProcedureResume ( int [ ] dependencies ) { // Reset state so we can run this batch cleanly m_localWork = null ; m_remoteWork = null ; m_remoteDeps = null ; m_remoteDepTables . clear ( ) ; }
Overrides needed by MpProcedureRunner
63
11
155,735
public void setupProcedureResume ( List < Integer > deps ) { setupProcedureResume ( com . google_voltpatches . common . primitives . Ints . toArray ( deps ) ) ; }
I met this List at bandcamp ...
49
8
155,736
public void restartFragment ( FragmentResponseMessage message , List < Long > masters , Map < Integer , Long > partitionMastersMap ) { final int partionId = message . getPartitionId ( ) ; Long restartHsid = partitionMastersMap . get ( partionId ) ; Long hsid = message . getExecutorSiteId ( ) ; if ( ! hsid . equals ( restartHsid ) ) { m_masterMapForFragmentRestart . clear ( ) ; m_masterMapForFragmentRestart . put ( restartHsid , hsid ) ; //The very first fragment is to be rerouted to the new leader, then all the follow-up fragments are routed //to new leaders. updateMasters ( masters , partitionMastersMap ) ; } if ( restartHsid == null ) { restartHsid = hsid ; } if ( tmLog . isDebugEnabled ( ) ) { tmLog . debug ( "Rerouted fragment from " + CoreUtils . hsIdToString ( hsid ) + " to " + CoreUtils . hsIdToString ( restartHsid ) + "\n" + m_remoteWork ) ; } m_fragmentRestarted = true ; m_mbox . send ( restartHsid , m_remoteWork ) ; }
Restart this fragment after it is mis - routed during MigratePartitionLeader . If the masters have been updated , the fragment will be routed to the new master ; otherwise it will be routed to the old master until the new master is updated .
283
49
155,737
private boolean checkNewUniqueIndex ( Index newIndex ) { Table table = ( Table ) newIndex . getParent ( ) ; CatalogMap < Index > existingIndexes = m_originalIndexesByTable . get ( table . getTypeName ( ) ) ; for ( Index existingIndex : existingIndexes ) { if ( indexCovers ( newIndex , existingIndex ) ) { return true ; } } return false ; }
Check if there is a unique index that exists in the old catalog that is covered by the new index . That would mean adding this index can't fail with a duplicate key .
88
35
155,738
private String createViewDisallowedMessage ( String viewName , String singleTableName ) { boolean singleTable = ( singleTableName != null ) ; return String . format ( "Unable to create %sview %s %sbecause the view definition uses operations that cannot always be applied if %s." , ( singleTable ? "single table " : "multi-table " ) , viewName , ( singleTable ? String . format ( "on table %s " , singleTableName ) : "" ) , ( singleTable ? "the table already contains data" : "none of the source tables are empty" ) ) ; }
Return an error message asserting that we cannot create a view with a given name .
130
16
155,739
private TablePopulationRequirements getMVHandlerInfoMessage ( MaterializedViewHandlerInfo mvh ) { if ( ! mvh . getIssafewithnonemptysources ( ) ) { TablePopulationRequirements retval ; String viewName = mvh . getDesttable ( ) . getTypeName ( ) ; String errorMessage = createViewDisallowedMessage ( viewName , null ) ; retval = new TablePopulationRequirements ( viewName ) ; retval . setErrorMessage ( errorMessage ) ; for ( TableRef tref : mvh . getSourcetables ( ) ) { String tableName = tref . getTable ( ) . getTypeName ( ) ; retval . addTableName ( tableName ) ; } return retval ; } return null ; }
Check a MaterializedViewHandlerInfo object for safety . Return an object with table population requirements on the table for it to be allowed . The return object , if it is non - null , will have a set of names of tables , one of which must be empty for the view to be created . It will also have an error message .
166
65
155,740
private void writeModification ( CatalogType newType , CatalogType prevType , String field ) { // Don't write modifications if the field can be ignored if ( checkModifyIgnoreList ( newType , prevType , field ) ) { return ; } // verify this is possible, write an error and mark return code false if so String errorMessage = checkModifyWhitelist ( newType , prevType , field ) ; // if it's not possible with non-empty tables, check for possible with empty tables if ( errorMessage != null ) { List < TablePopulationRequirements > responseList = checkModifyIfTableIsEmptyWhitelist ( newType , prevType , field ) ; // handle all the error messages and state from the modify check processModifyResponses ( errorMessage , responseList ) ; } if ( ! m_requiresCatalogDiffCmdsApplyToEE && checkCatalogDiffShouldApplyToEE ( newType ) ) { m_requiresCatalogDiffCmdsApplyToEE = true ; } // write the commands to make it so // they will be ignored if the change is unsupported m_serializer . writeCommandForField ( newType , field , true ) ; // record the field change for later generation of descriptive text // though skip the schema field of database because it changes all the time // and the diff will be caught elsewhere // need a better way to generalize this if ( ( newType instanceof Database ) && field . equals ( "schema" ) ) { return ; } CatalogChangeGroup cgrp = m_changes . get ( DiffClass . get ( newType ) ) ; cgrp . processChange ( newType , prevType , field ) ; }
Add a modification
352
3
155,741
protected static boolean checkCatalogDiffShouldApplyToEE ( final CatalogType suspect ) { // Warning: // This check list should be consistent with catalog items defined in EE // Once a new catalog type is added in EE, we should add it here. if ( suspect instanceof Cluster || suspect instanceof Database ) { return true ; } // Information about user-defined functions need to be applied to EE. // Because the EE needs to know about the parameter types and the return type to do // many type casting operations. if ( suspect instanceof Function ) { return true ; } if ( suspect instanceof Table || suspect instanceof TableRef || suspect instanceof Column || suspect instanceof ColumnRef || suspect instanceof Index || suspect instanceof IndexRef || suspect instanceof Constraint || suspect instanceof ConstraintRef || suspect instanceof MaterializedViewInfo || suspect instanceof MaterializedViewHandlerInfo ) { return true ; } // Statement can be children of Table or MaterilizedViewInfo, which should apply to EE // But if they are under Procedure, we can skip them. if ( suspect instanceof Statement && ( suspect . getParent ( ) instanceof Procedure == false ) ) { return true ; } // PlanFragment is a similar case like Statement if ( suspect instanceof PlanFragment && suspect . getParent ( ) instanceof Statement && ( suspect . getParent ( ) . getParent ( ) instanceof Procedure == false ) ) { return true ; } if ( suspect instanceof Connector || suspect instanceof ConnectorProperty || suspect instanceof ConnectorTableInfo ) { // export table related change, should not skip EE return true ; } // The other changes in the catalog will not be applied to EE, // including User, Group, Procedures, etc return false ; }
Our EE has a list of Catalog items that are in use , but the Java catalog contains much more . Some of the catalog diff commands will only be useful to Java . So this function will decide whether the
365
39
155,742
private void processModifyResponses ( String errorMessage , List < TablePopulationRequirements > responseList ) { assert ( errorMessage != null ) ; // if no requirements, then it's just not possible if ( responseList == null ) { m_supported = false ; m_errors . append ( errorMessage + "\n" ) ; return ; } // otherwise, it's possible if a specific table is empty // collect the error message(s) and decide if it can be done inside @UAC for ( TablePopulationRequirements response : responseList ) { String objectName = response . getObjectName ( ) ; String nonEmptyErrorMessage = response . getErrorMessage ( ) ; assert ( nonEmptyErrorMessage != null ) ; TablePopulationRequirements popreq = m_tablesThatMustBeEmpty . get ( objectName ) ; if ( popreq == null ) { popreq = response ; m_tablesThatMustBeEmpty . put ( objectName , popreq ) ; } else { String newErrorMessage = popreq . getErrorMessage ( ) + "\n " + response . getErrorMessage ( ) ; popreq . setErrorMessage ( newErrorMessage ) ; } } }
After we decide we can't modify , add or delete something on a full table , we do a check to see if we can do that on an empty table . The original error and any response from the empty table check are processed here . This code is in this method so it's not repeated 3 times for modify , add and delete . See where it's called for context . If the responseList is null , it is not possible to modify ; otherwise we do the check described above for every element in the responseList . If there is no element in the responseList , it means no tables must be empty , which is totally fine .
247
121
155,743
private void writeDeletion ( CatalogType prevType , CatalogType newlyChildlessParent , String mapName ) { // Don't write deletions if the field can be ignored if ( checkDeleteIgnoreList ( prevType , newlyChildlessParent , mapName , prevType . getTypeName ( ) ) ) { return ; } // verify this is possible, write an error and mark return code false if so String errorMessage = checkAddDropWhitelist ( prevType , ChangeType . DELETION ) ; // if it's not possible with non-empty tables, check for possible with empty tables if ( errorMessage != null ) { TablePopulationRequirements response = checkAddDropIfTableIsEmptyWhitelist ( prevType , ChangeType . DELETION ) ; List < TablePopulationRequirements > responseList = null ; if ( response != null ) { responseList = Collections . singletonList ( response ) ; } processModifyResponses ( errorMessage , responseList ) ; } if ( ! m_requiresCatalogDiffCmdsApplyToEE && checkCatalogDiffShouldApplyToEE ( prevType ) ) { m_requiresCatalogDiffCmdsApplyToEE = true ; } // write the commands to make it so // they will be ignored if the change is unsupported m_serializer . writeDeleteDiffStatement ( prevType , mapName ) ; // add it to the set of deletions to later compute descriptive text CatalogChangeGroup cgrp = m_changes . get ( DiffClass . get ( prevType ) ) ; cgrp . processDeletion ( prevType , newlyChildlessParent ) ; }
Add a deletion
336
3
155,744
private void writeAddition ( CatalogType newType ) { // Don't write additions if the field can be ignored if ( checkAddIgnoreList ( newType ) ) { return ; } // verify this is possible, write an error and mark return code false if so String errorMessage = checkAddDropWhitelist ( newType , ChangeType . ADDITION ) ; // if it's not possible with non-empty tables, check for possible with empty tables if ( errorMessage != null ) { TablePopulationRequirements response = checkAddDropIfTableIsEmptyWhitelist ( newType , ChangeType . ADDITION ) ; // handle all the error messages and state from the modify check List < TablePopulationRequirements > responseList = null ; if ( response != null ) { responseList = Collections . singletonList ( response ) ; } processModifyResponses ( errorMessage , responseList ) ; } if ( ! m_requiresCatalogDiffCmdsApplyToEE && checkCatalogDiffShouldApplyToEE ( newType ) ) { m_requiresCatalogDiffCmdsApplyToEE = true ; } // write the commands to make it so // they will be ignored if the change is unsupported newType . accept ( m_serializer ) ; // add it to the set of additions to later compute descriptive text CatalogChangeGroup cgrp = m_changes . get ( DiffClass . get ( newType ) ) ; cgrp . processAddition ( newType ) ; }
Add an addition
303
3
155,745
private void getCommandsToDiff ( String mapName , CatalogMap < ? extends CatalogType > prevMap , CatalogMap < ? extends CatalogType > newMap ) { assert ( prevMap != null ) ; assert ( newMap != null ) ; // in previous, not in new for ( CatalogType prevType : prevMap ) { String name = prevType . getTypeName ( ) ; CatalogType newType = newMap . get ( name ) ; if ( newType == null ) { writeDeletion ( prevType , newMap . m_parent , mapName ) ; continue ; } diffRecursively ( prevType , newType ) ; } // in new, not in previous for ( CatalogType newType : newMap ) { CatalogType prevType = prevMap . get ( newType . getTypeName ( ) ) ; if ( prevType != null ) continue ; writeAddition ( newType ) ; } }
Check if all the children in prevMap are present and identical in newMap . Then check if anything is in newMap that isn't in prevMap .
194
31
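The two-pass map-diff pattern in getCommandsToDiff, reduced to a hedged standalone sketch over plain maps (MapDiff and its printouts are invented for illustration; the original recurses into shared entries instead of printing):

import java.util.Map;

final class MapDiff {
    // Pass 1: keys in prev but not next are deletions; shared keys whose
    // values differ are modifications. Pass 2: keys only in next are additions.
    static <K, V> void diff(Map<K, V> prev, Map<K, V> next) {
        for (Map.Entry<K, V> e : prev.entrySet()) {
            V newer = next.get(e.getKey());
            if (newer == null) {
                System.out.println("deleted: " + e.getKey());
            } else if (!newer.equals(e.getValue())) {
                System.out.println("modified: " + e.getKey());
            }
        }
        for (K k : next.keySet()) {
            if (!prev.containsKey(k)) {
                System.out.println("added: " + k);
            }
        }
    }
}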
155,746
public String getSQL ( ) { StringBuffer sb = new StringBuffer ( 64 ) ; switch ( opType ) { case OpTypes . VALUE : if ( valueData == null ) { return Tokens . T_NULL ; } return dataType . convertToSQLString ( valueData ) ; case OpTypes . ROW : sb . append ( ' ' ) ; for ( int i = 0 ; i < nodes . length ; i ++ ) { sb . append ( nodes [ i ] . getSQL ( ) ) ; if ( i < nodes . length - 1 ) { sb . append ( ' ' ) ; } } sb . append ( ' ' ) ; return sb . toString ( ) ; // case OpTypes . TABLE : for ( int i = 0 ; i < nodes . length ; i ++ ) { sb . append ( nodes [ i ] . getSQL ( ) ) ; if ( i < nodes . length - 1 ) { sb . append ( ' ' ) ; } } return sb . toString ( ) ; } switch ( opType ) { case OpTypes . ROW_SUBQUERY : case OpTypes . TABLE_SUBQUERY : /* buf.append('('); buf.append(subSelect.getSQL()); buf.append(')'); */ break ; default : throw Error . runtimeError ( ErrorCode . U_S0500 , "Expression" ) ; } return sb . toString ( ) ; }
For use with CHECK constraints . Under development .
311
10
155,747
void setDataType ( Session session , Type type ) { if ( opType == OpTypes . VALUE ) { valueData = type . convertToType ( session , valueData , dataType ) ; } dataType = type ; }
Set the data type
49
4
155,748
Expression replaceAliasInOrderBy ( Expression [ ] columns , int length ) { for ( int i = 0 ; i < nodes . length ; i ++ ) { if ( nodes [ i ] == null ) { continue ; } nodes [ i ] = nodes [ i ] . replaceAliasInOrderBy ( columns , length ) ; } return this ; }
return the expression for an alias used in an ORDER BY clause
73
12
155,749
public HsqlList resolveColumnReferences ( RangeVariable [ ] rangeVarArray , HsqlList unresolvedSet ) { return resolveColumnReferences ( rangeVarArray , rangeVarArray . length , unresolvedSet , true ) ; }
resolve tables and collect unresolved column expressions
45
8
155,750
void insertValuesIntoSubqueryTable ( Session session , PersistentStore store ) { for ( int i = 0 ; i < nodes . length ; i ++ ) { Object [ ] data = nodes [ i ] . getRowValue ( session ) ; for ( int j = 0 ; j < nodeDataTypes . length ; j ++ ) { data [ j ] = nodeDataTypes [ j ] . convertToType ( session , data [ j ] , nodes [ i ] . nodes [ j ] . dataType ) ; } Row row = ( Row ) store . getNewCachedObject ( session , data ) ; try { store . indexRow ( session , row ) ; } //XXX: what conditions are being casually ignored here? catch ( HsqlException e ) { } } }
Details of IN condition optimisation for 1.9.0 . Predicates with SELECT are QUERY expressions .
163
20
155,751
static QuerySpecification getCheckSelect ( Session session , Table t , Expression e ) { CompileContext compileContext = new CompileContext ( session ) ; QuerySpecification s = new QuerySpecification ( compileContext ) ; s . exprColumns = new Expression [ 1 ] ; s . exprColumns [ 0 ] = EXPR_TRUE ; RangeVariable range = new RangeVariable ( t , null , null , null , compileContext ) ; s . rangeVariables = new RangeVariable [ ] { range } ; HsqlList unresolved = e . resolveColumnReferences ( s . rangeVariables , null ) ; ExpressionColumn . checkColumnsResolved ( unresolved ) ; e . resolveTypes ( session , null ) ; if ( Type . SQL_BOOLEAN != e . getDataType ( ) ) { throw Error . error ( ErrorCode . X_42568 ) ; } Expression condition = new ExpressionLogical ( OpTypes . NOT , e ) ; s . queryCondition = condition ; s . resolveReferences ( session ) ; s . resolveTypes ( session ) ; return s ; }
Returns a Select object that can be used for checking the contents of an existing table against the given CHECK search condition .
228
24
155,752
static void collectAllExpressions ( HsqlList set , Expression e , OrderedIntHashSet typeSet , OrderedIntHashSet stopAtTypeSet ) { if ( e == null ) { return ; } if ( stopAtTypeSet . contains ( e . opType ) ) { return ; } for ( int i = 0 ; i < e . nodes . length ; i ++ ) { collectAllExpressions ( set , e . nodes [ i ] , typeSet , stopAtTypeSet ) ; } if ( typeSet . contains ( e . opType ) ) { set . add ( e ) ; } if ( e . subQuery != null && e . subQuery . queryExpression != null ) { e . subQuery . queryExpression . collectAllExpressions ( set , typeSet , stopAtTypeSet ) ; } }
collect all expressions of a set of expression types appearing anywhere in a select statement and its subselects etc .
176
24
155,753
protected String getUniqueId ( final Session session ) { if ( cached_id != null ) { return cached_id ; } // // Calculated an new Id // // this line ripped from the "describe" method // seems to help with some types like "equal" cached_id = new String ( ) ; // // If object is a leaf node, then we'll use John's original code... // Otherwise we need to generate and Id based on what our children are // if ( getType ( ) != OpTypes . VALUE && getType ( ) != OpTypes . COLUMN ) { // // Horribly inefficient, but it works for now... // traverse ( this , session ) ; } long nodeId = session . getNodeIdForExpression ( this ) ; cached_id = Long . toString ( nodeId ) ; return cached_id ; }
Get the hex address of this Expression Object in memory to be used as a unique identifier .
180
18
155,754
private VoltXMLElement convertUsingColumnrefToCoaleseExpression ( Session session , VoltXMLElement exp , Type dataType ) throws org . hsqldb_voltpatches . HSQLInterface . HSQLParseException { // Hsql has check dataType can not be null. assert ( dataType != null ) ; exp . attributes . put ( "valuetype" , dataType . getNameString ( ) ) ; // Extract unique columnref HashSet < String > tables = new HashSet <> ( ) ; ArrayDeque < VoltXMLElement > uniqueColumnrefs = new ArrayDeque <> ( ) ; for ( VoltXMLElement columnref : exp . children ) { String table = columnref . attributes . get ( "table" ) ; String tableAlias = columnref . attributes . get ( "tablealias" ) ; assert ( table != null ) ; String tableOrAlias = ( tableAlias == null ) ? table : tableAlias ; if ( tables . contains ( tableOrAlias ) ) { continue ; } tables . add ( tableOrAlias ) ; uniqueColumnrefs . add ( columnref ) ; } // Delete original children exp . children . clear ( ) ; // There should be at least 2 columnref expressions assert ( uniqueColumnrefs . size ( ) > 1 ) ; VoltXMLElement lastAlternativeExpr = null ; VoltXMLElement resultColaesceExpr = null ; while ( true ) { VoltXMLElement next = uniqueColumnrefs . pop ( ) ; if ( uniqueColumnrefs . isEmpty ( ) ) { // Last columnref. Simply plug it in to the last THEN Expression assert ( lastAlternativeExpr != null ) ; // Add next as the first child lastAlternativeExpr . children . add ( 0 , next ) ; break ; } // IS_NULL expression VoltXMLElement isnull_expr = prototypes . get ( OpTypes . IS_NULL ) ; if ( isnull_expr == null ) { throwForUnsupportedExpression ( OpTypes . IS_NULL ) ; } isnull_expr = isnull_expr . duplicate ( ) ; isnull_expr . attributes . put ( "id" , this . getUniqueId ( session ) ) ; isnull_expr . children . add ( next ) ; // Alternative expression VoltXMLElement alt_expr = prototypes . get ( OpTypes . ALTERNATIVE ) ; if ( alt_expr == null ) { throwForUnsupportedExpression ( OpTypes . ALTERNATIVE ) ; } alt_expr = alt_expr . duplicate ( ) ; alt_expr . attributes . put ( "id" , this . getUniqueId ( session ) ) ; alt_expr . attributes . put ( "valuetype" , dataType . getNameString ( ) ) ; // The next expression should be a second child // but for now we keep it as the first one alt_expr . children . add ( next ) ; // COALESCE expression VoltXMLElement coalesceExpr = exp . duplicate ( ) ; coalesceExpr . attributes . put ( "alias" , next . attributes . get ( "alias" ) ) ; coalesceExpr . attributes . put ( "column" , next . attributes . get ( "column" ) ) ; // Add IS NULL and ALTERNATIVE expressions to the coalesceExpr coalesceExpr . children . add ( isnull_expr ) ; coalesceExpr . children . add ( alt_expr ) ; if ( resultColaesceExpr == null ) { resultColaesceExpr = coalesceExpr ; } else { assert ( lastAlternativeExpr != null ) ; // Add coalesceExpr as the first child to the last alternative expression lastAlternativeExpr . children . add ( 0 , coalesceExpr ) ; } lastAlternativeExpr = alt_expr ; } assert ( resultColaesceExpr != null ) ; return resultColaesceExpr ; }
columnref T1 . C
839
6
155,755
private void appendOptionGroup ( StringBuffer buff , OptionGroup group ) { if ( ! group . isRequired ( ) ) { buff . append ( "[" ) ; } List < Option > optList = new ArrayList < Option > ( group . getOptions ( ) ) ; if ( getOptionComparator ( ) != null ) { Collections . sort ( optList , getOptionComparator ( ) ) ; } // for each option in the OptionGroup for ( Iterator < Option > it = optList . iterator ( ) ; it . hasNext ( ) ; ) { // whether the option is required or not is handled at group level appendOption ( buff , it . next ( ) , true ) ; if ( it . hasNext ( ) ) { buff . append ( " | " ) ; } } if ( ! group . isRequired ( ) ) { buff . append ( "]" ) ; } }
Appends the usage clause for an OptionGroup to a StringBuffer . The clause is wrapped in square brackets if the group is required . The display of the options is handled by appendOption
187
37
155,756
private void appendOption ( StringBuffer buff , Option option , boolean required ) { if ( ! required ) { buff . append ( "[" ) ; } if ( option . getOpt ( ) != null ) { buff . append ( "-" ) . append ( option . getOpt ( ) ) ; } else { buff . append ( "--" ) . append ( option . getLongOpt ( ) ) ; } // if the Option has a value and a non blank argname if ( option . hasArg ( ) && ( option . getArgName ( ) == null || option . getArgName ( ) . length ( ) != 0 ) ) { buff . append ( option . getOpt ( ) == null ? longOptSeparator : " " ) ; buff . append ( "<" ) . append ( option . getArgName ( ) != null ? option . getArgName ( ) : getArgName ( ) ) . append ( ">" ) ; } // if the Option is not a required option if ( ! required ) { buff . append ( "]" ) ; } }
Appends the usage clause for an Option to a StringBuffer .
224
13
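A hedged miniature of the rendering rules appendOption implements; the render helper below is invented for illustration and simplifies the real commons-cli Option handling:

final class UsageExample {
    // Optional options are wrapped in brackets; short opts use "-",
    // long-only opts use "--"; an argument name renders as " <name>".
    static String render(String opt, String longOpt, String argName, boolean required) {
        StringBuilder buff = new StringBuilder();
        if (!required) {
            buff.append("[");
        }
        if (opt != null) {
            buff.append("-").append(opt);
        } else {
            buff.append("--").append(longOpt);
        }
        if (argName != null) {
            buff.append(" <").append(argName).append(">");
        }
        if (!required) {
            buff.append("]");
        }
        return buff.toString();
    }

    public static void main(String[] args) {
        System.out.println(render("f", null, "file", false));    // [-f <file>]
        System.out.println(render(null, "verbose", null, true)); // --verbose
    }
}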
155,757
public void printUsage ( PrintWriter pw , int width , String cmdLineSyntax ) { int argPos = cmdLineSyntax . indexOf ( ' ' ) + 1 ; printWrapped ( pw , width , getSyntaxPrefix ( ) . length ( ) + argPos , getSyntaxPrefix ( ) + cmdLineSyntax ) ; }
Print the cmdLineSyntax to the specified writer using the specified width .
77
15
155,758
protected StringBuffer renderOptions ( StringBuffer sb , int width , Options options , int leftPad , int descPad ) { final String lpad = createPadding ( leftPad ) ; final String dpad = createPadding ( descPad ) ; // first create list containing only <lpad>-a,--aaa where // -a is opt and --aaa is long opt; in parallel look for // the longest opt string this list will be then used to // sort options ascending int max = 0 ; List < StringBuffer > prefixList = new ArrayList < StringBuffer > ( ) ; List < Option > optList = options . helpOptions ( ) ; if ( getOptionComparator ( ) != null ) { Collections . sort ( optList , getOptionComparator ( ) ) ; } for ( Option option : optList ) { StringBuffer optBuf = new StringBuffer ( ) ; if ( option . getOpt ( ) == null ) { optBuf . append ( lpad ) . append ( " " ) . append ( getLongOptPrefix ( ) ) . append ( option . getLongOpt ( ) ) ; } else { optBuf . append ( lpad ) . append ( getOptPrefix ( ) ) . append ( option . getOpt ( ) ) ; if ( option . hasLongOpt ( ) ) { optBuf . append ( ' ' ) . append ( getLongOptPrefix ( ) ) . append ( option . getLongOpt ( ) ) ; } } if ( option . hasArg ( ) ) { String argName = option . getArgName ( ) ; if ( argName != null && argName . length ( ) == 0 ) { // if the option has a blank argname optBuf . append ( ' ' ) ; } else { optBuf . append ( option . hasLongOpt ( ) ? longOptSeparator : " " ) ; optBuf . append ( "<" ) . append ( argName != null ? option . getArgName ( ) : getArgName ( ) ) . append ( ">" ) ; } } prefixList . add ( optBuf ) ; max = optBuf . length ( ) > max ? optBuf . length ( ) : max ; } int x = 0 ; for ( Iterator < Option > it = optList . iterator ( ) ; it . hasNext ( ) ; ) { Option option = it . next ( ) ; StringBuilder optBuf = new StringBuilder ( prefixList . get ( x ++ ) . toString ( ) ) ; if ( optBuf . length ( ) < max ) { optBuf . append ( createPadding ( max - optBuf . length ( ) ) ) ; } optBuf . append ( dpad ) ; int nextLineTabStop = max + descPad ; if ( option . getDescription ( ) != null ) { optBuf . append ( option . getDescription ( ) ) ; } renderWrappedText ( sb , width , nextLineTabStop , optBuf . toString ( ) ) ; if ( it . hasNext ( ) ) { sb . append ( getNewLine ( ) ) ; } } return sb ; }
Render the specified Options and return the rendered Options in a StringBuffer .
674
14
155,759
protected StringBuffer renderWrappedText ( StringBuffer sb , int width , int nextLineTabStop , String text ) { int pos = findWrapPos ( text , width , 0 ) ; if ( pos == - 1 ) { sb . append ( rtrim ( text ) ) ; return sb ; } sb . append ( rtrim ( text . substring ( 0 , pos ) ) ) . append ( getNewLine ( ) ) ; if ( nextLineTabStop >= width ) { // stops infinite loop happening nextLineTabStop = 1 ; } // all following lines must be padded with nextLineTabStop space characters final String padding = createPadding ( nextLineTabStop ) ; while ( true ) { text = padding + text . substring ( pos ) . trim ( ) ; pos = findWrapPos ( text , width , 0 ) ; if ( pos == - 1 ) { sb . append ( text ) ; return sb ; } if ( text . length ( ) > width && pos == nextLineTabStop - 1 ) { pos = width ; } sb . append ( rtrim ( text . substring ( 0 , pos ) ) ) . append ( getNewLine ( ) ) ; } }
Render the specified text and return the rendered text in a StringBuffer .
261
14
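The wrapping loop in renderWrappedText reduces to a greedy break-at-last-space algorithm with padded continuation lines; a simplified, hedged sketch (wrap is an invented helper and omits the original's special cases):

final class WrapExample {
    // Greedy wrap: break each line at the last space before the width,
    // and indent every continuation line by `indent` spaces.
    static String wrap(String text, int width, int indent) {
        if (indent >= width) {
            indent = 1; // mirrors the original's guard against an infinite loop
        }
        StringBuilder sb = new StringBuilder();
        String pad = " ".repeat(indent);
        String rest = text;
        boolean first = true;
        while (true) {
            int avail = first ? width : width - indent; // continuation lines lose indent columns
            if (rest.length() <= avail) {
                sb.append(first ? "" : pad).append(rest);
                return sb.toString();
            }
            int pos = rest.lastIndexOf(' ', avail);
            if (pos <= 0) {
                pos = avail; // no space to break at: hard break
            }
            sb.append(first ? "" : pad)
              .append(rest.substring(0, pos).stripTrailing())
              .append('\n');
            rest = rest.substring(pos).strip();
            first = false;
        }
    }

    public static void main(String[] args) {
        System.out.println(wrap("the quick brown fox jumps over the lazy dog", 16, 4));
    }
}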
155,760
private static boolean functionMatches ( FunctionDescriptor existingFd , Type returnType , Type [ ] parameterTypes ) { if ( returnType != existingFd . m_type ) { return false ; } if ( parameterTypes . length != existingFd . m_paramTypes . length ) { return false ; } for ( int idx = 0 ; idx < parameterTypes . length ; idx ++ ) { if ( parameterTypes [ idx ] != existingFd . m_paramTypes [ idx ] ) { return false ; } } return true ; }
Return true iff the existing function descriptor matches the given return type and parameter types . These are all HSQLDB types not Volt types .
120
28
155,761
private static FunctionDescriptor findFunction ( String functionName , Type returnType , Type [ ] parameterType ) { m_logger . debug ( "Looking for UDF " + functionName ) ; FunctionDescriptor fd = FunctionDescriptor . m_by_LC_name . get ( functionName ) ; if ( fd == null ) { m_logger . debug ( " Not defined in by_LC_name. Maybe it's saved." ) ; fd = FunctionDescriptor . m_saved_functions . get ( functionName ) ; } if ( fd != null && functionMatches ( fd , returnType , parameterType ) ) { m_logger . debug ( " " + functionName + " is defined or saved. id == " + fd . getId ( ) ) ; return fd ; } m_logger . debug ( " " + functionName + " is not defined or saved." ) ; return null ; }
Given a function name and signature find if there is an existing definition or saved defintion which matches the name and signature and return the definition .
208
29
155,762
public static synchronized int registerTokenForUDF ( String functionName , int functionId , VoltType voltReturnType , VoltType [ ] voltParameterTypes ) { int retFunctionId ; Type hsqlReturnType = hsqlTypeFromVoltType ( voltReturnType ) ; Type [ ] hsqlParameterTypes = hsqlTypeFromVoltType ( voltParameterTypes ) ; // If the token is already registered in the map, do not bother again. FunctionDescriptor oldFd = findFunction ( functionName , hsqlReturnType , hsqlParameterTypes ) ; if ( oldFd != null ) { // This may replace functionName with itself. This will not be an error. FunctionDescriptor . addDefinedFunction ( functionName , oldFd ) ; retFunctionId = oldFd . getId ( ) ; // If we were given a non-negative function id, it // was defined in the catalog. Our re-verification here // should have a value which we put into the catalog sometime // earlier. So, this earlier value should match the one we // were told to return. assert ( ( functionId < 0 ) || ( functionId == retFunctionId ) ) ; } else { // if the function was not already defined, then // if functionId is a valid UDF id or pre-defined SQL function id, then use it // otherwise, we want a new number. // if ( functionId > 0 ) { retFunctionId = functionId ; } else { retFunctionId = getNextFunctionId ( ) ; } FunctionDescriptor fd = makeFunctionDescriptorFromParts ( functionName , retFunctionId , hsqlReturnType , hsqlParameterTypes ) ; // if the function id belongs to UDF, put it into the defined_function map if ( isUserDefinedFunctionId ( retFunctionId ) ) { FunctionDescriptor . addDefinedFunction ( functionName , fd ) ; } m_logger . debug ( String . format ( "Added UDF \"%s\"(%d) with %d parameters" , functionName , retFunctionId , voltParameterTypes . length ) ) ; } // Ensure that m_udfSeqId is larger than all the // ones we've seen so far. if ( m_udfSeqId <= retFunctionId ) { m_udfSeqId = retFunctionId + 1 ; } return retFunctionId ; }
This function registers a UDF using VoltType values for the return type and parameter types .
513
18
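A hypothetical registration call for illustration (the function name and signature are made up; -1 means the function is not yet in the catalog, so a fresh id is allocated and returned):

// Register "add_money"(BIGINT, BIGINT) -> BIGINT and get back its function id.
int id = registerTokenForUDF(
        "add_money",
        -1,
        VoltType.BIGINT,
        new VoltType[] { VoltType.BIGINT, VoltType.BIGINT });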
155,763
public static Type hsqlTypeFromVoltType ( VoltType voltReturnType ) { Class < ? > typeClass = VoltType . classFromByteValue ( voltReturnType . getValue ( ) ) ; int typeNo = Types . getParameterSQLTypeNumber ( typeClass ) ; return Type . getDefaultTypeWithSize ( typeNo ) ; }
Convert a VoltType to an HSQL type .
73
11
155,764
public static Type [ ] hsqlTypeFromVoltType ( VoltType [ ] voltParameterTypes ) { Type [ ] answer = new Type [ voltParameterTypes . length ] ; for ( int idx = 0 ; idx < voltParameterTypes . length ; idx ++ ) { answer [ idx ] = hsqlTypeFromVoltType ( voltParameterTypes [ idx ] ) ; } return answer ; }
Map the single-parameter hsqlTypeFromVoltType over an array.
87
15
155,765
void setNewNodes ( ) { int index = tTable . getIndexCount ( ) ; nPrimaryNode = new NodeAVLMemoryPointer ( this ) ; NodeAVL n = nPrimaryNode ; for ( int i = 1 ; i < index ; i ++ ) { n . nNext = new NodeAVLMemoryPointer ( this ) ; n = n . nNext ; } }
Used when data is read from disk into the Cache for the first time. New Nodes are created, which are then indexed.
84
25
155,766
private void bufferCatchup ( int messageSize ) throws IOException { // If the current buffer has too many tasks logged, queue it and // create a new one. if ( m_tail != null && m_tail . size ( ) > 0 && messageSize > m_bufferHeadroom ) { // compile the invocation buffer m_tail . compile ( ) ; final RejoinTaskBuffer boundTail = m_tail ; final Runnable r = new Runnable ( ) { @ Override public void run ( ) { try { m_buffers . offer ( boundTail . getContainer ( ) ) ; if ( m_reader . sizeInBytes ( ) > m_overflowLimit * 1024 * 1024 ) { // we can never catch up, should break rejoin. VoltDB . crashLocalVoltDB ( "On-disk task log is full. Please reduce " + "workload and try live rejoin again, or use blocking rejoin." ) ; } } catch ( Throwable t ) { VoltDB . crashLocalVoltDB ( "Error in task log buffering transactions" , true , t ) ; } } } ; m_es . execute ( r ) ; // Reset m_tail = null ; m_tasksPendingInCurrentTail = 0 ; } // create a new buffer if ( m_tail == null ) { m_tail = new RejoinTaskBuffer ( m_partitionId , messageSize ) ; m_bufferHeadroom = RejoinTaskBuffer . DEFAULT_BUFFER_SIZE ; } }
The buffers are bounded by the number of tasks in them. Once the current buffer holds enough tasks, it is queued and a new buffer is created.
328
32
155,767
@ Override public TransactionInfoBaseMessage getNextMessage ( ) throws IOException { if ( m_closed ) { throw new IOException ( "Closed" ) ; } if ( m_head == null ) { // Get another buffer asynchronously final Runnable r = new Runnable ( ) { @ Override public void run ( ) { try { BBContainer cont = m_reader . poll ( PersistentBinaryDeque . UNSAFE_CONTAINER_FACTORY ) ; if ( cont != null ) { m_headBuffers . offer ( new RejoinTaskBuffer ( cont ) ) ; } } catch ( Throwable t ) { VoltDB . crashLocalVoltDB ( "Error retrieving buffer data in task log" , true , t ) ; } finally { m_pendingPolls . decrementAndGet ( ) ; } } } ; //Always keep three buffers ready to go for ( int ii = m_pendingPolls . get ( ) + m_headBuffers . size ( ) ; ii < 3 ; ii ++ ) { m_pendingPolls . incrementAndGet ( ) ; m_es . execute ( r ) ; } m_head = m_headBuffers . poll ( ) ; } TransactionInfoBaseMessage nextTask = null ; if ( m_head != null ) { nextTask = m_head . nextTask ( ) ; if ( nextTask == null ) { scheduleDiscard ( m_head ) ; // current buffer is completely consumed, move to the next m_head = null ; } else { m_taskCount -- ; } } else if ( ( m_taskCount - m_tasksPendingInCurrentTail == 0 ) && m_tail != null ) { m_tasksPendingInCurrentTail = 0 ; /* * there is only one buffer left which hasn't been pushed into the * queue yet. set it to head directly, short-circuiting the queue. */ m_tail . compile ( ) ; if ( m_head != null ) { scheduleDiscard ( m_head ) ; } m_head = m_tail ; m_tail = null ; nextTask = getNextMessage ( ) ; } // SPs or fragments that's before the actual snapshot fragment may end up in the task log, // because there can be multiple snapshot fragments enabling the task log due to snapshot // collision. Need to filter tasks here based on their spHandles. if ( nextTask != null && nextTask . getSpHandle ( ) > m_snapshotSpHandle ) { return nextTask ; } else { return null ; } }
Try to get the next task message from the queue .
555
11
155,768
void sendBufferSync ( ByteBuffer bb ) { try { /* configure socket to be blocking * so that we dont have to do write in * a tight while loop */ sock . configureBlocking ( true ) ; if ( bb != closeConn ) { if ( sock != null ) { sock . write ( bb ) ; } packetSent ( ) ; } } catch ( IOException ie ) { LOG . error ( "Error sending data synchronously " , ie ) ; } }
Send the buffer without using asynchronous calls to the selector, and then close the socket.
100
14
155,769
private void cleanupWriterSocket ( PrintWriter pwriter ) { try { if ( pwriter != null ) { pwriter . flush ( ) ; pwriter . close ( ) ; } } catch ( Exception e ) { LOG . info ( "Error closing PrintWriter " , e ) ; } finally { try { close ( ) ; } catch ( Exception e ) { LOG . error ( "Error closing a command socket " , e ) ; } } }
Clean up the socket related to a command, making sure the data is flushed before doing so.
92
20
155,770
private boolean readLength ( SelectionKey k ) throws IOException { // Read the length, now get the buffer int len = lenBuffer . getInt ( ) ; if ( ! initialized && checkFourLetterWord ( k , len ) ) { return false ; } if ( len < 0 || len > BinaryInputArchive . maxBuffer ) { throw new IOException ( "Len error " + len ) ; } if ( zk == null ) { throw new IOException ( "ZooKeeperServer not running" ) ; } incomingBuffer = ByteBuffer . allocate ( len ) ; return true ; }
Reads the first 4 bytes of lenBuffer, which could be the true length or a four-letter word.
124
19
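The real connection is selector-driven, but the 4-byte length prefix follows the usual NIO framing pattern; a blocking sketch for clarity (the class and method names are assumptions):

import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.SocketChannel;

class FrameReader {
    static ByteBuffer readFrame(SocketChannel ch) throws IOException {
        ByteBuffer lenBuffer = ByteBuffer.allocate(4);
        while (lenBuffer.hasRemaining()) {
            if (ch.read(lenBuffer) < 0) throw new EOFException();
        }
        lenBuffer.flip();
        int len = lenBuffer.getInt();                 // big-endian length prefix
        if (len < 0) throw new IOException("Len error " + len);
        ByteBuffer payload = ByteBuffer.allocate(len);
        while (payload.hasRemaining()) {
            if (ch.read(payload) < 0) throw new EOFException();
        }
        payload.flip();
        return payload;
    }
}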
155,771
private void closeSock ( ) { if ( sock == null ) { return ; } LOG . debug ( "Closed socket connection for client " + sock . socket ( ) . getRemoteSocketAddress ( ) + ( sessionId != 0 ? " which had sessionid 0x" + Long . toHexString ( sessionId ) : " (no session established for client)" ) ) ; try { /* * The following sequence of code is stupid! You would think that * only sock.close() is needed, but alas, it doesn't work that way. * If you just do sock.close() there are cases where the socket * doesn't actually close... */ sock . socket ( ) . shutdownOutput ( ) ; } catch ( IOException e ) { // This is a relatively common exception that we can't avoid if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "ignoring exception during output shutdown" , e ) ; } } try { sock . socket ( ) . shutdownInput ( ) ; } catch ( IOException e ) { // This is a relatively common exception that we can't avoid if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "ignoring exception during input shutdown" , e ) ; } } try { sock . socket ( ) . close ( ) ; } catch ( IOException e ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "ignoring exception during socket close" , e ) ; } } try { sock . close ( ) ; // XXX The next line doesn't seem to be needed, but some posts // to forums suggest that it is needed. Keep in mind if errors in // this section arise. // factory.selector.wakeup(); } catch ( IOException e ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "ignoring exception during socketchannel close" , e ) ; } } sock = null ; }
Close resources associated with the sock of this cnxn .
404
13
155,772
void increment ( ) { long id = rand . nextInt ( config . tuples ) ; long toIncrement = rand . nextInt ( 5 ) ; // 0 - 4 try { client . callProcedure ( new CMCallback ( ) , "Increment" , toIncrement , id ) ; } catch ( IOException e ) { // This is not ideal error handling for production, but should be // harmless in a benchmark like this try { Thread . sleep ( 50 ) ; } catch ( Exception e2 ) { } } }
Run the Increment procedure on the server asynchronously .
113
12
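A hedged sketch of the same fire-and-forget pattern with an explicit callback (assumes a connected Client; the error handling is deliberately minimal):

import org.voltdb.client.Client;
import org.voltdb.client.ClientResponse;
import org.voltdb.client.ProcedureCallback;

void incrementOnce(Client client, long amount, long id) throws java.io.IOException {
    ProcedureCallback cb = response -> {
        if (response.getStatus() != ClientResponse.SUCCESS) {
            System.err.println("Increment failed: " + response.getStatusString());
        }
    };
    client.callProcedure(cb, "Increment", amount, id);
}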
155,773
public synchronized void writeToLog ( Session session , String statement ) { if ( logStatements && log != null ) { log . writeStatement ( session , statement ) ; } }
Records a Log entry for the specified SQL statement on behalf of the specified Session object .
37
18
155,774
public DataFileCache openTextCache ( Table table , String source , boolean readOnlyData , boolean reversed ) { return log . openTextCache ( table , source , readOnlyData , reversed ) ; }
Opens the TextCache object .
42
7
155,775
protected void initParams ( Database database , String baseFileName ) { HsqlDatabaseProperties props = database . getProperties ( ) ; fileName = baseFileName + ".data" ; backupFileName = baseFileName + ".backup" ; this . database = database ; fa = database . getFileAccess ( ) ; int cacheScale = props . getIntegerProperty ( HsqlDatabaseProperties . hsqldb_cache_scale , 14 , 8 , 18 ) ; int cacheSizeScale = props . getIntegerProperty ( HsqlDatabaseProperties . hsqldb_cache_size_scale , 10 , 6 , 20 ) ; int cacheFreeCountScale = props . getIntegerProperty ( HsqlDatabaseProperties . hsqldb_cache_free_count_scale , 9 , 6 , 12 ) ; incBackup = database . getProperties ( ) . isPropertyTrue ( HsqlDatabaseProperties . hsqldb_inc_backup ) ; cacheFileScale = database . getProperties ( ) . getIntegerProperty ( HsqlDatabaseProperties . hsqldb_cache_file_scale , 8 ) ; if ( cacheFileScale != 1 ) { cacheFileScale = 8 ; } cachedRowPadding = 8 ; if ( cacheFileScale > 8 ) { cachedRowPadding = cacheFileScale ; } cacheReadonly = database . isFilesReadOnly ( ) ; int lookupTableLength = 1 << cacheScale ; int avgRowBytes = 1 << cacheSizeScale ; maxCacheSize = lookupTableLength * 3 ; maxCacheBytes = maxCacheSize * avgRowBytes ; maxDataFileSize = cacheFileScale == 1 ? Integer . MAX_VALUE : ( long ) Integer . MAX_VALUE * cacheFileScale ; maxFreeBlocks = 1 << cacheFreeCountScale ; dataFile = null ; shadowFile = null ; }
Initial external parameters are set here.
394
7
155,776
public void close ( boolean write ) { SimpleLog appLog = database . logger . appLog ; try { if ( cacheReadonly ) { if ( dataFile != null ) { dataFile . close ( ) ; dataFile = null ; } return ; } StopWatch sw = new StopWatch ( ) ; appLog . sendLine ( SimpleLog . LOG_NORMAL , "DataFileCache.close(" + write + ") : start" ) ; if ( write ) { cache . saveAll ( ) ; Error . printSystemOut ( "saveAll: " + sw . elapsedTime ( ) ) ; appLog . sendLine ( SimpleLog . LOG_NORMAL , "DataFileCache.close() : save data" ) ; if ( fileModified || freeBlocks . isModified ( ) ) { // set empty dataFile . seek ( LONG_EMPTY_SIZE ) ; dataFile . writeLong ( freeBlocks . getLostBlocksSize ( ) ) ; // set end dataFile . seek ( LONG_FREE_POS_POS ) ; dataFile . writeLong ( fileFreePosition ) ; // set saved flag; dataFile . seek ( FLAGS_POS ) ; int flag = BitMap . set ( 0 , FLAG_ISSAVED ) ; if ( hasRowInfo ) { flag = BitMap . set ( flag , FLAG_ROWINFO ) ; } dataFile . writeInt ( flag ) ; appLog . sendLine ( SimpleLog . LOG_NORMAL , "DataFileCache.close() : flags" ) ; // if ( dataFile . length ( ) != fileFreePosition ) { dataFile . seek ( fileFreePosition ) ; } appLog . sendLine ( SimpleLog . LOG_NORMAL , "DataFileCache.close() : seek end" ) ; Error . printSystemOut ( "pos and flags: " + sw . elapsedTime ( ) ) ; } } if ( dataFile != null ) { dataFile . close ( ) ; appLog . sendLine ( SimpleLog . LOG_NORMAL , "DataFileCache.close() : close" ) ; dataFile = null ; Error . printSystemOut ( "close: " + sw . elapsedTime ( ) ) ; } boolean empty = fileFreePosition == INITIAL_FREE_POS ; if ( empty ) { fa . removeElement ( fileName ) ; fa . removeElement ( backupFileName ) ; } } catch ( Throwable e ) { appLog . logContext ( e , null ) ; throw Error . error ( ErrorCode . FILE_IO_ERROR , ErrorCode . M_DataFileCache_close , new Object [ ] { e , fileName } ) ; } }
Parameter write indicates either an orderly close or a fast close without backup .
566
14
155,777
public void defrag ( ) { if ( cacheReadonly ) { return ; } if ( fileFreePosition == INITIAL_FREE_POS ) { return ; } database . logger . appLog . logContext ( SimpleLog . LOG_NORMAL , "start" ) ; try { boolean wasNio = dataFile . wasNio ( ) ; cache . saveAll ( ) ; DataFileDefrag dfd = new DataFileDefrag ( database , this , fileName ) ; dfd . process ( ) ; close ( false ) ; deleteFile ( wasNio ) ; renameDataFile ( wasNio ) ; backupFile ( ) ; database . getProperties ( ) . setProperty ( HsqlDatabaseProperties . hsqldb_cache_version , HsqlDatabaseProperties . THIS_CACHE_VERSION ) ; database . getProperties ( ) . save ( ) ; cache . clear ( ) ; cache = new Cache ( this ) ; open ( cacheReadonly ) ; dfd . updateTableIndexRoots ( ) ; dfd . updateTransactionRowIDs ( ) ; } catch ( Throwable e ) { database . logger . appLog . logContext ( e , null ) ; if ( e instanceof HsqlException ) { throw ( HsqlException ) e ; } else { throw new HsqlException ( e , Error . getMessage ( ErrorCode . GENERAL_IO_ERROR ) , ErrorCode . GENERAL_IO_ERROR ) ; } } database . logger . appLog . logContext ( SimpleLog . LOG_NORMAL , "end" ) ; }
Writes out all the rows to a new file without fragmentation .
334
13
155,778
public void remove ( int i , PersistentStore store ) { writeLock . lock ( ) ; try { CachedObject r = release ( i ) ; if ( r != null ) { int size = r . getStorageSize ( ) ; freeBlocks . add ( i , size ) ; } } finally { writeLock . unlock ( ) ; } }
Used when a row is deleted as a result of some DML or DDL statement . Removes the row from the cache data structures . Adds the file space for the row to the list of free positions .
73
42
155,779
public void restore ( CachedObject object ) { writeLock . lock ( ) ; try { int i = object . getPos ( ) ; cache . put ( i , object ) ; // was previously used for text tables if ( storeOnInsert ) { saveRow ( object ) ; } } finally { writeLock . unlock ( ) ; } }
Restores a CachedObject that had previously been released from the cache. A new version is introduced using the preallocated space for the object.
71
28
155,780
static void deleteOrResetFreePos ( Database database , String filename ) { ScaledRAFile raFile = null ; database . getFileAccess ( ) . removeElement ( filename ) ; // OOo related code if ( database . isStoredFileAccess ( ) ) { return ; } // OOo end if ( ! database . getFileAccess ( ) . isStreamElement ( filename ) ) { return ; } try { raFile = new ScaledRAFile ( database , filename , false ) ; raFile . seek ( LONG_FREE_POS_POS ) ; raFile . writeLong ( INITIAL_FREE_POS ) ; } catch ( IOException e ) { database . logger . appLog . logContext ( e , null ) ; } finally { if ( raFile != null ) { try { raFile . close ( ) ; } catch ( IOException e ) { database . logger . appLog . logContext ( e , null ) ; } } } }
This method deletes a data file or resets its free position. This is used only for nio files, not OOo files.
205
28
155,781
public static boolean isDurableFragment ( byte [ ] planHash ) { long fragId = VoltSystemProcedure . hashToFragId ( planHash ) ; return ( fragId == PF_prepBalancePartitions || fragId == PF_balancePartitions || fragId == PF_balancePartitionsData || fragId == PF_balancePartitionsClearIndex || fragId == PF_distribute || fragId == PF_applyBinaryLog ) ; }
Used for sysprocs, where we can't distinguish whether this needs to be replayed or not.
97
18
155,782
protected void set ( ClientResponse response ) { if ( ! this . status . compareAndSet ( STATUS_RUNNING , STATUS_SUCCESS ) ) return ; this . response = response ; this . latch . countDown ( ) ; }
Sets the result of the operation and flags the execution call as completed.
53
15
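The compareAndSet guard gives set-once semantics: only the first completion wins, and later calls are ignored. A self-contained sketch of the pattern (the field and constant names are assumptions):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

class OneShotResult<T> {
    private static final int RUNNING = 0, DONE = 1;
    private final AtomicInteger status = new AtomicInteger(RUNNING);
    private final CountDownLatch latch = new CountDownLatch(1);
    private volatile T value;

    boolean set(T v) {
        if (!status.compareAndSet(RUNNING, DONE)) {
            return false;           // someone else already completed it
        }
        value = v;
        latch.countDown();          // wake up waiters
        return true;
    }

    T get() throws InterruptedException {
        latch.await();
        return value;
    }
}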
155,783
private static List < JoinNode > generateInnerJoinOrdersForTree ( JoinNode subTree ) { // Get a list of the leaf nodes(tables) to permute them List < JoinNode > tableNodes = subTree . generateLeafNodesJoinOrder ( ) ; List < List < JoinNode > > joinOrders = PermutationGenerator . generatePurmutations ( tableNodes ) ; List < JoinNode > newTrees = new ArrayList <> ( ) ; for ( List < JoinNode > joinOrder : joinOrders ) { newTrees . add ( JoinNode . reconstructJoinTreeFromTableNodes ( joinOrder , JoinType . INNER ) ) ; } //Collect all the join/where conditions to reassign them later AbstractExpression combinedWhereExpr = subTree . getAllFilters ( ) ; List < JoinNode > treePermutations = new ArrayList <> ( ) ; for ( JoinNode newTree : newTrees ) { if ( combinedWhereExpr != null ) { newTree . setWhereExpression ( combinedWhereExpr . clone ( ) ) ; } // The new tree root node id must match the original one to be able to reconnect the // subtrees newTree . setId ( subTree . getId ( ) ) ; treePermutations . add ( newTree ) ; } return treePermutations ; }
Helper method to generate join orders for a join tree containing only INNER joins that can be obtained by the permutation of the original tables .
294
28
155,784
private static List < JoinNode > generateOuterJoinOrdersForTree ( JoinNode subTree ) { List < JoinNode > treePermutations = new ArrayList <> ( ) ; treePermutations . add ( subTree ) ; return treePermutations ; }
Helper method to generate join orders for an OUTER join tree. Permutations for LEFT joins are not yet supported.
58
27
155,785
private static List < JoinNode > generateFullJoinOrdersForTree ( JoinNode subTree ) { assert ( subTree != null ) ; List < JoinNode > joinOrders = new ArrayList <> ( ) ; if ( ! ( subTree instanceof BranchNode ) ) { // End of recursion joinOrders . add ( subTree ) ; return joinOrders ; } BranchNode branchNode = ( BranchNode ) subTree ; // Descend to the left branch assert ( branchNode . getLeftNode ( ) != null ) ; List < JoinNode > leftJoinOrders = generateFullJoinOrdersForTree ( branchNode . getLeftNode ( ) ) ; assert ( ! leftJoinOrders . isEmpty ( ) ) ; // Descend to the right branch assert ( branchNode . getRightNode ( ) != null ) ; List < JoinNode > rightJoinOrders = generateFullJoinOrdersForTree ( branchNode . getRightNode ( ) ) ; assert ( ! rightJoinOrders . isEmpty ( ) ) ; // Create permutation pairing left and right nodes and the revere variant for ( JoinNode leftNode : leftJoinOrders ) { for ( JoinNode rightNode : rightJoinOrders ) { JoinNode resultOne = new BranchNode ( branchNode . getId ( ) , branchNode . getJoinType ( ) , ( JoinNode ) leftNode . clone ( ) , ( JoinNode ) rightNode . clone ( ) ) ; JoinNode resultTwo = new BranchNode ( branchNode . getId ( ) , branchNode . getJoinType ( ) , ( JoinNode ) rightNode . clone ( ) , ( JoinNode ) leftNode . clone ( ) ) ; if ( branchNode . getJoinExpression ( ) != null ) { resultOne . setJoinExpression ( branchNode . getJoinExpression ( ) . clone ( ) ) ; resultTwo . setJoinExpression ( branchNode . getJoinExpression ( ) . clone ( ) ) ; } if ( branchNode . getWhereExpression ( ) != null ) { resultOne . setWhereExpression ( branchNode . getWhereExpression ( ) . clone ( ) ) ; resultTwo . setWhereExpression ( branchNode . getWhereExpression ( ) . clone ( ) ) ; } joinOrders . add ( resultOne ) ; joinOrders . add ( resultTwo ) ; } } return joinOrders ; }
Helper method to generate join orders for a join tree containing only FULL joins. The only allowed permutation is a join order that has the original left and right nodes swapped.
508
33
155,786
private void generateMorePlansForJoinTree ( JoinNode joinTree ) { assert ( joinTree != null ) ; // generate the access paths for all nodes generateAccessPaths ( joinTree ) ; List < JoinNode > nodes = joinTree . generateAllNodesJoinOrder ( ) ; generateSubPlanForJoinNodeRecursively ( joinTree , 0 , nodes ) ; }
Given a specific join order, compute all possible sub-plan-graphs for that join order and add them to the deque of plans. If this doesn't add plans, it doesn't mean no more plans can be generated. It's possible that the particular join order it got had no reasonable plans.
79
59
155,787
private AbstractPlanNode getSelectSubPlanForJoinNode ( JoinNode joinNode ) { assert ( joinNode != null ) ; if ( joinNode instanceof BranchNode ) { BranchNode branchJoinNode = ( BranchNode ) joinNode ; // Outer node AbstractPlanNode outerScanPlan = getSelectSubPlanForJoinNode ( branchJoinNode . getLeftNode ( ) ) ; if ( outerScanPlan == null ) { return null ; } // Inner Node. AbstractPlanNode innerScanPlan = getSelectSubPlanForJoinNode ( ( branchJoinNode ) . getRightNode ( ) ) ; if ( innerScanPlan == null ) { return null ; } // Join Node IndexSortablePlanNode answer = getSelectSubPlanForJoin ( branchJoinNode , outerScanPlan , innerScanPlan ) ; // Propagate information used for order by clauses in window functions // and the statement level order by clause. This is only if the // branch node is an inner join. if ( ( answer != null ) && ( branchJoinNode . getJoinType ( ) == JoinType . INNER ) && outerScanPlan instanceof IndexSortablePlanNode ) { IndexUseForOrderBy indexUseForJoin = answer . indexUse ( ) ; IndexUseForOrderBy indexUseFromScan = ( ( IndexSortablePlanNode ) outerScanPlan ) . indexUse ( ) ; indexUseForJoin . setWindowFunctionUsesIndex ( indexUseFromScan . getWindowFunctionUsesIndex ( ) ) ; indexUseForJoin . setWindowFunctionIsCompatibleWithOrderBy ( indexUseFromScan . isWindowFunctionCompatibleWithOrderBy ( ) ) ; indexUseForJoin . setFinalExpressionOrderFromIndexScan ( indexUseFromScan . getFinalExpressionOrderFromIndexScan ( ) ) ; indexUseForJoin . setSortOrderFromIndexScan ( indexUseFromScan . getSortOrderFromIndexScan ( ) ) ; } if ( answer == null ) { return null ; } return answer . planNode ( ) ; } // End of recursion AbstractPlanNode scanNode = getAccessPlanForTable ( joinNode ) ; // Connect the sub-query tree if any if ( joinNode instanceof SubqueryLeafNode ) { StmtSubqueryScan tableScan = ( ( SubqueryLeafNode ) joinNode ) . getSubqueryScan ( ) ; CompiledPlan subQueryPlan = tableScan . getBestCostPlan ( ) ; assert ( subQueryPlan != null ) ; assert ( subQueryPlan . rootPlanGraph != null ) ; // The sub-query best cost plan needs to be un-linked from the previous parent plan // it's the same child plan that gets re-attached to many parents one at a time subQueryPlan . rootPlanGraph . disconnectParents ( ) ; scanNode . addAndLinkChild ( subQueryPlan . rootPlanGraph ) ; } return scanNode ; }
Given a specific join node and access path set for inner and outer tables, construct the plan that gives the right tuples.
605
24
155,788
private static List < AbstractExpression > filterSingleTVEExpressions ( List < AbstractExpression > exprs , List < AbstractExpression > otherExprs ) { List < AbstractExpression > singleTVEExprs = new ArrayList <> ( ) ; for ( AbstractExpression expr : exprs ) { List < TupleValueExpression > tves = ExpressionUtil . getTupleValueExpressions ( expr ) ; if ( tves . size ( ) == 1 ) { singleTVEExprs . add ( expr ) ; } else { otherExprs . add ( expr ) ; } } return singleTVEExprs ; }
A method to filter out single-TVE expressions.
141
11
155,789
public void notifyShutdown ( ) { if ( m_shutdown . compareAndSet ( false , true ) ) { for ( KafkaExternalConsumerRunner consumer : m_consumers ) { consumer . shutdown ( ) ; } close ( ) ; } }
Shutdown hook to notify Kafka consumer threads of shutdown.
52
12
155,790
protected void runDDL ( String ddl , boolean transformDdl ) { String modifiedDdl = ( transformDdl ? transformDDL ( ddl ) : ddl ) ; printTransformedSql ( ddl , modifiedDdl ) ; super . runDDL ( modifiedDdl ) ; }
Optionally modifies DDL statements in such a way that PostgreSQL results will match VoltDB results ; and then passes the remaining work to the base class version .
64
33
155,791
@ Override protected String getVoltColumnTypeName ( String columnTypeName ) { String equivalentTypeName = m_PostgreSQLTypeNames . get ( columnTypeName ) ; return ( equivalentTypeName == null ) ? columnTypeName . toUpperCase ( ) : equivalentTypeName ; }
Returns the column type name in VoltDB corresponding to the specified column type name in PostgreSQL .
62
19
155,792
static private int numOccurencesOfCharIn ( String str , char ch ) { boolean inMiddleOfQuote = false ; int num = 0 , previousIndex = 0 ; for ( int index = str . indexOf ( ch ) ; index >= 0 ; index = str . indexOf ( ch , index + 1 ) ) { if ( hasOddNumberOfSingleQuotes ( str . substring ( previousIndex , index ) ) ) { inMiddleOfQuote = ! inMiddleOfQuote ; } if ( ! inMiddleOfQuote ) { num ++ ; } previousIndex = index ; } return num ; }
Returns the number of occurrences of the specified character in the specified String but ignoring those contained in single quotes .
126
21
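Tracing a small input by hand shows the quote handling (a hypothetical call, since the method is private):

// numOccurencesOfCharIn("a;b;'c;d'", ';')  returns 2
// The ';' between 'c' and 'd' falls inside an odd-quote region and is skipped;
// only the two unquoted semicolons are counted.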
155,793
static private int indexOfNthOccurrenceOfCharIn ( String str , char ch , int n ) { boolean inMiddleOfQuote = false ; int index = - 1 , previousIndex = 0 ; for ( int i = 0 ; i < n ; i ++ ) { do { index = str . indexOf ( ch , index + 1 ) ; if ( index < 0 ) { return - 1 ; } if ( hasOddNumberOfSingleQuotes ( str . substring ( previousIndex , index ) ) ) { inMiddleOfQuote = ! inMiddleOfQuote ; } previousIndex = index ; } while ( inMiddleOfQuote ) ; } return index ; }
Returns the Nth occurrence of the specified character in the specified String but ignoring those contained in single quotes .
139
21
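Continuing the same hypothetical input:

// indexOfNthOccurrenceOfCharIn("a;b;'c;d'", ';', 2)  returns 3
// indexOfNthOccurrenceOfCharIn("a;b;'c;d'", ';', 3)  returns -1
// The quoted ';' at index 6 is never counted, so there is no third occurrence.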
155,794
protected VoltTable runDML ( String dml , boolean transformDml ) { String modifiedDml = ( transformDml ? transformDML ( dml ) : dml ) ; printTransformedSql ( dml , modifiedDml ) ; return super . runDML ( modifiedDml ) ; }
Optionally modifies queries in such a way that PostgreSQL results will match VoltDB results ; and then passes the remaining work to the base class version .
66
31
155,795
static int getClassCode ( Class cla ) { if ( ! cla . isPrimitive ( ) ) { return ArrayUtil . CLASS_CODE_OBJECT ; } return classCodeMap . get ( cla , - 1 ) ; }
Returns a distinct int code for each primitive type and for all Object types .
50
15
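Illustrative behavior, assuming the usual ArrayUtil CLASS_CODE_* constants:

// getClassCode(int.class)     -> ArrayUtil.CLASS_CODE_INT
// getClassCode(Integer.class) -> ArrayUtil.CLASS_CODE_OBJECT   (boxed, not primitive)
// getClassCode(String.class)  -> ArrayUtil.CLASS_CODE_OBJECT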
155,796
public static void clearArray ( int type , Object data , int from , int to ) { switch ( type ) { case ArrayUtil . CLASS_CODE_BYTE : { byte [ ] array = ( byte [ ] ) data ; while ( -- to >= from ) { array [ to ] = 0 ; } return ; } case ArrayUtil . CLASS_CODE_CHAR : { char [ ] array = ( char [ ] ) data ; while ( -- to >= from ) { array [ to ] = 0 ; } return ; } case ArrayUtil . CLASS_CODE_SHORT : { short [ ] array = ( short [ ] ) data ; while ( -- to >= from ) { array [ to ] = 0 ; } return ; } case ArrayUtil . CLASS_CODE_INT : { int [ ] array = ( int [ ] ) data ; while ( -- to >= from ) { array [ to ] = 0 ; } return ; } case ArrayUtil . CLASS_CODE_LONG : { long [ ] array = ( long [ ] ) data ; while ( -- to >= from ) { array [ to ] = 0 ; } return ; } case ArrayUtil . CLASS_CODE_FLOAT : { float [ ] array = ( float [ ] ) data ; while ( -- to >= from ) { array [ to ] = 0 ; } return ; } case ArrayUtil . CLASS_CODE_DOUBLE : { double [ ] array = ( double [ ] ) data ; while ( -- to >= from ) { array [ to ] = 0 ; } return ; } case ArrayUtil . CLASS_CODE_BOOLEAN : { boolean [ ] array = ( boolean [ ] ) data ; while ( -- to >= from ) { array [ to ] = false ; } return ; } default : { Object [ ] array = ( Object [ ] ) data ; while ( -- to >= from ) { array [ to ] = null ; } return ; } } }
Clears an area of the given array of the given type .
422
13
155,797
public static void adjustArray ( int type , Object array , int usedElements , int index , int count ) { if ( index >= usedElements ) { return ; } int newCount = usedElements + count ; int source ; int target ; int size ; if ( count >= 0 ) { source = index ; target = index + count ; size = usedElements - index ; } else { source = index - count ; target = index ; size = usedElements - index + count ; } if ( size > 0 ) { System . arraycopy ( array , source , array , target , size ) ; } if ( count < 0 ) { clearArray ( type , array , newCount , usedElements ) ; } }
Moves the contents of an array to allow both addition and removal of elements. The arguments used must be in range.
152
23
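The in-place shift can be pictured with a small System.arraycopy demo, standalone rather than the HSQLDB class itself:

public class ShiftDemo {
    public static void main(String[] args) {
        int[] a = { 1, 2, 3, 4, 0, 0 };   // 4 used elements, room for 2 more

        // Insert a gap of 2 at index 1 (count = +2): shift {2, 3, 4} right.
        System.arraycopy(a, 1, a, 3, 3);
        // a == { 1, 2, 3, 2, 3, 4 }; slots 1..2 may now be overwritten.

        // Remove 2 elements at index 1 (count = -2): shift left, then clear the tail.
        System.arraycopy(a, 3, a, 1, 3);
        java.util.Arrays.fill(a, 4, 6, 0);
        // a == { 1, 2, 3, 4, 0, 0 } again.
    }
}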
155,798
public static void sortArray ( int [ ] array ) { boolean swapped ; do { swapped = false ; for ( int i = 0 ; i < array . length - 1 ; i ++ ) { if ( array [ i ] > array [ i + 1 ] ) { int temp = array [ i + 1 ] ; array [ i + 1 ] = array [ i ] ; array [ i ] = temp ; swapped = true ; } } } while ( swapped ) ; }
Basic sort for small arrays of int .
97
8
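A worked run of the bubble sort on a four-element input:

// sortArray(new int[] { 4, 2, 3, 1 }):
// pass 1: { 2, 3, 1, 4 }   pass 2: { 2, 1, 3, 4 }   pass 3: { 1, 2, 3, 4 }
// pass 4 makes no swaps, so the do/while loop exits.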
155,799
public static int find ( Object [ ] array , Object object ) { for ( int i = 0 ; i < array . length ; i ++ ) { if ( array [ i ] == object ) { // handles both nulls return i ; } if ( object != null && object . equals ( array [ i ] ) ) { return i ; } } return - 1 ; }
Basic find for small arrays of Object .
78
8
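Because the identity check runs first, a null needle matches a null slot before equals is ever called; hedged examples:

// find(new Object[] { "a", null, "b" }, null)  -> 1   (identity handles nulls)
// find(new Object[] { "a", null, "b" }, "b")   -> 2   (falls through to equals)
// find(new Object[] { "a" }, "z")              -> -1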