idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
154,900
public String getUserName ( ) throws SQLException { ResultSet rs = execute ( "CALL USER()" ) ; rs . next ( ) ; String result = rs . getString ( 1 ) ; rs . close ( ) ; return result ; }
Retrieves the user name as known to this database .
55
12
154,901
public boolean isReadOnly ( ) throws SQLException { ResultSet rs = execute ( "CALL isReadOnlyDatabase()" ) ; rs . next ( ) ; boolean result = rs . getBoolean ( 1 ) ; rs . close ( ) ; return result ; }
Retrieves whether this database is in read - only mode .
58
13
154,902
private StringBuffer toQueryPrefixNoSelect ( String t ) { StringBuffer sb = new StringBuffer ( 255 ) ; return sb . append ( t ) . append ( whereTrue ) ; }
Retrieves &lt ; expression&gt ; WHERE 1 = 1 in string
42
16
154,903
public static ConstantValueExpression makeExpression ( VoltType dataType , String value ) { ConstantValueExpression constantExpr = new ConstantValueExpression ( ) ; constantExpr . setValueType ( dataType ) ; constantExpr . setValue ( value ) ; return constantExpr ; }
Create a new CVE for a given type and value
63
10
154,904
Result executeUpdateStatement ( Session session ) { int count = 0 ; Expression [ ] colExpressions = updateExpressions ; HashMappedList rowset = new HashMappedList ( ) ; Type [ ] colTypes = baseTable . getColumnTypes ( ) ; RangeIteratorBase it = RangeVariable . getIterator ( session , targetRangeVariables ) ; Expression checkCondition = null ; if ( targetTable != baseTable ) { checkCondition = ( ( TableDerived ) targetTable ) . getQueryExpression ( ) . getMainSelect ( ) . checkQueryCondition ; } while ( it . next ( ) ) { session . sessionData . startRowProcessing ( ) ; Row row = it . getCurrentRow ( ) ; Object [ ] data = row . getData ( ) ; Object [ ] newData = getUpdatedData ( session , baseTable , updateColumnMap , colExpressions , colTypes , data ) ; if ( checkCondition != null ) { it . currentData = newData ; boolean check = checkCondition . testCondition ( session ) ; if ( ! check ) { throw Error . error ( ErrorCode . X_44000 ) ; } } rowset . add ( row , newData ) ; } /* debug 190 if (rowset.size() == 0) { System.out.println(targetTable.getName().name + " zero update: session " + session.getId()); } else if (rowset.size() >1) { System.out.println("multiple update: session " + session.getId() + ", " + rowset.size()); } //* debug 190 */ count = update ( session , baseTable , rowset ) ; return Result . getUpdateCountResult ( count ) ; }
Executes an UPDATE statement . It is assumed that the argument is of the correct type .
365
18
154,905
Result executeMergeStatement ( Session session ) { Result resultOut = null ; RowSetNavigator generatedNavigator = null ; PersistentStore store = session . sessionData . getRowStore ( baseTable ) ; if ( generatedIndexes != null ) { resultOut = Result . newUpdateCountResult ( generatedResultMetaData , 0 ) ; generatedNavigator = resultOut . getChainedResult ( ) . getNavigator ( ) ; } int count = 0 ; // data generated for non-matching rows RowSetNavigatorClient newData = new RowSetNavigatorClient ( 8 ) ; // rowset for update operation HashMappedList updateRowSet = new HashMappedList ( ) ; RangeVariable [ ] joinRangeIterators = targetRangeVariables ; // populate insert and update lists RangeIterator [ ] rangeIterators = new RangeIterator [ joinRangeIterators . length ] ; for ( int i = 0 ; i < joinRangeIterators . length ; i ++ ) { rangeIterators [ i ] = joinRangeIterators [ i ] . getIterator ( session ) ; } for ( int currentIndex = 0 ; 0 <= currentIndex ; ) { RangeIterator it = rangeIterators [ currentIndex ] ; boolean beforeFirst = it . isBeforeFirst ( ) ; if ( it . next ( ) ) { if ( currentIndex < joinRangeIterators . length - 1 ) { currentIndex ++ ; continue ; } } else { if ( currentIndex == 1 && beforeFirst ) { Object [ ] data = getMergeInsertData ( session ) ; if ( data != null ) { newData . add ( data ) ; } } it . reset ( ) ; currentIndex -- ; continue ; } // row matches! if ( updateExpressions != null ) { Row row = it . getCurrentRow ( ) ; // this is always the second iterator Object [ ] data = getUpdatedData ( session , baseTable , updateColumnMap , updateExpressions , baseTable . getColumnTypes ( ) , row . getData ( ) ) ; updateRowSet . add ( row , data ) ; } } // run the transaction as a whole, updating and inserting where needed // update any matched rows if ( updateRowSet . size ( ) > 0 ) { count = update ( session , baseTable , updateRowSet ) ; } // insert any non-matched rows newData . beforeFirst ( ) ; while ( newData . 
hasNext ( ) ) { Object [ ] data = newData . getNext ( ) ; baseTable . insertRow ( session , store , data ) ; if ( generatedNavigator != null ) { Object [ ] generatedValues = getGeneratedColumns ( data ) ; generatedNavigator . add ( generatedValues ) ; } } baseTable . fireAfterTriggers ( session , Trigger . INSERT_AFTER , newData ) ; count += newData . getSize ( ) ; if ( resultOut == null ) { return Result . getUpdateCountResult ( count ) ; } else { resultOut . setUpdateCount ( count ) ; return resultOut ; } }
Executes a MERGE statement . It is assumed that the argument is of the correct type .
644
19
154,906
Result executeDeleteStatement ( Session session ) { int count = 0 ; RowSetNavigatorLinkedList oldRows = new RowSetNavigatorLinkedList ( ) ; RangeIterator it = RangeVariable . getIterator ( session , targetRangeVariables ) ; while ( it . next ( ) ) { Row currentRow = it . getCurrentRow ( ) ; oldRows . add ( currentRow ) ; } count = delete ( session , baseTable , oldRows ) ; if ( restartIdentity && targetTable . identitySequence != null ) { targetTable . identitySequence . reset ( ) ; } return Result . getUpdateCountResult ( count ) ; }
Executes a DELETE statement . It is assumed that the argument is of the correct type .
140
20
154,907
int delete ( Session session , Table table , RowSetNavigator oldRows ) { if ( table . fkMainConstraints . length == 0 ) { deleteRows ( session , table , oldRows ) ; oldRows . beforeFirst ( ) ; if ( table . hasTrigger ( Trigger . DELETE_AFTER ) ) { table . fireAfterTriggers ( session , Trigger . DELETE_AFTER , oldRows ) ; } return oldRows . getSize ( ) ; } HashSet path = session . sessionContext . getConstraintPath ( ) ; HashMappedList tableUpdateList = session . sessionContext . getTableUpdateList ( ) ; if ( session . database . isReferentialIntegrity ( ) ) { oldRows . beforeFirst ( ) ; while ( oldRows . hasNext ( ) ) { oldRows . next ( ) ; Row row = oldRows . getCurrentRow ( ) ; path . clear ( ) ; checkCascadeDelete ( session , table , tableUpdateList , row , false , path ) ; } } if ( session . database . isReferentialIntegrity ( ) ) { oldRows . beforeFirst ( ) ; while ( oldRows . hasNext ( ) ) { oldRows . next ( ) ; Row row = oldRows . getCurrentRow ( ) ; path . clear ( ) ; checkCascadeDelete ( session , table , tableUpdateList , row , true , path ) ; } } oldRows . beforeFirst ( ) ; while ( oldRows . hasNext ( ) ) { oldRows . next ( ) ; Row row = oldRows . getCurrentRow ( ) ; if ( ! row . isDeleted ( session ) ) { table . deleteNoRefCheck ( session , row ) ; } } for ( int i = 0 ; i < tableUpdateList . size ( ) ; i ++ ) { Table targetTable = ( Table ) tableUpdateList . getKey ( i ) ; HashMappedList updateList = ( HashMappedList ) tableUpdateList . get ( i ) ; if ( updateList . size ( ) > 0 ) { targetTable . updateRowSet ( session , updateList , null , true ) ; updateList . clear ( ) ; } } oldRows . beforeFirst ( ) ; if ( table . hasTrigger ( Trigger . DELETE_AFTER ) ) { table . fireAfterTriggers ( session , Trigger . DELETE_AFTER , oldRows ) ; } path . clear ( ) ; return oldRows . getSize ( ) ; }
Highest level multiple row delete method . Corresponds to an SQL DELETE .
553
17
154,908
static void mergeUpdate ( HashMappedList rowSet , Row row , Object [ ] newData , int [ ] cols ) { Object [ ] data = ( Object [ ] ) rowSet . get ( row ) ; if ( data != null ) { for ( int j = 0 ; j < cols . length ; j ++ ) { data [ cols [ j ] ] = newData [ cols [ j ] ] ; } } else { rowSet . add ( row , newData ) ; } }
Merges a triggered change with a previous triggered change or adds to list .
107
15
154,909
static boolean mergeKeepUpdate ( Session session , HashMappedList rowSet , int [ ] cols , Type [ ] colTypes , Row row , Object [ ] newData ) { Object [ ] data = ( Object [ ] ) rowSet . get ( row ) ; if ( data != null ) { if ( IndexAVL . compareRows ( row . getData ( ) , newData , cols , colTypes ) != 0 && IndexAVL . compareRows ( newData , data , cols , colTypes ) != 0 ) { return false ; } for ( int j = 0 ; j < cols . length ; j ++ ) { newData [ cols [ j ] ] = data [ cols [ j ] ] ; } rowSet . put ( row , newData ) ; } else { rowSet . add ( row , newData ) ; } return true ; }
Merge the full triggered change with the updated row or add to list . Return false if changes conflict .
187
21
154,910
protected ExportRowData decodeRow ( byte [ ] rowData ) throws IOException { ExportRow row = ExportRow . decodeRow ( m_legacyRow , getPartition ( ) , m_startTS , rowData ) ; return new ExportRowData ( row . values , row . partitionValue , row . partitionId ) ; }
Decode a byte array of row data into ExportRowData
70
12
154,911
public boolean writeRow ( Object row [ ] , CSVWriter writer , boolean skipinternal , BinaryEncoding binaryEncoding , SimpleDateFormat dateFormatter ) { int firstfield = getFirstField ( skipinternal ) ; try { String [ ] fields = new String [ m_tableSchema . size ( ) - firstfield ] ; for ( int i = firstfield ; i < m_tableSchema . size ( ) ; i ++ ) { if ( row [ i ] == null ) { fields [ i - firstfield ] = "NULL" ; } else if ( m_tableSchema . get ( i ) == VoltType . VARBINARY && binaryEncoding != null ) { if ( binaryEncoding == BinaryEncoding . HEX ) { fields [ i - firstfield ] = Encoder . hexEncode ( ( byte [ ] ) row [ i ] ) ; } else { fields [ i - firstfield ] = Encoder . base64Encode ( ( byte [ ] ) row [ i ] ) ; } } else if ( m_tableSchema . get ( i ) == VoltType . STRING ) { fields [ i - firstfield ] = ( String ) row [ i ] ; } else if ( m_tableSchema . get ( i ) == VoltType . TIMESTAMP && dateFormatter != null ) { TimestampType timestamp = ( TimestampType ) row [ i ] ; fields [ i - firstfield ] = dateFormatter . format ( timestamp . asApproximateJavaDate ( ) ) ; } else { fields [ i - firstfield ] = row [ i ] . toString ( ) ; } } writer . writeNext ( fields ) ; } catch ( Exception x ) { x . printStackTrace ( ) ; return false ; } return true ; }
This is for legacy connector .
379
6
154,912
public final int setPartitionColumnName ( String partitionColumnName ) { if ( partitionColumnName == null || partitionColumnName . trim ( ) . isEmpty ( ) ) { return PARTITION_ID_INDEX ; } int idx = - 1 ; for ( String name : m_source . columnNames ) { if ( name . equalsIgnoreCase ( partitionColumnName ) ) { idx = m_source . columnNames . indexOf ( name ) ; break ; } } if ( idx == - 1 ) { m_partitionColumnIndex = PARTITION_ID_INDEX ; m_logger . error ( "Export configuration error: specified " + m_source . tableName + "." + partitionColumnName + " does not exist. A default partition or routing key will be used." ) ; } else { m_partitionColumnIndex = idx ; } return m_partitionColumnIndex ; }
Used for override of column for partitioning . This is for legacy connector only .
195
16
154,913
public static void registerShutdownHook ( int priority , boolean runOnCrash , Runnable action ) { m_instance . addHook ( priority , runOnCrash , action ) ; //Any hook registered lets print crash messsage. ShutdownHooks . m_crashMessage = true ; }
Register an action to be run when the JVM exits .
64
12
154,914
SocketAddress getRemoteSocketAddress ( ) { // a lot could go wrong here, so rather than put in a bunch of code // to check for nulls all down the chain let's do it the simple // yet bulletproof way try { return ( ( SocketChannel ) sendThread . sockKey . channel ( ) ) . socket ( ) . getRemoteSocketAddress ( ) ; } catch ( NullPointerException e ) { return null ; } }
Returns the address to which the socket is connected .
92
10
154,915
SocketAddress getLocalSocketAddress ( ) { // a lot could go wrong here, so rather than put in a bunch of code // to check for nulls all down the chain let's do it the simple // yet bulletproof way try { return ( ( SocketChannel ) sendThread . sockKey . channel ( ) ) . socket ( ) . getLocalSocketAddress ( ) ; } catch ( NullPointerException e ) { return null ; } }
Returns the local address to which the socket is bound .
92
11
154,916
private static String makeThreadName ( String suffix ) { String name = Thread . currentThread ( ) . getName ( ) . replaceAll ( "-EventThread" , "" ) ; return name + suffix ; }
Guard against creating - EventThread - EventThread - EventThread - ... thread names when ZooKeeper object is being created from within a watcher . See ZOOKEEPER - 795 for details .
43
41
154,917
public void commit ( Xid xid , boolean onePhase ) throws XAException { // Comment out following debug statement before public release: System . err . println ( "Performing a " + ( onePhase ? "1-phase" : "2-phase" ) + " commit on " + xid ) ; JDBCXAResource resource = xaDataSource . getResource ( xid ) ; if ( resource == null ) { throw new XAException ( "The XADataSource has no such Xid: " + xid ) ; } resource . commitThis ( onePhase ) ; }
Per the JDBC 3 . 0 spec this commits the transaction for the specified Xid not necessarily for the transaction associated with this XAResource object .
128
31
154,918
public boolean isSameRM ( XAResource xares ) throws XAException { if ( ! ( xares instanceof JDBCXAResource ) ) { return false ; } return xaDataSource == ( ( JDBCXAResource ) xares ) . getXADataSource ( ) ; }
Stub . See implementation comment in the method for why this is not implemented yet .
67
17
154,919
public int prepare ( Xid xid ) throws XAException { validateXid ( xid ) ; /** * @todo: This is where the real 2-phase work should be done to * determine if a commit done here would succeed or not. */ /** * @todo: May improve performance to return XA_RDONLY whenever * possible, but I don't know. * Could determine this by checking if DB instance is in RO mode, * or perhaps (with much difficulty) to determine if there have * been any modifications performed. */ if ( state != XA_STATE_ENDED ) { throw new XAException ( "Invalid XAResource state" ) ; } // throw new XAException( // "Sorry. HSQLDB has not implemented 2-phase commits yet"); state = XA_STATE_PREPARED ; return XA_OK ; // As noted above, should check non-committed work. }
Vote on whether to commit the global transaction .
198
9
154,920
public void rollback ( Xid xid ) throws XAException { JDBCXAResource resource = xaDataSource . getResource ( xid ) ; if ( resource == null ) { throw new XAException ( "The XADataSource has no such Xid in prepared state: " + xid ) ; } resource . rollbackThis ( ) ; }
Per the JDBC 3 . 0 spec this rolls back the transaction for the specified Xid not necessarily for the transaction associated with this XAResource object .
79
32
154,921
private void processValue ( String value ) { // this Option has a separator character if ( hasValueSeparator ( ) ) { // get the separator character char sep = getValueSeparator ( ) ; // store the index for the value separator int index = value . indexOf ( sep ) ; // while there are more value separators while ( index != - 1 ) { // next value to be added if ( values . size ( ) == numberOfArgs - 1 ) { break ; } // store add ( value . substring ( 0 , index ) ) ; // parse value = value . substring ( index + 1 ) ; // get new index index = value . indexOf ( sep ) ; } } // store the actual value or the last value that has been parsed add ( value ) ; }
Processes the value . If this Option has a value separator the value will have to be parsed into individual tokens . When n - 1 tokens have been processed and there are more value separators in the value parsing is ceased and the remaining characters are added as a single token .
169
56
154,922
public boolean enterWhenUninterruptibly ( Guard guard , long time , TimeUnit unit ) { final long timeoutNanos = toSafeNanos ( time , unit ) ; if ( guard . monitor != this ) { throw new IllegalMonitorStateException ( ) ; } final ReentrantLock lock = this . lock ; long startTime = 0L ; boolean signalBeforeWaiting = lock . isHeldByCurrentThread ( ) ; boolean interrupted = Thread . interrupted ( ) ; try { if ( fair || ! lock . tryLock ( ) ) { startTime = initNanoTime ( timeoutNanos ) ; for ( long remainingNanos = timeoutNanos ; ; ) { try { if ( lock . tryLock ( remainingNanos , TimeUnit . NANOSECONDS ) ) { break ; } else { return false ; } } catch ( InterruptedException interrupt ) { interrupted = true ; remainingNanos = remainingNanos ( startTime , timeoutNanos ) ; } } } boolean satisfied = false ; try { while ( true ) { try { if ( guard . isSatisfied ( ) ) { satisfied = true ; } else { final long remainingNanos ; if ( startTime == 0L ) { startTime = initNanoTime ( timeoutNanos ) ; remainingNanos = timeoutNanos ; } else { remainingNanos = remainingNanos ( startTime , timeoutNanos ) ; } satisfied = awaitNanos ( guard , remainingNanos , signalBeforeWaiting ) ; } return satisfied ; } catch ( InterruptedException interrupt ) { interrupted = true ; signalBeforeWaiting = false ; } } } finally { if ( ! satisfied ) { lock . unlock ( ) ; // No need to signal if timed out } } } finally { if ( interrupted ) { Thread . currentThread ( ) . interrupt ( ) ; } } }
Enters this monitor when the guard is satisfied . Blocks at most the given time including both the time to acquire the lock and the time to wait for the guard to be satisfied .
387
36
154,923
public boolean enterIfInterruptibly ( Guard guard , long time , TimeUnit unit ) throws InterruptedException { if ( guard . monitor != this ) { throw new IllegalMonitorStateException ( ) ; } final ReentrantLock lock = this . lock ; if ( ! lock . tryLock ( time , unit ) ) { return false ; } boolean satisfied = false ; try { return satisfied = guard . isSatisfied ( ) ; } finally { if ( ! satisfied ) { lock . unlock ( ) ; } } }
Enters this monitor if the guard is satisfied . Blocks at most the given time acquiring the lock but does not wait for the guard to be satisfied and may be interrupted .
108
34
154,924
public boolean waitFor ( Guard guard , long time , TimeUnit unit ) throws InterruptedException { final long timeoutNanos = toSafeNanos ( time , unit ) ; if ( ! ( ( guard . monitor == this ) & lock . isHeldByCurrentThread ( ) ) ) { throw new IllegalMonitorStateException ( ) ; } if ( guard . isSatisfied ( ) ) { return true ; } if ( Thread . interrupted ( ) ) { throw new InterruptedException ( ) ; } return awaitNanos ( guard , timeoutNanos , true ) ; }
Waits for the guard to be satisfied . Waits at most the given time and may be interrupted . May be called only by a thread currently occupying this monitor .
120
33
154,925
public void ack ( long hsId , boolean isEOS , long targetId , int blockIndex ) { rejoinLog . debug ( "Queue ack for hsId:" + hsId + " isEOS: " + isEOS + " targetId:" + targetId + " blockIndex: " + blockIndex ) ; m_blockIndices . offer ( Pair . of ( hsId , new RejoinDataAckMessage ( isEOS , targetId , blockIndex ) ) ) ; }
Ack with a positive block index .
111
8
154,926
@ Override public boolean absolute ( int row ) throws SQLException { checkClosed ( ) ; if ( rowCount == 0 ) { if ( row == 0 ) { return true ; } return false ; } if ( row == 0 ) { beforeFirst ( ) ; return true ; } if ( rowCount + row < 0 ) { beforeFirst ( ) ; return false ; } if ( row > rowCount ) { cursorPosition = Position . afterLast ; if ( row == rowCount + 1 ) { return true ; } else { return false ; } } try { // for negative row numbers or row numbers lesser then activeRowIndex, resetRowPosition // method is called and the cursor advances to the desired row from top of the table if ( row < 0 ) { row += rowCount ; row ++ ; } if ( table . getActiveRowIndex ( ) > row || cursorPosition != Position . middle ) { table . resetRowPosition ( ) ; table . advanceToRow ( 0 ) ; } cursorPosition = Position . middle ; return table . advanceToRow ( row - 1 ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
Moves the cursor to the given row number in this ResultSet object .
247
15
154,927
@ Override public int findColumn ( String columnLabel ) throws SQLException { checkClosed ( ) ; try { return table . getColumnIndex ( columnLabel ) + 1 ; } catch ( IllegalArgumentException iax ) { throw SQLError . get ( iax , SQLError . COLUMN_NOT_FOUND , columnLabel ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
Maps the given ResultSet column label to its ResultSet column index .
101
14
154,928
@ Override public BigDecimal getBigDecimal ( int columnIndex ) throws SQLException { checkColumnBounds ( columnIndex ) ; try { final VoltType type = table . getColumnType ( columnIndex - 1 ) ; BigDecimal decimalValue = null ; switch ( type ) { case TINYINT : decimalValue = new BigDecimal ( table . getLong ( columnIndex - 1 ) ) ; break ; case SMALLINT : decimalValue = new BigDecimal ( table . getLong ( columnIndex - 1 ) ) ; break ; case INTEGER : decimalValue = new BigDecimal ( table . getLong ( columnIndex - 1 ) ) ; break ; case BIGINT : decimalValue = new BigDecimal ( table . getLong ( columnIndex - 1 ) ) ; break ; case FLOAT : decimalValue = new BigDecimal ( table . getDouble ( columnIndex - 1 ) ) ; break ; case DECIMAL : decimalValue = table . getDecimalAsBigDecimal ( columnIndex - 1 ) ; break ; default : throw new IllegalArgumentException ( "Cannot get BigDecimal value for column type '" + type + "'" ) ; } return table . wasNull ( ) ? null : decimalValue ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
ResultSet object as a java . math . BigDecimal with full precision .
287
16
154,929
@ Override public InputStream getBinaryStream ( int columnIndex ) throws SQLException { checkColumnBounds ( columnIndex ) ; try { return new ByteArrayInputStream ( table . getStringAsBytes ( columnIndex - 1 ) ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
ResultSet object as a stream of uninterpreted bytes .
73
12
154,930
@ Override public Blob getBlob ( int columnIndex ) throws SQLException { checkColumnBounds ( columnIndex ) ; try { return new SerialBlob ( table . getStringAsBytes ( columnIndex - 1 ) ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
ResultSet object as a Blob object in the Java programming language .
71
14
154,931
@ Override public boolean getBoolean ( int columnIndex ) throws SQLException { checkColumnBounds ( columnIndex ) ; // TODO: Tempting to apply a != 0 operation on numbers and // .equals("true") on strings, but... hacky try { return ( new Long ( table . getLong ( columnIndex - 1 ) ) ) . intValue ( ) == 1 ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
ResultSet object as a boolean in the Java programming language .
105
12
154,932
@ Override public byte getByte ( int columnIndex ) throws SQLException { checkColumnBounds ( columnIndex ) ; try { Long longValue = getPrivateInteger ( columnIndex ) ; if ( longValue > Byte . MAX_VALUE || longValue < Byte . MIN_VALUE ) { throw new SQLException ( "Value out of byte range" ) ; } return longValue . byteValue ( ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
ResultSet object as a byte in the Java programming language .
108
12
154,933
@ Override public byte [ ] getBytes ( int columnIndex ) throws SQLException { checkColumnBounds ( columnIndex ) ; try { if ( table . getColumnType ( columnIndex - 1 ) == VoltType . STRING ) return table . getStringAsBytes ( columnIndex - 1 ) ; else if ( table . getColumnType ( columnIndex - 1 ) == VoltType . VARBINARY ) return table . getVarbinary ( columnIndex - 1 ) ; else throw SQLError . get ( SQLError . CONVERSION_NOT_FOUND , table . getColumnType ( columnIndex - 1 ) , "byte[]" ) ; } catch ( SQLException x ) { throw x ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
ResultSet object as a byte array in the Java programming language .
174
13
154,934
@ Override public Clob getClob ( int columnIndex ) throws SQLException { checkColumnBounds ( columnIndex ) ; try { return new SerialClob ( table . getString ( columnIndex - 1 ) . toCharArray ( ) ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
ResultSet object as a Clob object in the Java programming language .
75
14
154,935
@ Override public float getFloat ( int columnIndex ) throws SQLException { checkColumnBounds ( columnIndex ) ; try { final VoltType type = table . getColumnType ( columnIndex - 1 ) ; Double doubleValue = null ; switch ( type ) { case TINYINT : doubleValue = new Double ( table . getLong ( columnIndex - 1 ) ) ; break ; case SMALLINT : doubleValue = new Double ( table . getLong ( columnIndex - 1 ) ) ; break ; case INTEGER : doubleValue = new Double ( table . getLong ( columnIndex - 1 ) ) ; break ; case BIGINT : doubleValue = new Double ( table . getLong ( columnIndex - 1 ) ) ; break ; case FLOAT : doubleValue = new Double ( table . getDouble ( columnIndex - 1 ) ) ; break ; case DECIMAL : doubleValue = table . getDecimalAsBigDecimal ( columnIndex - 1 ) . doubleValue ( ) ; break ; default : throw new IllegalArgumentException ( "Cannot get float value for column type '" + type + "'" ) ; } if ( table . wasNull ( ) ) { doubleValue = new Double ( 0 ) ; } else if ( Math . abs ( doubleValue ) > new Double ( Float . MAX_VALUE ) ) { throw new SQLException ( "Value out of float range" ) ; } return doubleValue . floatValue ( ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
ResultSet object as a float in the Java programming language .
329
12
154,936
@ Override public int getInt ( int columnIndex ) throws SQLException { checkColumnBounds ( columnIndex ) ; try { Long longValue = getPrivateInteger ( columnIndex ) ; if ( longValue > Integer . MAX_VALUE || longValue < Integer . MIN_VALUE ) { throw new SQLException ( "Value out of int range" ) ; } return longValue . intValue ( ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
ResultSet object as an int in the Java programming language .
108
12
154,937
@ Override public long getLong ( int columnIndex ) throws SQLException { checkColumnBounds ( columnIndex ) ; try { Long longValue = getPrivateInteger ( columnIndex ) ; return longValue ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
ResultSet object as a long in the Java programming language .
65
12
154,938
@ Override public Reader getNCharacterStream ( int columnIndex ) throws SQLException { checkColumnBounds ( columnIndex ) ; try { String value = table . getString ( columnIndex - 1 ) ; if ( ! wasNull ( ) ) return new StringReader ( value ) ; return null ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
ResultSet object as a java . io . Reader object .
84
12
154,939
@ Override public NClob getNClob ( int columnIndex ) throws SQLException { checkColumnBounds ( columnIndex ) ; try { return new JDBC4NClob ( table . getString ( columnIndex - 1 ) . toCharArray ( ) ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
ResultSet object as a NClob object in the Java programming language .
80
15
154,940
@ Override public Object getObject ( int columnIndex ) throws SQLException { checkColumnBounds ( columnIndex ) ; try { VoltType type = table . getColumnType ( columnIndex - 1 ) ; if ( type == VoltType . TIMESTAMP ) return getTimestamp ( columnIndex ) ; else return table . get ( columnIndex - 1 , type ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
ResultSet object as an Object in the Java programming language .
99
12
154,941
@ Override public short getShort ( int columnIndex ) throws SQLException { checkColumnBounds ( columnIndex ) ; try { Long longValue = getPrivateInteger ( columnIndex ) ; if ( longValue > Short . MAX_VALUE || longValue < Short . MIN_VALUE ) { throw new SQLException ( "Value out of short range" ) ; } return longValue . shortValue ( ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
ResultSet object as a short in the Java programming language .
108
12
154,942
@ Override @ Deprecated public InputStream getUnicodeStream ( int columnIndex ) throws SQLException { checkColumnBounds ( columnIndex ) ; throw SQLError . noSupport ( ) ; }
Deprecated . use getCharacterStream in place of getUnicodeStream
46
15
154,943
@ Override @ Deprecated public InputStream getUnicodeStream ( String columnLabel ) throws SQLException { return getUnicodeStream ( findColumn ( columnLabel ) ) ; }
Deprecated . use getCharacterStream instead
41
8
154,944
@ Override public boolean last ( ) throws SQLException { checkClosed ( ) ; if ( rowCount == 0 ) { return false ; } try { if ( cursorPosition != Position . middle ) { cursorPosition = Position . middle ; table . resetRowPosition ( ) ; table . advanceToRow ( 0 ) ; } return table . advanceToRow ( rowCount - 1 ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
Moves the cursor to the last row in this ResultSet object .
102
14
154,945
@ Override public boolean next ( ) throws SQLException { checkClosed ( ) ; if ( cursorPosition == Position . afterLast || table . getActiveRowIndex ( ) == rowCount - 1 ) { cursorPosition = Position . afterLast ; return false ; } if ( cursorPosition == Position . beforeFirst ) { cursorPosition = Position . middle ; } try { return table . advanceRow ( ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
Moves the cursor forward one row from its current position .
106
12
154,946
@ Override public boolean previous ( ) throws SQLException { checkClosed ( ) ; if ( cursorPosition == Position . afterLast ) { return last ( ) ; } if ( cursorPosition == Position . beforeFirst || table . getActiveRowIndex ( ) <= 0 ) { beforeFirst ( ) ; return false ; } try { int tempRowIndex = table . getActiveRowIndex ( ) ; table . resetRowPosition ( ) ; table . advanceToRow ( 0 ) ; return table . advanceToRow ( tempRowIndex - 1 ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
Moves the cursor to the previous row in this ResultSet object .
135
14
154,947
@ Override public boolean relative ( int rows ) throws SQLException { checkClosed ( ) ; if ( rowCount == 0 ) { return false ; } if ( cursorPosition == Position . afterLast && rows > 0 ) { return false ; } if ( cursorPosition == Position . beforeFirst && rows <= 0 ) { return false ; } if ( table . getActiveRowIndex ( ) + rows >= rowCount ) { cursorPosition = Position . afterLast ; if ( table . getActiveRowIndex ( ) + rows == rowCount ) { return true ; } return false ; } try { // for negative row numbers, resetRowPosition method is called // and the cursor advances to the desired row from top of the table int rowsToMove = table . getActiveRowIndex ( ) + rows ; if ( cursorPosition == Position . beforeFirst || rows < 0 ) { if ( cursorPosition == Position . afterLast ) { rowsToMove = rowCount + rows ; } else if ( cursorPosition == Position . beforeFirst ) { rowsToMove = rows - 1 ; } else { rowsToMove = table . getActiveRowIndex ( ) + rows ; } if ( rowsToMove < 0 ) { beforeFirst ( ) ; return false ; } table . resetRowPosition ( ) ; table . advanceToRow ( 0 ) ; } cursorPosition = Position . middle ; return table . advanceToRow ( rowsToMove ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
Moves the cursor a relative number of rows either positive or negative .
316
14
154,948
@ Override public void setFetchDirection ( int direction ) throws SQLException { if ( ( direction != FETCH_FORWARD ) && ( direction != FETCH_REVERSE ) && ( direction != FETCH_UNKNOWN ) ) throw SQLError . get ( SQLError . ILLEGAL_STATEMENT , direction ) ; this . fetchDirection = direction ; }
object will be processed .
89
5
154,949
@ Override public void updateAsciiStream ( String columnLabel , InputStream x , long length ) throws SQLException { throw SQLError . noSupport ( ) ; }
the specified number of bytes .
40
6
154,950
@ Override public void updateBlob ( String columnLabel , InputStream inputStream , long length ) throws SQLException { throw SQLError . noSupport ( ) ; }
have the specified number of bytes .
39
7
154,951
@ Override public void updateNClob ( String columnLabel , Reader reader , long length ) throws SQLException { throw SQLError . noSupport ( ) ; }
given number of characters long .
38
6
154,952
@ Override public void updateObject ( String columnLabel , Object x , int scaleOrLength ) throws SQLException { throw SQLError . noSupport ( ) ; }
Updates the designated column with an Object value .
38
10
154,953
@ Override public boolean wasNull ( ) throws SQLException { checkClosed ( ) ; try { return table . wasNull ( ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
Reports whether the last column read had a value of SQL NULL .
51
13
154,954
public Object [ ] getRowData ( ) throws SQLException { Object [ ] row = new Object [ columnCount ] ; for ( int i = 1 ; i < columnCount + 1 ; i ++ ) { row [ i - 1 ] = getObject ( i ) ; } return row ; }
Retrieve the raw row data as an array
63
9
154,955
void transformAndQueue ( T event , long systemCurrentTimeMillis ) { // if you're super unlucky, this blows up the stack if ( rand . nextDouble ( ) < 0.05 ) { // duplicate this message (note recursion means maybe more than duped) transformAndQueue ( event , systemCurrentTimeMillis ) ; } long delayms = nextZipfDelay ( ) ; delayed . add ( systemCurrentTimeMillis + delayms , event ) ; }
Possibly duplicate and delay by some random amount .
100
10
154,956
@ Override public T next ( long systemCurrentTimeMillis ) { // drain all the waiting messages from the source (up to 10k) while ( delayed . size ( ) < 10000 ) { T event = source . next ( systemCurrentTimeMillis ) ; if ( event == null ) { break ; } transformAndQueue ( event , systemCurrentTimeMillis ) ; } return delayed . nextReady ( systemCurrentTimeMillis ) ; }
Return the next event that is safe for delivery or null if there are no safe objects to deliver .
93
20
154,957
public int compareNames ( SchemaColumn that ) { String thatTbl ; String thisTbl ; if ( m_tableAlias != null && that . m_tableAlias != null ) { thisTbl = m_tableAlias ; thatTbl = that . m_tableAlias ; } else { thisTbl = m_tableName ; thatTbl = that . m_tableName ; } int tblCmp = nullSafeStringCompareTo ( thisTbl , thatTbl ) ; if ( tblCmp != 0 ) { return tblCmp ; } String thisCol ; String thatCol ; if ( m_columnName != null && that . m_columnName != null ) { thisCol = m_columnName ; thatCol = that . m_columnName ; } else { thisCol = m_columnAlias ; thatCol = that . m_columnAlias ; } int colCmp = nullSafeStringCompareTo ( thisCol , thatCol ) ; return colCmp ; }
Compare this schema column to the input .
213
8
154,958
public SchemaColumn copyAndReplaceWithTVE ( int colIndex ) { TupleValueExpression newTve ; if ( m_expression instanceof TupleValueExpression ) { newTve = ( TupleValueExpression ) m_expression . clone ( ) ; newTve . setColumnIndex ( colIndex ) ; } else { newTve = new TupleValueExpression ( m_tableName , m_tableAlias , m_columnName , m_columnAlias , m_expression , colIndex ) ; } return new SchemaColumn ( m_tableName , m_tableAlias , m_columnName , m_columnAlias , newTve , m_differentiator ) ; }
Return a copy of this SchemaColumn but with the input expression replaced by an appropriate TupleValueExpression .
151
23
154,959
@ Override synchronized void offer ( TransactionTask task ) { Iv2Trace . logTransactionTaskQueueOffer ( task ) ; m_backlog . addLast ( task ) ; taskQueueOffer ( ) ; }
Stick this task in the backlog . Many network threads may be racing to reach here synchronize to serialize queue order . Always returns true in this case side effect of extending TransactionTaskQueue .
46
39
154,960
private boolean aquireFileLock ( ) { // PRE: // // raf is never null and is never closed upon entry. // // Rhetorical question to self: How does one tell if a RandomAccessFile // is closed, short of invoking an operation and getting an IOException // the says its closed (assuming you can control the Locale of the error // message)? // final RandomAccessFile lraf = super . raf ; // In an ideal world, we would use a lock region back off approach, // starting with region MAX_LOCK_REGION, then MAX_NFS_LOCK_REGION, // then MIN_LOCK_REGION. // // In practice, however, it is just generally unwise to mount network // file system database instances. Be warned. // // In general, it is probably also unwise to mount removable media // database instances that are not read-only. boolean success = false ; try { if ( this . fileLock != null ) { // API says never throws exception, but I suspect // it's quite possible some research / FOSS JVMs might // still throw unsupported operation exceptions on certain // NIO classes...better to be safe than sorry. if ( this . fileLock . isValid ( ) ) { return true ; } else { // It's not valid, so releasing it is a no-op. // // However, we should still clean up the referenceand hope // no previous complications exist (a hung FileLock in a // flaky JVM) or that gc kicks in and saves the day... // (unlikely, though). this . releaseFileLock ( ) ; } } if ( isPosixManditoryFileLock ( ) ) { try { Runtime . getRuntime ( ) . exec ( new String [ ] { "chmod" , "g+s,g-x" , file . getPath ( ) } ) ; } catch ( Exception ex ) { //ex.printStackTrace(); } } // Note: from FileChannel.tryLock(...) JavaDoc: // // @return A lock object representing the newly-acquired lock, // or <tt>null</tt> if the lock could not be acquired // because another program holds an overlapping lock this . fileLock = lraf . getChannel ( ) . tryLock ( 0 , MIN_LOCK_REGION , false ) ; // According to the API, if it's non-null, it must be valid. 
// This may not actually yet be the full truth of the matter under // all commonly available JVM implementations. // fileLock.isValid() API says it never throws, though, so // with fingers crossed... success = ( this . fileLock != null && this . fileLock . isValid ( ) ) ; } catch ( Exception e ) { } if ( ! success ) { this . releaseFileLock ( ) ; } return success ; }
does the real work of aquiring the FileLock
601
10
154,961
private boolean releaseFileLock ( ) { // Note: Closing the super class RandomAccessFile has the // side-effect of closing the file lock's FileChannel, // so we do not deal with this here. boolean success = false ; if ( this . fileLock == null ) { success = true ; } else { try { this . fileLock . release ( ) ; success = true ; } catch ( Exception e ) { } finally { this . fileLock = null ; } } return success ; }
does the real work of releasing the FileLock
103
9
154,962
protected Object addOrRemove ( int intKey , Object objectValue , boolean remove ) { int hash = intKey ; int index = hashIndex . getHashIndex ( hash ) ; int lookup = hashIndex . hashTable [ index ] ; int lastLookup = - 1 ; Object returnValue = null ; for ( ; lookup >= 0 ; lastLookup = lookup , lookup = hashIndex . getNextLookup ( lookup ) ) { if ( intKey == intKeyTable [ lookup ] ) { break ; } } if ( lookup >= 0 ) { if ( remove ) { if ( intKey == 0 ) { hasZeroKey = false ; zeroKeyIndex = - 1 ; } intKeyTable [ lookup ] = 0 ; returnValue = objectValueTable [ lookup ] ; objectValueTable [ lookup ] = null ; hashIndex . unlinkNode ( index , lastLookup , lookup ) ; if ( accessTable != null ) { accessTable [ lookup ] = 0 ; } return returnValue ; } if ( isObjectValue ) { returnValue = objectValueTable [ lookup ] ; objectValueTable [ lookup ] = objectValue ; } if ( accessTable != null ) { accessTable [ lookup ] = accessCount ++ ; } return returnValue ; } // not found if ( remove ) { return returnValue ; } if ( hashIndex . elementCount >= threshold ) { if ( reset ( ) ) { return addOrRemove ( intKey , objectValue , remove ) ; } else { return null ; } } lookup = hashIndex . linkNode ( index , lastLookup ) ; intKeyTable [ lookup ] = intKey ; if ( intKey == 0 ) { hasZeroKey = true ; zeroKeyIndex = lookup ; } objectValueTable [ lookup ] = objectValue ; if ( accessTable != null ) { accessTable [ lookup ] = accessCount ++ ; } return returnValue ; }
type - specific method for adding or removing keys in int - > Object maps
392
15
154,963
protected Object removeObject ( Object objectKey , boolean removeRow ) { if ( objectKey == null ) { return null ; } int hash = objectKey . hashCode ( ) ; int index = hashIndex . getHashIndex ( hash ) ; int lookup = hashIndex . hashTable [ index ] ; int lastLookup = - 1 ; Object returnValue = null ; for ( ; lookup >= 0 ; lastLookup = lookup , lookup = hashIndex . getNextLookup ( lookup ) ) { if ( objectKeyTable [ lookup ] . equals ( objectKey ) ) { objectKeyTable [ lookup ] = null ; hashIndex . unlinkNode ( index , lastLookup , lookup ) ; if ( isObjectValue ) { returnValue = objectValueTable [ lookup ] ; objectValueTable [ lookup ] = null ; } if ( removeRow ) { removeRow ( lookup ) ; } return returnValue ; } } // not found return returnValue ; }
type specific method for Object sets or Object - > Object maps
198
12
154,964
public void clear ( ) { if ( hashIndex . modified ) { accessCount = 0 ; accessMin = accessCount ; hasZeroKey = false ; zeroKeyIndex = - 1 ; clearElementArrays ( 0 , hashIndex . linkTable . length ) ; hashIndex . clear ( ) ; if ( minimizeOnEmpty ) { rehash ( initialCapacity ) ; } } }
Clear the map completely .
79
5
154,965
public int getAccessCountCeiling ( int count , int margin ) { return ArrayCounter . rank ( accessTable , hashIndex . newNodePointer , count , accessMin + 1 , accessCount , margin ) ; }
Return the max accessCount value for count elements with the lowest access count . Always return at least accessMin + 1
47
23
154,966
protected void clear ( int count , int margin ) { if ( margin < 64 ) { margin = 64 ; } int maxlookup = hashIndex . newNodePointer ; int accessBase = getAccessCountCeiling ( count , margin ) ; for ( int lookup = 0 ; lookup < maxlookup ; lookup ++ ) { Object o = objectKeyTable [ lookup ] ; if ( o != null && accessTable [ lookup ] < accessBase ) { removeObject ( o , false ) ; } } accessMin = accessBase ; }
Clear approximately count elements from the map starting with those with low accessTable ranking .
112
16
154,967
public void materialise ( Session session ) { PersistentStore store ; // table constructors if ( isDataExpression ) { store = session . sessionData . getSubqueryRowStore ( table ) ; dataExpression . insertValuesIntoSubqueryTable ( session , store ) ; return ; } Result result = queryExpression . getResult ( session , isExistsPredicate ? 1 : 0 ) ; RowSetNavigatorData navigator = ( ( RowSetNavigatorData ) result . getNavigator ( ) ) ; if ( uniqueRows ) { navigator . removeDuplicates ( ) ; } store = session . sessionData . getSubqueryRowStore ( table ) ; table . insertResult ( store , result ) ; result . getNavigator ( ) . close ( ) ; }
Fills the table with a result set
167
8
154,968
static public void encodeDecimal ( final FastSerializer fs , BigDecimal value ) throws IOException { fs . write ( ( byte ) VoltDecimalHelper . kDefaultScale ) ; fs . write ( ( byte ) 16 ) ; fs . write ( VoltDecimalHelper . serializeBigDecimal ( value ) ) ; }
Read a decimal according to the Export encoding specification .
69
10
154,969
static public void encodeGeographyPoint ( final FastSerializer fs , GeographyPointValue value ) throws IOException { final int length = GeographyPointValue . getLengthInBytes ( ) ; ByteBuffer bb = ByteBuffer . allocate ( length ) ; bb . order ( ByteOrder . nativeOrder ( ) ) ; value . flattenToBuffer ( bb ) ; byte [ ] array = bb . array ( ) ; assert ( array . length == length ) ; fs . write ( array ) ; }
Encode a GEOGRAPHY_POINT according to the Export encoding specification .
107
18
154,970
static public void encodeGeography ( final FastSerializer fs , GeographyValue value ) throws IOException { ByteBuffer bb = ByteBuffer . allocate ( value . getLengthInBytes ( ) ) ; bb . order ( ByteOrder . nativeOrder ( ) ) ; value . flattenToBuffer ( bb ) ; byte [ ] array = bb . array ( ) ; fs . writeInt ( array . length ) ; fs . write ( array ) ; }
Encode a GEOGRAPHY according to the Export encoding specification .
97
15
154,971
@ SuppressWarnings ( "unchecked" ) private ImmutableMap < String , ProcedureRunnerNTGenerator > loadSystemProcedures ( boolean startup ) { ImmutableMap . Builder < String , ProcedureRunnerNTGenerator > builder = ImmutableMap . < String , ProcedureRunnerNTGenerator > builder ( ) ; Set < Entry < String , Config > > entrySet = SystemProcedureCatalog . listing . entrySet ( ) ; for ( Entry < String , Config > entry : entrySet ) { String procName = entry . getKey ( ) ; Config sysProc = entry . getValue ( ) ; // transactional sysprocs handled by LoadedProcedureSet if ( sysProc . transactional ) { continue ; } final String className = sysProc . getClassname ( ) ; Class < ? extends VoltNonTransactionalProcedure > procClass = null ; // this check is for sysprocs that don't have a procedure class if ( className != null ) { try { procClass = ( Class < ? extends VoltNonTransactionalProcedure > ) Class . forName ( className ) ; } catch ( final ClassNotFoundException e ) { if ( sysProc . commercial ) { continue ; } VoltDB . crashLocalVoltDB ( "Missing Java class for NT System Procedure: " + procName ) ; } if ( startup ) { // This is a startup-time check to make sure we can instantiate try { if ( ( procClass . newInstance ( ) instanceof VoltNTSystemProcedure ) == false ) { VoltDB . crashLocalVoltDB ( "NT System Procedure is incorrect class type: " + procName ) ; } } catch ( InstantiationException | IllegalAccessException e ) { VoltDB . crashLocalVoltDB ( "Unable to instantiate NT System Procedure: " + procName ) ; } } ProcedureRunnerNTGenerator prntg = new ProcedureRunnerNTGenerator ( procClass ) ; builder . put ( procName , prntg ) ; } } return builder . build ( ) ; }
Load the system procedures . Optionally don t load UAC but use parameter instead .
442
17
154,972
@ SuppressWarnings ( "unchecked" ) synchronized void update ( CatalogContext catalogContext ) { CatalogMap < Procedure > procedures = catalogContext . database . getProcedures ( ) ; Map < String , ProcedureRunnerNTGenerator > runnerGeneratorMap = new TreeMap <> ( ) ; for ( Procedure procedure : procedures ) { if ( procedure . getTransactional ( ) ) { continue ; } // this code is mostly lifted from transactional procedures String className = procedure . getClassname ( ) ; Class < ? extends VoltNonTransactionalProcedure > clz = null ; try { clz = ( Class < ? extends VoltNonTransactionalProcedure > ) catalogContext . classForProcedureOrUDF ( className ) ; } catch ( ClassNotFoundException e ) { if ( className . startsWith ( "org.voltdb." ) ) { String msg = String . format ( LoadedProcedureSet . ORGVOLTDB_PROCNAME_ERROR_FMT , className ) ; VoltDB . crashLocalVoltDB ( msg , false , null ) ; } else { String msg = String . format ( LoadedProcedureSet . UNABLETOLOAD_ERROR_FMT , className ) ; VoltDB . crashLocalVoltDB ( msg , false , null ) ; } } // The ProcedureRunnerNTGenerator has all of the dangerous and slow // stuff in it. Like classfinding, instantiation, and reflection. ProcedureRunnerNTGenerator prntg = new ProcedureRunnerNTGenerator ( clz ) ; runnerGeneratorMap . put ( procedure . getTypeName ( ) , prntg ) ; } m_procs = ImmutableMap . < String , ProcedureRunnerNTGenerator > builder ( ) . putAll ( runnerGeneratorMap ) . build ( ) ; // reload all sysprocs loadSystemProcedures ( false ) ; // Set the system to start accepting work again now that ebertything is updated. // We had to stop because stats would be wonky if we called a proc while updating // this stuff. m_paused = false ; // release all of the pending invocations into the real queue m_pendingInvocations . forEach ( pi -> callProcedureNT ( pi . ciHandle , pi . user , pi . ccxn , pi . isAdmin , pi . ntPriority , pi . task ) ) ; m_pendingInvocations . clear ( ) ; }
Refresh the NT procedures when the catalog changes .
533
10
154,973
synchronized void callProcedureNT ( final long ciHandle , final AuthUser user , final Connection ccxn , final boolean isAdmin , final boolean ntPriority , final StoredProcedureInvocation task ) { // If paused, stuff a record of the invocation into a queue that gets // drained when un-paused. We're counting on regular upstream backpressure // to prevent this from getting too out of hand. if ( m_paused ) { PendingInvocation pi = new PendingInvocation ( ciHandle , user , ccxn , isAdmin , ntPriority , task ) ; m_pendingInvocations . add ( pi ) ; return ; } String procName = task . getProcName ( ) ; final ProcedureRunnerNTGenerator prntg ; if ( procName . startsWith ( "@" ) ) { prntg = m_sysProcs . get ( procName ) ; } else { prntg = m_procs . get ( procName ) ; } final ProcedureRunnerNT runner ; try { runner = prntg . generateProcedureRunnerNT ( user , ccxn , isAdmin , ciHandle , task . getClientHandle ( ) , task . getBatchTimeout ( ) ) ; } catch ( InstantiationException | IllegalAccessException e1 ) { // I don't expect to hit this, but it's here... // must be done as IRM to CI mailbox for backpressure accounting ClientResponseImpl response = new ClientResponseImpl ( ClientResponseImpl . UNEXPECTED_FAILURE , new VoltTable [ 0 ] , "Could not create running context for " + procName + "." , task . getClientHandle ( ) ) ; InitiateResponseMessage irm = InitiateResponseMessage . messageForNTProcResponse ( ciHandle , ccxn . connectionId ( ) , response ) ; m_mailbox . deliver ( irm ) ; return ; } m_outstanding . put ( runner . m_id , runner ) ; Runnable invocationRunnable = new Runnable ( ) { @ Override public void run ( ) { try { runner . call ( task . getParams ( ) . toArray ( ) ) ; } catch ( Throwable ex ) { ex . 
printStackTrace ( ) ; throw ex ; } } } ; try { // pick the executor service based on priority // - new (from user) txns get regular one // - sub tasks and sub procs generated by nt procs get // immediate exec service (priority) if ( ntPriority ) { m_priorityExecutorService . submit ( invocationRunnable ) ; } else { m_primaryExecutorService . submit ( invocationRunnable ) ; } } catch ( RejectedExecutionException e ) { handleNTProcEnd ( runner ) ; // I really don't expect this to happen... but it's here. // must be done as IRM to CI mailbox for backpressure accounting ClientResponseImpl response = new ClientResponseImpl ( ClientResponseImpl . UNEXPECTED_FAILURE , new VoltTable [ 0 ] , "Could not submit NT procedure " + procName + " to exec service for ." , task . getClientHandle ( ) ) ; InitiateResponseMessage irm = InitiateResponseMessage . messageForNTProcResponse ( ciHandle , ccxn . connectionId ( ) , response ) ; m_mailbox . deliver ( irm ) ; return ; } }
Invoke an NT procedure asynchronously on one of the exec services .
744
15
154,974
void handleCallbacksForFailedHosts ( final Set < Integer > failedHosts ) { for ( ProcedureRunnerNT runner : m_outstanding . values ( ) ) { runner . processAnyCallbacksFromFailedHosts ( failedHosts ) ; } }
For all - host NT procs use site failures to call callbacks for hosts that will obviously never respond .
56
22
154,975
private boolean isDefinedFunctionName ( String functionName ) { return FunctionForVoltDB . isFunctionNameDefined ( functionName ) || FunctionSQL . isFunction ( functionName ) || FunctionCustom . getFunctionId ( functionName ) != ID_NOT_DEFINED || ( null != m_schema . findChild ( "ud_function" , functionName ) ) ; }
Find out if the function is defined . It might be defined in the FunctionForVoltDB table . It also might be in the VoltXML .
82
31
154,976
public Object upper ( Session session , Object data ) { if ( data == null ) { return null ; } if ( typeCode == Types . SQL_CLOB ) { String result = ( ( ClobData ) data ) . getSubString ( session , 0 , ( int ) ( ( ClobData ) data ) . length ( session ) ) ; result = collation . toUpperCase ( result ) ; ClobData clob = session . createClob ( result . length ( ) ) ; clob . setString ( session , 0 , result ) ; return clob ; } return collation . toUpperCase ( ( String ) data ) ; }
Memory limits apply to Upper and Lower implementations with Clob data
139
12
154,977
public void outputStartTime ( final long startTimeMsec ) { log . format ( Locale . US , "#[StartTime: %.3f (seconds since epoch), %s]\n" , startTimeMsec / 1000.0 , ( new Date ( startTimeMsec ) ) . toString ( ) ) ; }
Log a start time in the log .
71
8
154,978
public String latencyHistoReport ( ) { ByteArrayOutputStream baos = new ByteArrayOutputStream ( ) ; PrintStream pw = null ; try { pw = new PrintStream ( baos , false , Charsets . UTF_8 . name ( ) ) ; } catch ( UnsupportedEncodingException e ) { Throwables . propagate ( e ) ; } //Get a latency report in milliseconds m_latencyHistogram . outputPercentileDistributionVolt ( pw , 1 , 1000.0 ) ; return new String ( baos . toByteArray ( ) , Charsets . UTF_8 ) ; }
Generate a human - readable report of latencies in the form of a histogram . Latency is in milliseconds
133
23
154,979
public synchronized BBContainer getNextChunk ( ) throws IOException { if ( m_chunkReaderException != null ) { throw m_chunkReaderException ; } if ( ! m_hasMoreChunks . get ( ) ) { final Container c = m_availableChunks . poll ( ) ; return c ; } if ( m_chunkReader == null ) { m_chunkReader = new ChunkReader ( ) ; m_chunkReaderThread = new Thread ( m_chunkReader , "ChunkReader" ) ; m_chunkReaderThread . start ( ) ; } Container c = null ; while ( c == null && ( m_hasMoreChunks . get ( ) || ! m_availableChunks . isEmpty ( ) ) ) { c = m_availableChunks . poll ( ) ; if ( c == null ) { try { wait ( ) ; } catch ( InterruptedException e ) { throw new IOException ( e ) ; } } } if ( c != null ) { m_chunkReads . release ( ) ; } else { if ( m_chunkReaderException != null ) { throw m_chunkReaderException ; } } return c ; }
Will get the next chunk of the table that is just over the chunk size
257
15
154,980
protected void validateSpecifiedUserAndPassword ( String user , String password ) throws SQLException { String configuredUser = connProperties . getProperty ( "user" ) ; String configuredPassword = connProperties . getProperty ( "password" ) ; if ( ( ( user == null && configuredUser != null ) || ( user != null && configuredUser == null ) ) || ( user != null && ! user . equals ( configuredUser ) ) || ( ( password == null && configuredPassword != null ) || ( password != null && configuredPassword == null ) ) || ( password != null && ! password . equals ( configuredPassword ) ) ) { throw new SQLException ( "Given user name or password does not " + "match those configured for this object" ) ; } }
Throws a SQLException if given user name or password are not same as those configured for this object .
163
23
154,981
public Object setConnectionProperty ( String name , String value ) { return connProperties . setProperty ( name , value ) ; }
Sets JDBC Connection Properties to be used when physical connections are obtained for the pool .
27
18
154,982
@ Override public void start ( boolean block ) throws InterruptedException , ExecutionException { Future < ? > task = m_es . submit ( new ParentEvent ( null ) ) ; if ( block ) { task . get ( ) ; } }
Initialize and start watching the cache .
51
8
154,983
public Object getAggregatedValue ( Session session , Object currValue ) { if ( currValue == null ) { // A VoltDB extension APPROX_COUNT_DISTINCT return opType == OpTypes . COUNT || opType == OpTypes . APPROX_COUNT_DISTINCT ? ValuePool . INTEGER_0 : null ; /* disable 2 lines... return opType == OpTypes.COUNT ? ValuePool.INTEGER_0 : null; ...disabled 2 lines */ // End of VoltDB extension } return ( ( SetFunction ) currValue ) . getValue ( ) ; }
Get the result of a SetFunction or an ordinary value
136
11
154,984
private boolean tableListIncludesReadOnlyView ( List < Table > tableList ) { for ( Table table : tableList ) { if ( table . getMaterializer ( ) != null && ! TableType . isStream ( table . getMaterializer ( ) . getTabletype ( ) ) ) { return true ; } } return false ; }
Return true if tableList includes at least one matview .
70
12
154,985
private boolean tableListIncludesExportOnly ( List < Table > tableList ) { // list of all export tables (assume uppercase) NavigableSet < String > exportTables = CatalogUtil . getExportTableNames ( m_catalogDb ) ; // this loop is O(number-of-joins * number-of-export-tables) // which seems acceptable if not great. Probably faster than // re-hashing the export only tables for faster lookup. for ( Table table : tableList ) { if ( exportTables . contains ( table . getTypeName ( ) ) && TableType . isStream ( table . getTabletype ( ) ) ) { return true ; } } return false ; }
Return true if tableList includes at least one export table .
153
12
154,986
private ParsedResultAccumulator getBestCostPlanForEphemeralScans ( List < StmtEphemeralTableScan > scans ) { int nextPlanId = m_planSelector . m_planId ; boolean orderIsDeterministic = true ; boolean hasSignificantOffsetOrLimit = false ; String contentNonDeterminismMessage = null ; for ( StmtEphemeralTableScan scan : scans ) { if ( scan instanceof StmtSubqueryScan ) { nextPlanId = planForParsedSubquery ( ( StmtSubqueryScan ) scan , nextPlanId ) ; // If we can't plan this, then give up. if ( ( ( StmtSubqueryScan ) scan ) . getBestCostPlan ( ) == null ) { return null ; } } else if ( scan instanceof StmtCommonTableScan ) { nextPlanId = planForCommonTableQuery ( ( StmtCommonTableScan ) scan , nextPlanId ) ; if ( ( ( StmtCommonTableScan ) scan ) . getBestCostBasePlan ( ) == null ) { return null ; } } else { throw new PlanningErrorException ( "Unknown scan plan type." ) ; } orderIsDeterministic = scan . isOrderDeterministic ( orderIsDeterministic ) ; contentNonDeterminismMessage = scan . contentNonDeterminismMessage ( contentNonDeterminismMessage ) ; hasSignificantOffsetOrLimit = scan . hasSignificantOffsetOrLimit ( hasSignificantOffsetOrLimit ) ; } // need to reset plan id for the entire SQL m_planSelector . m_planId = nextPlanId ; return new ParsedResultAccumulator ( orderIsDeterministic , hasSignificantOffsetOrLimit , contentNonDeterminismMessage ) ; }
Generate best cost plans for a list of derived tables which we call FROM sub - queries and common table queries .
377
23
154,987
private boolean getBestCostPlanForExpressionSubQueries ( Set < AbstractExpression > subqueryExprs ) { int nextPlanId = m_planSelector . m_planId ; for ( AbstractExpression expr : subqueryExprs ) { assert ( expr instanceof SelectSubqueryExpression ) ; if ( ! ( expr instanceof SelectSubqueryExpression ) ) { continue ; // DEAD CODE? } SelectSubqueryExpression subqueryExpr = ( SelectSubqueryExpression ) expr ; StmtSubqueryScan subqueryScan = subqueryExpr . getSubqueryScan ( ) ; nextPlanId = planForParsedSubquery ( subqueryScan , nextPlanId ) ; CompiledPlan bestPlan = subqueryScan . getBestCostPlan ( ) ; if ( bestPlan == null ) { return false ; } subqueryExpr . setSubqueryNode ( bestPlan . rootPlanGraph ) ; // The subquery plan must not contain Receive/Send nodes because it will be executed // multiple times during the parent statement execution. if ( bestPlan . rootPlanGraph . hasAnyNodeOfType ( PlanNodeType . SEND ) ) { // fail the whole plan m_recentErrorMsg = IN_EXISTS_SCALAR_ERROR_MESSAGE ; return false ; } } // need to reset plan id for the entire SQL m_planSelector . m_planId = nextPlanId ; return true ; }
Generate best cost plans for each Subquery expression from the list
311
13
154,988
private CompiledPlan getNextPlan ( ) { CompiledPlan retval ; AbstractParsedStmt nextStmt = null ; if ( m_parsedSelect != null ) { nextStmt = m_parsedSelect ; retval = getNextSelectPlan ( ) ; } else if ( m_parsedInsert != null ) { nextStmt = m_parsedInsert ; retval = getNextInsertPlan ( ) ; } else if ( m_parsedDelete != null ) { nextStmt = m_parsedDelete ; retval = getNextDeletePlan ( ) ; // note that for replicated tables, multi-fragment plans // need to divide the result by the number of partitions } else if ( m_parsedUpdate != null ) { nextStmt = m_parsedUpdate ; retval = getNextUpdatePlan ( ) ; } else if ( m_parsedUnion != null ) { nextStmt = m_parsedUnion ; retval = getNextUnionPlan ( ) ; } else if ( m_parsedSwap != null ) { nextStmt = m_parsedSwap ; retval = getNextSwapPlan ( ) ; } else if ( m_parsedMigrate != null ) { nextStmt = m_parsedMigrate ; retval = getNextMigratePlan ( ) ; } else { throw new RuntimeException ( "setupForNewPlans encountered unsupported statement type." ) ; } if ( retval == null || retval . rootPlanGraph == null ) { return null ; } assert ( nextStmt != null ) ; retval . setParameters ( nextStmt . getParameters ( ) ) ; return retval ; }
Generate a unique and correct plan for the current SQL statement context . This method gets called repeatedly until it returns null meaning there are no more plans .
374
30
154,989
private void connectChildrenBestPlans ( AbstractPlanNode parentPlan ) { if ( parentPlan instanceof AbstractScanPlanNode ) { AbstractScanPlanNode scanNode = ( AbstractScanPlanNode ) parentPlan ; StmtTableScan tableScan = scanNode . getTableScan ( ) ; if ( tableScan instanceof StmtSubqueryScan ) { CompiledPlan bestCostPlan = ( ( StmtSubqueryScan ) tableScan ) . getBestCostPlan ( ) ; assert ( bestCostPlan != null ) ; AbstractPlanNode subQueryRoot = bestCostPlan . rootPlanGraph ; subQueryRoot . disconnectParents ( ) ; scanNode . clearChildren ( ) ; scanNode . addAndLinkChild ( subQueryRoot ) ; } else if ( tableScan instanceof StmtCommonTableScan ) { assert ( parentPlan instanceof SeqScanPlanNode ) ; SeqScanPlanNode scanPlanNode = ( SeqScanPlanNode ) parentPlan ; StmtCommonTableScan cteScan = ( StmtCommonTableScan ) tableScan ; CompiledPlan bestCostBasePlan = cteScan . getBestCostBasePlan ( ) ; CompiledPlan bestCostRecursivePlan = cteScan . getBestCostRecursivePlan ( ) ; assert ( bestCostBasePlan != null ) ; AbstractPlanNode basePlanRoot = bestCostBasePlan . rootPlanGraph ; scanPlanNode . setCTEBaseNode ( basePlanRoot ) ; if ( bestCostRecursivePlan != null ) { // Either the CTE is not recursive, or this is a recursive CTE but we // got here during the planning of the recurse query when the recurse // query plan is still being worked on. AbstractPlanNode recursePlanRoot = bestCostRecursivePlan . rootPlanGraph ; assert ( basePlanRoot instanceof CommonTablePlanNode ) ; CommonTablePlanNode ctePlanNode = ( CommonTablePlanNode ) basePlanRoot ; ctePlanNode . setRecursiveNode ( recursePlanRoot ) ; } } } else { for ( int i = 0 ; i < parentPlan . getChildCount ( ) ; ++ i ) { connectChildrenBestPlans ( parentPlan . getChild ( i ) ) ; } } }
For each sub - query or CTE node in the plan tree attach the corresponding plans to the parent node .
466
22
154,990
private boolean needProjectionNode ( AbstractPlanNode root ) { if ( ! root . planNodeClassNeedsProjectionNode ( ) ) { return false ; } // If there is a complexGroupby at his point, it means that // display columns contain all the order by columns and // does not require another projection node on top of sort node. // If there is a complex aggregation case, the projection plan node is already added // right above the group by plan node. In future, we may inline that projection node. if ( m_parsedSelect . hasComplexGroupby ( ) || m_parsedSelect . hasComplexAgg ( ) ) { return false ; } if ( root instanceof AbstractReceivePlanNode && m_parsedSelect . hasPartitionColumnInGroupby ( ) ) { // Top aggregate has been removed, its schema is exactly the same to // its local aggregate node. return false ; } return true ; }
Return true if the plan referenced by root node needs a projection node appended to the top .
200
19
154,991
static private boolean deleteIsTruncate ( ParsedDeleteStmt stmt , AbstractPlanNode plan ) { if ( ! ( plan instanceof SeqScanPlanNode ) ) { return false ; } // Assume all index scans have filters in this context, so only consider seq scans. SeqScanPlanNode seqScanNode = ( SeqScanPlanNode ) plan ; if ( seqScanNode . getPredicate ( ) != null ) { return false ; } if ( stmt . hasLimitOrOffset ( ) ) { return false ; } return true ; }
Returns true if this DELETE can be executed in the EE as a truncate operation.
118
18
154,992
private static AbstractPlanNode addCoordinatorToDMLNode ( AbstractPlanNode dmlRoot , boolean isReplicated ) { dmlRoot = SubPlanAssembler . addSendReceivePair ( dmlRoot ) ; AbstractPlanNode sumOrLimitNode ; if ( isReplicated ) { // Replicated table DML result doesn't need to be summed. All partitions should // modify the same number of tuples in replicated table, so just pick the result from // any partition. LimitPlanNode limitNode = new LimitPlanNode ( ) ; sumOrLimitNode = limitNode ; limitNode . setLimit ( 1 ) ; } else { // create the nodes being pushed on top of dmlRoot. AggregatePlanNode countNode = new AggregatePlanNode ( ) ; sumOrLimitNode = countNode ; // configure the count aggregate (sum) node to produce a single // output column containing the result of the sum. // Create a TVE that should match the tuple count input column // This TVE is magic. // really really need to make this less hard-wired TupleValueExpression count_tve = new TupleValueExpression ( AbstractParsedStmt . TEMP_TABLE_NAME , AbstractParsedStmt . TEMP_TABLE_NAME , "modified_tuples" , "modified_tuples" , 0 ) ; count_tve . setValueType ( VoltType . BIGINT ) ; count_tve . setValueSize ( VoltType . BIGINT . getLengthInBytesForFixedTypes ( ) ) ; countNode . addAggregate ( ExpressionType . AGGREGATE_SUM , false , 0 , count_tve ) ; // The output column. Not really based on a TVE (it is really the // count expression represented by the count configured above). But // this is sufficient for now. This looks identical to the above // TVE but it's logically different so we'll create a fresh one. TupleValueExpression tve = new TupleValueExpression ( AbstractParsedStmt . TEMP_TABLE_NAME , AbstractParsedStmt . TEMP_TABLE_NAME , "modified_tuples" , "modified_tuples" , 0 ) ; tve . setValueType ( VoltType . BIGINT ) ; tve . setValueSize ( VoltType . BIGINT . getLengthInBytesForFixedTypes ( ) ) ; NodeSchema count_schema = new NodeSchema ( ) ; count_schema . addColumn ( AbstractParsedStmt . 
TEMP_TABLE_NAME , AbstractParsedStmt . TEMP_TABLE_NAME , "modified_tuples" , "modified_tuples" , tve ) ; countNode . setOutputSchema ( count_schema ) ; } // connect the nodes to build the graph sumOrLimitNode . addAndLinkChild ( dmlRoot ) ; SendPlanNode sendNode = new SendPlanNode ( ) ; sendNode . addAndLinkChild ( sumOrLimitNode ) ; return sendNode ; }
Add a receive node, a sum or limit node, and a send node to the given DML node. If the DML target is a replicated table it adds a limit node; otherwise it adds a sum node.
652
42
154,993
/**
 * Construct an OrderByPlanNode from a list of parsed ORDER BY columns,
 * preserving the columns' order and each column's sort direction.
 *
 * @param cols parsed ORDER BY columns
 * @return a new, unconnected OrderByPlanNode
 */
private static OrderByPlanNode buildOrderByPlanNode(List<ParsedColInfo> cols) {
    OrderByPlanNode orderByNode = new OrderByPlanNode();
    for (ParsedColInfo orderCol : cols) {
        SortDirectionType direction;
        if (orderCol.m_ascending) {
            direction = SortDirectionType.ASC;
        }
        else {
            direction = SortDirectionType.DESC;
        }
        orderByNode.addSortExpression(orderCol.m_expression, direction);
    }
    return orderByNode;
}
Given a list of ORDER BY columns, construct and return an OrderByPlanNode.
90
16
154,994
/**
 * If the statement requires an ORDER BY node at this point in the plan,
 * create one and make it the parent of {@code root}; otherwise return
 * {@code root} unchanged.
 *
 * @param parsedStmt a SELECT, UNION, or DELETE statement
 * @param root       current plan root
 * @return the (possibly new) plan root
 */
private static AbstractPlanNode handleOrderBy(AbstractParsedStmt parsedStmt, AbstractPlanNode root) {
    // Only these statement kinds can carry ORDER BY at this stage.
    assert (parsedStmt instanceof ParsedSelectStmt
            || parsedStmt instanceof ParsedUnionStmt
            || parsedStmt instanceof ParsedDeleteStmt);
    if (isOrderByNodeRequired(parsedStmt, root)) {
        OrderByPlanNode orderByNode = buildOrderByPlanNode(parsedStmt.orderByColumns());
        orderByNode.addAndLinkChild(root);
        return orderByNode;
    }
    return root;
}
Create an order-by node as required by the statement and make it a parent of root.
126
18
154,995
/**
 * Apply the statement's top-level LIMIT/OFFSET to a SELECT plan, pushing a
 * distributed limit below the send/receive boundary when it is safe to do
 * so, and return the (possibly new) plan root.
 *
 * @param root current plan root
 * @return the plan root with the top limit attached (inlined when possible)
 */
private AbstractPlanNode handleSelectLimitOperator(AbstractPlanNode root) {
    // The coordinator's top limit graph fragment for a MP plan.
    // If planning "order by ... limit", getNextSelectPlan()
    // will have already added an order by to the coordinator frag.
    // This is the only limit node in a SP plan
    LimitPlanNode topLimit = m_parsedSelect.getLimitNodeTop();
    assert (topLimit != null);

    /*
     * TODO: allow push down limit with distinct (select distinct C from T limit 5)
     * , DISTINCT in aggregates and DISTINCT PUSH DOWN with partition column included.
     */
    AbstractPlanNode sendNode = null;
    // Whether or not we can push the limit node down
    boolean canPushDown = !m_parsedSelect.hasDistinctWithGroupBy();
    if (canPushDown) {
        // A non-null sendNode marks where the distributed fragment hangs off.
        sendNode = checkLimitPushDownViability(root);
        if (sendNode == null) {
            canPushDown = false;
        }
        else {
            canPushDown = m_parsedSelect.getCanPushdownLimit();
        }
    }

    if (m_parsedSelect.m_mvFixInfo.needed()) {
        // Do not push down limit for mv based distributed query.
        canPushDown = false;
    }

    /*
     * Push down the limit plan node when possible even if offset is set. If
     * the plan is for a partitioned table, do the push down. Otherwise,
     * there is no need to do the push down work, the limit plan node will
     * be run in the partition.
     */
    if (canPushDown) {
        /*
         * For partitioned table, the pushed-down limit plan node has a limit based
         * on the combined limit and offset, which may require an expression if either of these
         * was not a hard-coded constant and didn't get parameterized.
         * The top level limit plan node remains the same, with the original limit and offset values.
         */
        LimitPlanNode distLimit = m_parsedSelect.getLimitNodeDist();

        // Disconnect the distributed parts of the plan below the SEND node
        AbstractPlanNode distributedPlan = sendNode.getChild(0);
        distributedPlan.clearParents();
        sendNode.clearChildren();

        // If the distributed limit must be performed on ordered input,
        // ensure the order of the data on each partition.
        if (m_parsedSelect.hasOrderByColumns()) {
            distributedPlan = handleOrderBy(m_parsedSelect, distributedPlan);
        }

        if (isInlineLimitPlanNodePossible(distributedPlan)) {
            // Inline the distributed limit.
            distributedPlan.addInlinePlanNode(distLimit);
            sendNode.addAndLinkChild(distributedPlan);
        }
        else {
            distLimit.addAndLinkChild(distributedPlan);
            // Add the distributed work back to the plan
            sendNode.addAndLinkChild(distLimit);
        }
    }
    // In future, inline LIMIT for join, Receive
    // Then we do not need to distinguish the order by node.
    return inlineLimitOperator(root, topLimit);
}
Add a limit node, pushed down if possible, and return the new root.
663
14
154,996
private AbstractPlanNode handleUnionLimitOperator ( AbstractPlanNode root ) { // The coordinator's top limit graph fragment for a MP plan. // If planning "order by ... limit", getNextUnionPlan() // will have already added an order by to the coordinator frag. // This is the only limit node in a SP plan LimitPlanNode topLimit = m_parsedUnion . getLimitNodeTop ( ) ; assert ( topLimit != null ) ; return inlineLimitOperator ( root , topLimit ) ; }
Add a limit node and return the new root.
108
9
154,997
private AbstractPlanNode inlineLimitOperator ( AbstractPlanNode root , LimitPlanNode topLimit ) { if ( isInlineLimitPlanNodePossible ( root ) ) { root . addInlinePlanNode ( topLimit ) ; } else if ( root instanceof ProjectionPlanNode && isInlineLimitPlanNodePossible ( root . getChild ( 0 ) ) ) { // In future, inlined this projection node for OrderBy and Aggregate // Then we could delete this ELSE IF block. root . getChild ( 0 ) . addInlinePlanNode ( topLimit ) ; } else { topLimit . addAndLinkChild ( root ) ; root = topLimit ; } return root ; }
Inline the limit plan node if possible.
147
7
154,998
/**
 * Whether a LIMIT node may be inlined into the given plan node. Inlining
 * is supported for ORDER BY nodes and (serial) aggregate nodes.
 *
 * @param pn candidate plan node
 * @return true if a limit can be inlined into {@code pn}
 */
static private boolean isInlineLimitPlanNodePossible(AbstractPlanNode pn) {
    // Return the condition directly instead of the if/return-true ladder.
    return pn instanceof OrderByPlanNode
            || pn.getPlanNodeType() == PlanNodeType.AGGREGATE;
}
An inline limit plan node can be applied to an ORDER BY node or a serial aggregation node.
58
16
154,999
/**
 * For a sequential scan feeding a GROUP BY, consider substituting an index
 * scan that pre-sorts by the GROUP BY keys; if the candidate is already an
 * index scan, just calculate its GROUP BY column coverage.
 *
 * NOTE(review): the return value appears to mean "the caller should use
 * gbInfo.m_indexAccess as its new access path root" — it is false both when
 * no switch applies and when the substitution was already performed in
 * place via replaceChild on the scan's parent. Confirm against callers.
 *
 * @param candidate plan (sub)tree to inspect
 * @param gbInfo    out-parameter receiving coverage info and the chosen
 *                  index access node
 * @return true when candidate itself should be replaced by
 *         gbInfo.m_indexAccess; false otherwise
 */
private boolean switchToIndexScanForGroupBy(AbstractPlanNode candidate, IndexGroupByInfo gbInfo) {
    // Nothing to do for ungrouped statements.
    if (!m_parsedSelect.isGrouped()) {
        return false;
    }
    if (candidate instanceof IndexScanPlanNode) {
        calculateIndexGroupByInfo((IndexScanPlanNode) candidate, gbInfo);
        if (gbInfo.m_coveredGroupByColumns != null
                && !gbInfo.m_coveredGroupByColumns.isEmpty()) {
            // The candidate index does cover all or some
            // of the GROUP BY columns and can be serialized
            gbInfo.m_indexAccess = candidate;
            return true;
        }
        return false;
    }
    // Look below the candidate for a sequential scan that could be switched.
    AbstractPlanNode sourceSeqScan = findSeqScanCandidateForGroupBy(candidate);
    if (sourceSeqScan == null) {
        return false;
    }
    assert (sourceSeqScan instanceof SeqScanPlanNode);
    // Remember the scan's parent (if any) so the replacement can be wired in.
    AbstractPlanNode parent = null;
    if (sourceSeqScan.getParentCount() > 0) {
        parent = sourceSeqScan.getParent(0);
    }
    AbstractPlanNode indexAccess = indexAccessForGroupByExprs((SeqScanPlanNode) sourceSeqScan, gbInfo);
    if (indexAccess.getPlanNodeType() != PlanNodeType.INDEXSCAN) {
        // does not find proper index to replace sequential scan
        return false;
    }
    gbInfo.m_indexAccess = indexAccess;
    if (parent != null) {
        // have a parent and would like to replace
        // the sequential scan with an index scan
        indexAccess.clearParents();
        // For two children join node, index 0 is its outer side
        parent.replaceChild(0, indexAccess);
        // The tree was rewired in place, so the candidate root is unchanged.
        return false;
    }
    // parent is null and switched to index scan from sequential scan
    return true;
}
For a seqscan feeding a GROUP BY, consider substituting an IndexScan that pre-sorts by the GROUP BY keys. If the candidate is already an indexscan, simply calculate the GROUP BY column coverage.
392
38