| idx (int64, 0–165k) | question (string, 73–4.15k chars) | target (string, 5–918 chars) | len_question (int64, 21–890) | len_target (int64, 3–255) |
|---|---|---|---|---|
155,200
|
private KafkaInternalConsumerRunner createConsumerRunner(Properties properties) throws Exception { ClassLoader previous = Thread.currentThread().getContextClassLoader(); Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); try { Consumer<ByteBuffer, ByteBuffer> consumer = new KafkaConsumer<>(properties); return new KafkaInternalConsumerRunner(this, m_config, consumer); } finally { Thread.currentThread().setContextClassLoader(previous); } }
|
Create a Kafka consumer and runner.
| 114
| 7
|
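The snippet above uses the common swap-and-restore pattern for the thread context classloader. A minimal, self-contained sketch of the same pattern using only JDK classes (the helper name is illustrative, not from the dataset):

```java
import java.util.concurrent.Callable;

// Run `work` with `cl` installed as the context classloader, restoring the
// previous loader even if the work throws (illustrative helper).
final class ContextClassLoaderSwap {
    static <T> T with(ClassLoader cl, Callable<T> work) throws Exception {
        Thread current = Thread.currentThread();
        ClassLoader previous = current.getContextClassLoader();
        current.setContextClassLoader(cl);
        try {
            return work.call();
        } finally {
            current.setContextClassLoader(previous);
        }
    }
}
```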
155,201
|
void createSchema(HsqlName name, Grantee owner) { SqlInvariants.checkSchemaNameNotSystem(name.name); Schema schema = new Schema(name, owner); schemaMap.add(name.name, schema); }
|
Creates a schema belonging to the given grantee.
| 59
| 11
|
155,202
|
public HsqlName getSchemaHsqlName(String name) { if (name == null) { return defaultSchemaHsqlName; } if (SqlInvariants.INFORMATION_SCHEMA.equals(name)) { return SqlInvariants.INFORMATION_SCHEMA_HSQLNAME; } Schema schema = ((Schema) schemaMap.get(name)); if (schema == null) { throw Error.error(ErrorCode.X_3F000, name); } return schema.name; }
|
If schemaName is null, return the default schema name; else return the HsqlName object for the schema. If schemaName does not exist, throw.
| 118
| 29
|
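The contract the target describes (null selects the default, a known name returns its entry, an unknown name throws) can be sketched with plain JDK types. `SchemaLookup` and its fields below are hypothetical stand-ins, not HSQLDB classes:

```java
import java.util.Map;

// Hypothetical stand-in illustrating the lookup contract of getSchemaHsqlName.
final class SchemaLookup {
    private final Map<String, String> schemaMap;
    private final String defaultSchemaName;

    SchemaLookup(Map<String, String> schemaMap, String defaultSchemaName) {
        this.schemaMap = schemaMap;
        this.defaultSchemaName = defaultSchemaName;
    }

    String resolve(String name) {
        if (name == null) {
            return defaultSchemaName;   // null selects the default schema
        }
        String schema = schemaMap.get(name);
        if (schema == null) {
            throw new IllegalArgumentException("invalid schema name: " + name);
        }
        return schema;
    }
}
```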
155,203
|
boolean isSchemaAuthorisation(Grantee grantee) { Iterator schemas = allSchemaNameIterator(); while (schemas.hasNext()) { String schemaName = (String) schemas.next(); if (grantee.equals(toSchemaOwner(schemaName))) { return true; } } return false; }
|
Returns true if the grantee is the authorization (owner) of any schema.
| 79
| 9
|
155,204
|
void dropSchemas(Grantee grantee, boolean cascade) { HsqlArrayList list = getSchemas(grantee); Iterator it = list.iterator(); while (it.hasNext()) { Schema schema = (Schema) it.next(); dropSchema(schema.name.name, cascade); } }
|
Drop all schemas with the given authorisation.
| 77
| 9
|
155,205
|
public HsqlArrayList getAllTables() { Iterator schemas = allSchemaNameIterator(); HsqlArrayList alltables = new HsqlArrayList(); while (schemas.hasNext()) { String name = (String) schemas.next(); HashMappedList current = getTables(name); alltables.addAll(current.values()); } return alltables; }
|
Returns an HsqlArrayList containing references to all non-system tables and views. This includes all tables and views registered with this Database.
| 99
| 28
|
155,206
|
public Table getTable(Session session, String name, String schema) { Table t = null; if (schema == null) { t = findSessionTable(session, name, schema); } if (t == null) { schema = session.getSchemaName(schema); t = findUserTable(session, name, schema); } if (t == null) { if (SqlInvariants.INFORMATION_SCHEMA.equals(schema) && database.dbInfo != null) { t = database.dbInfo.getSystemTable(session, name); } } if (t == null) { throw Error.error(ErrorCode.X_42501, name); } return t; }
|
Returns the specified user-defined table or view visible within the context of the specified Session, or any system table of the given name. It excludes any temp tables created in other Sessions. Throws if the table does not exist in the context.
| 154
| 48
|
155,207
|
public Table getUserTable(Session session, String name, String schema) { Table t = findUserTable(session, name, schema); if (t == null) { throw Error.error(ErrorCode.X_42501, name); } return t; }
|
Returns the specified user-defined table or view visible within the context of the specified Session. It excludes system tables and any temp tables created in different Sessions. Throws if the table does not exist in the context.
| 58
| 43
|
155,208
|
public Table findUserTable(Session session, String name, String schemaName) { Schema schema = (Schema) schemaMap.get(schemaName); if (schema == null) { return null; } if (session != null) { Table table = session.getLocalTable(name); if (table != null) { return table; } } int i = schema.tableList.getIndex(name); if (i == -1) { return null; } return (Table) schema.tableList.get(i); }
|
Returns the specified user-defined table or view visible within the context of the specified schema. It excludes system tables. Returns null if the table does not exist in the context.
| 117
| 35
|
155,209
|
public Table findSessionTable(Session session, String name, String schemaName) { return session.findSessionTable(name); }
|
Returns the specified session context table. Returns null if the table does not exist in the context.
| 28
| 19
|
155,210
|
void dropTableOrView(Session session, Table table, boolean cascade) { /* ft - concurrent */ session.commit(false); if (table.isView()) { removeSchemaObject(table.getName(), cascade); } else { dropTable(session, table, cascade); } }
|
Drops the specified user-defined view or table from this Database object.
| 67
| 15
|
155,211
|
int getTableIndex(Table table) { Schema schema = (Schema) schemaMap.get(table.getSchemaName().name); if (schema == null) { return -1; } HsqlName name = table.getName(); return schema.tableList.getIndex(name.name); }
|
Returns the index of a table or view in the HashMappedList that contains the table objects for this Database.
| 73
| 22
|
155,212
|
void recompileDependentObjects(Table table) { OrderedHashSet set = getReferencingObjects(table.getName()); Session session = database.sessionManager.getSysSession(); for (int i = 0; i < set.size(); i++) { HsqlName name = (HsqlName) set.get(i); switch (name.type) { case SchemaObject.VIEW : case SchemaObject.CONSTRAINT : case SchemaObject.ASSERTION : SchemaObject object = getSchemaObject(name); object.compile(session); break; } } HsqlArrayList list = getAllTables(); for (int i = 0; i < list.size(); i++) { Table t = (Table) list.get(i); t.updateConstraintPath(); } }
|
After addition or removal of columns and indexes, all views that reference the table should be recompiled.
| 197
| 19
|
155,213
|
Table findUserTableForIndex(Session session, String name, String schemaName) { Schema schema = (Schema) schemaMap.get(schemaName); HsqlName indexName = schema.indexLookup.getName(name); if (indexName == null) { return null; } return findUserTable(session, indexName.parent.name, schemaName); }
|
Returns the table that has an index with the given name and schema .
| 84
| 14
|
155,214
|
public HsqlName getSchemaHsqlNameNoThrow(String name, HsqlName defaultName) { if (name == null) { return defaultSchemaHsqlName; } if (SqlInvariants.INFORMATION_SCHEMA.equals(name)) { return SqlInvariants.INFORMATION_SCHEMA_HSQLNAME; } Schema schema = ((Schema) schemaMap.get(name)); if (schema == null) { return defaultName; } return schema.name; }
|
If schemaName is null, return the default schema name; else return the HsqlName object for the schema. If schemaName does not exist, return the provided defaultName. Not throwing the usual exception saves some throw-then-catch nonsense in the usual session setup.
| 113
| 52
|
155,215
|
public InitiateResponseMessage dedupe(long inUniqueId, TransactionInfoBaseMessage in) { if (in instanceof Iv2InitiateTaskMessage) { final Iv2InitiateTaskMessage init = (Iv2InitiateTaskMessage) in; final StoredProcedureInvocation invocation = init.getStoredProcedureInvocation(); final String procName = invocation.getProcName(); /* Ning - @LoadSinglepartTable and @LoadMultipartTable always have the same txnId, which is the txnId of the snapshot. */ if (!(procName.equalsIgnoreCase("@LoadSinglepartitionTable") || procName.equalsIgnoreCase("@LoadMultipartitionTable")) && inUniqueId <= m_lastSeenUniqueId) { /* already sequenced */ final InitiateResponseMessage resp = new InitiateResponseMessage(init); resp.setResults(new ClientResponseImpl(ClientResponseImpl.UNEXPECTED_FAILURE, new VoltTable[0], ClientResponseImpl.IGNORED_TRANSACTION)); return resp; } } return null; }
|
Dedupe initiate task messages. Check whether the initiate task message has been seen before.
| 252
| 17
|
155,216
|
public void updateLastSeenUniqueId(long inUniqueId, TransactionInfoBaseMessage in) { if (in instanceof Iv2InitiateTaskMessage && inUniqueId > m_lastSeenUniqueId) { m_lastSeenUniqueId = inUniqueId; } }
|
Update the last seen uniqueId for this partition if it's an initiate task message.
| 61
| 17
|
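The two methods above (records 155,215 and 155,216) implement a high-water-mark style of deduplication. A self-contained sketch of that idea, with illustrative class and field names rather than the VoltDB ones:

```java
// Anything at or below the last seen id counts as a duplicate; only larger
// ids advance the watermark (illustrative, not the VoltDB class).
final class UniqueIdWatermark {
    private long lastSeenUniqueId = Long.MIN_VALUE;

    boolean isDuplicate(long uniqueId) {
        return uniqueId <= lastSeenUniqueId;
    }

    void update(long uniqueId) {
        if (uniqueId > lastSeenUniqueId) {
            lastSeenUniqueId = uniqueId;
        }
    }
}
```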
155,217
|
public VoltMessage poll() { if (m_mustDrain || m_replayEntries.isEmpty()) { return null; } if (m_replayEntries.firstEntry().getValue().isEmpty()) { m_replayEntries.pollFirstEntry(); } /* All the drain conditions depend on being blocked, which we will only really know for sure when we try to poll(). */ checkDrainCondition(); if (m_mustDrain || m_replayEntries.isEmpty()) { return null; } VoltMessage m = m_replayEntries.firstEntry().getValue().poll(); updateLastPolledUniqueId(m_replayEntries.firstEntry().getKey(), (TransactionInfoBaseMessage) m); return m; }
|
Return the next correctly sequenced message, or null if none exists.
| 190
| 13
|
155,218
|
public boolean offer(long inUniqueId, TransactionInfoBaseMessage in) { ReplayEntry found = m_replayEntries.get(inUniqueId); if (in instanceof Iv2EndOfLogMessage) { m_mpiEOLReached = true; return true; } if (in instanceof MultiPartitionParticipantMessage) { /* DRv1 path, mark for future removal. DR sends multiple @LoadMultipartitionTable proc calls with the same txnId, which is the snapshot txnId. For each partition, there is a sentinel paired with the @LoadMultipartitionTable call. Dedupe the sentinels the same way as we dedupe fragments, so that no sentinels end up in the sequencer whose matching fragments were deduped. */ if (inUniqueId <= m_lastPolledFragmentUniqueId) { return true; } if (found == null) { ReplayEntry newEntry = new ReplayEntry(); newEntry.m_sentinelUniqueId = inUniqueId; m_replayEntries.put(inUniqueId, newEntry); } else { found.m_sentinelUniqueId = inUniqueId; assert (found.isReady()); } } else if (in instanceof FragmentTaskMessage) { /* already sequenced */ if (inUniqueId <= m_lastPolledFragmentUniqueId) { return false; } FragmentTaskMessage ftm = (FragmentTaskMessage) in; if (found == null) { ReplayEntry newEntry = new ReplayEntry(); newEntry.m_firstFragment = ftm; m_replayEntries.put(inUniqueId, newEntry); } else if (found.m_firstFragment == null) { found.m_firstFragment = ftm; assert (found.isReady()); } else { found.addQueuedMessage(ftm); } } else if (in instanceof CompleteTransactionMessage) { /* don't sequence CompleteTransactionMessage; throw them to the scheduler directly */ return false; } else { /* DRv1 path, mark for future removal */ if (dedupe(inUniqueId, in) != null) { /* ignore an already seen txn */ return true; } updateLastSeenUniqueId(inUniqueId, in); if (m_replayEntries.isEmpty() || !m_replayEntries.lastEntry().getValue().hasSentinel()) { /* not-blocked work; rejected and not queued */ return false; } else { /* queue the message with the newest replayEntry */ m_replayEntries.lastEntry().getValue().addQueuedMessage(in); } } return true; }
|
Offer a new message. Return false if the offered message can be run immediately.
| 622
| 17
|
155,219
|
private void verifyDataCapacity(int size) { if (size + 4 > m_dataNetwork.capacity()) { m_dataNetworkOrigin.discard(); m_dataNetworkOrigin = org.voltcore.utils.DBBPool.allocateDirect(size + 4); m_dataNetwork = m_dataNetworkOrigin.b(); m_dataNetwork.position(4); m_data = m_dataNetwork.slice(); } }
|
Ensure the data buffer can hold size bytes plus the 4-byte length prefix, reallocating the direct buffer when it cannot.
| 103
| 6
|
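verifyDataCapacity keeps a 4-byte header slot in front of the payload and reallocates the direct buffer when the payload would not fit. A JDK-only sketch of the same layout, without the DBBPool allocator (names illustrative):

```java
import java.nio.ByteBuffer;

// Keep 4 bytes of header room in a direct buffer and grow it on demand
// (illustrative; the real code recycles buffers through DBBPool).
final class GrowableFrameBuffer {
    private ByteBuffer network = ByteBuffer.allocateDirect(4 + 1024);
    private ByteBuffer data = slicePastHeader(network);

    void verifyDataCapacity(int size) {
        if (size + 4 > network.capacity()) {
            network = ByteBuffer.allocateDirect(size + 4);
            data = slicePastHeader(network);
        }
    }

    private static ByteBuffer slicePastHeader(ByteBuffer buf) {
        buf.clear();
        buf.position(4);     // reserve the 4-byte length prefix
        return buf.slice();  // payload view that starts after the header
    }
}
```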
155,220
|
public void initialize(final int clusterIndex, final long siteId, final int partitionId, final int sitesPerHost, final int hostId, final String hostname, final int drClusterId, final int defaultDrBufferSize, final long tempTableMemory, final HashinatorConfig hashinatorConfig, final boolean createDrReplicatedStream, final long exportFlushTimeout) { synchronized (printLockObject) { System.out.println("Initializing an IPC EE " + this + " for hostId " + hostId + " siteId " + siteId + " from thread " + Thread.currentThread().getId()); } int result = ExecutionEngine.ERRORCODE_ERROR; m_data.clear(); m_data.putInt(Commands.Initialize.m_id); m_data.putInt(clusterIndex); m_data.putLong(siteId); m_data.putInt(partitionId); m_data.putInt(sitesPerHost); m_data.putInt(hostId); m_data.putInt(drClusterId); m_data.putInt(defaultDrBufferSize); m_data.putLong(EELoggers.getLogLevels()); m_data.putLong(tempTableMemory); m_data.putInt(createDrReplicatedStream ? 1 : 0); m_data.putInt((short) hostname.length()); m_data.put(hostname.getBytes(Charsets.UTF_8)); try { m_data.flip(); m_connection.write(); result = m_connection.readStatusByte(); } catch (final IOException e) { System.out.println("Exception: " + e.getMessage()); throw new RuntimeException(e); } checkErrorCode(result); updateHashinator(hashinatorConfig); }
|
The abstract API assumes construction initializes, but here initialization is just another command.
| 432
| 15
|
155,221
|
@Override protected void coreLoadCatalog(final long timestamp, final byte[] catalogBytes) throws EEException { int result = ExecutionEngine.ERRORCODE_ERROR; verifyDataCapacity(catalogBytes.length + 100); m_data.clear(); m_data.putInt(Commands.LoadCatalog.m_id); m_data.putLong(timestamp); m_data.put(catalogBytes); m_data.put((byte) ' '); try { m_data.flip(); m_connection.write(); result = m_connection.readStatusByte(); } catch (final IOException e) { System.out.println("Exception: " + e.getMessage()); throw new RuntimeException(e); } checkErrorCode(result); }
|
Write the catalog as a UTF-8 byte string via the connection.
| 182
| 12
|
155,222
|
@Override public void coreUpdateCatalog(final long timestamp, final boolean isStreamUpdate, final String catalogDiffs) throws EEException { int result = ExecutionEngine.ERRORCODE_ERROR; try { final byte[] catalogBytes = catalogDiffs.getBytes("UTF-8"); verifyDataCapacity(catalogBytes.length + 100); m_data.clear(); m_data.putInt(Commands.UpdateCatalog.m_id); m_data.putLong(timestamp); m_data.putInt(isStreamUpdate ? 1 : 0); m_data.put(catalogBytes); m_data.put((byte) ' '); } catch (final UnsupportedEncodingException ex) { Logger.getLogger(ExecutionEngineIPC.class.getName()).log(Level.SEVERE, null, ex); } try { m_data.flip(); m_connection.write(); result = m_connection.readStatusByte(); } catch (final IOException e) { System.out.println("Exception: " + e.getMessage()); throw new RuntimeException(e); } checkErrorCode(result); }
|
Write the diffs as a UTF-8 byte string via the connection.
| 272
| 13
|
155,223
|
private void sendDependencyTable(final int dependencyId) throws IOException { final byte[] dependencyBytes = nextDependencyAsBytes(dependencyId); if (dependencyBytes == null) { m_connection.m_socket.getOutputStream().write(Connection.kErrorCode_DependencyNotFound); return; } /* 1 for response code + 4 for dependency length prefix + dependencyBytes.length */ final ByteBuffer message = ByteBuffer.allocate(1 + 4 + dependencyBytes.length); /* write the response code */ message.put((byte) Connection.kErrorCode_DependencyFound); /* write the dependency's length prefix */ message.putInt(dependencyBytes.length); /* finally, write the dependency table itself */ message.put(dependencyBytes); message.rewind(); if (m_connection.m_socketChannel.write(message) != message.capacity()) { throw new IOException("Unable to send dependency table to client. Attempted blocking write of " + message.capacity() + " but not all of it was written"); } }
|
Retrieve a dependency table and send it via the connection. If no table is available, send a response code indicating such. The message is prepended with two lengths. One length is for the network layer and is the size of the whole message not including the length prefix.
| 239
| 54
|
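The frame written above is one response byte, then a 4-byte big-endian length prefix, then the table bytes. A hypothetical client-side decoder for that frame, using only the JDK:

```java
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;

// Decode the frame produced by sendDependencyTable: [code][int length][bytes].
// `foundCode` stands in for the "dependency found" response code (illustrative).
final class DependencyFrameReader {
    static byte[] read(InputStream in, byte foundCode) throws IOException {
        DataInputStream din = new DataInputStream(in);
        byte code = din.readByte();
        if (code != foundCode) {
            return null;                  // e.g. the dependency-not-found code
        }
        int length = din.readInt();       // big-endian, matching ByteBuffer.putInt
        byte[] table = new byte[length];
        din.readFully(table);
        return table;
    }
}
```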
155,224
|
boolean enableScoreboard() { assert (s_barrier != null); try { s_barrier.await(3L, TimeUnit.MINUTES); } catch (InterruptedException | BrokenBarrierException | TimeoutException e) { hostLog.error("Cannot re-enable the scoreboard."); s_barrier.reset(); return false; } m_scoreboardEnabled = true; if (hostLog.isDebugEnabled()) { hostLog.debug("Scoreboard has been enabled."); } return true; }
|
After all sites have been fully initialized and are ready for snapshot, we should enable the scoreboard.
| 124
| 17
|
155,225
|
synchronized void offer(TransactionTask task) { Iv2Trace.logTransactionTaskQueueOffer(task); TransactionState txnState = task.getTransactionState(); if (!m_backlog.isEmpty()) { /* This branch happens during regular execution when a multi-part is in progress. The first task for the multi-part is the head of the queue, and all the single parts are being queued behind it. The txnid check catches tasks that are part of the multi-part and immediately queues them for execution. If any multi-part txn with a smaller txnId shows up, it must be from the repair process, so just let it through. */ if (txnState.isSinglePartition()) { m_backlog.addLast(task); return; } /* It is possible an RO MP read with a higher TxnId could be executed before an RO MP read with a lower TxnId, so do not offer them to the site task queue at the same time; place them in the backlog instead. However, if it is an MP write with a lower TxnId than the TxnId at the head of the backlog, it could be a repair task, so put the MP write task into the Scoreboard or the SiteTaskQueue. */ TransactionTask headTask = m_backlog.getFirst(); if (txnState.isReadOnly() && headTask.getTransactionState().isReadOnly() ? TxnEgo.getSequence(task.getTxnId()) != TxnEgo.getSequence(headTask.getTxnId()) : TxnEgo.getSequence(task.getTxnId()) > TxnEgo.getSequence(headTask.getTxnId())) { m_backlog.addLast(task); } else if (task.needCoordination() && m_scoreboardEnabled) { /* This branch coordinates FragmentTask or CompletedTransactionTask, holding the tasks until all the sites on the node receive the task. Task with newer spHandle will */ coordinatedTaskQueueOffer(task); } else { taskQueueOffer(task); } } else { /* Base case: nothing queued, nothing in progress. If the task is a multipart, then put an entry in the backlog which will act as a barrier for single parts, queuing them for execution after the multipart. */ if (!txnState.isSinglePartition()) { m_backlog.addLast(task); } /* This branch coordinates FragmentTask or CompletedTransactionTask, holding the tasks until all the sites on the node receive the task. Task with newer spHandle will */ if (task.needCoordination() && m_scoreboardEnabled) { coordinatedTaskQueueOffer(task); } else { taskQueueOffer(task); } } }
|
If necessary, stick this task in the backlog. Many network threads may be racing to reach here; synchronize to serialize queue order.
| 647
| 25
|
155,226
|
synchronized int flush(long txnId) { if (tmLog.isDebugEnabled()) { tmLog.debug("Flush backlog with txnId:" + TxnEgo.txnIdToString(txnId) + ", backlog head txnId is:" + (m_backlog.isEmpty() ? "empty" : TxnEgo.txnIdToString(m_backlog.getFirst().getTxnId()))); } int offered = 0; /* If the first entry of the backlog is a completed transaction, clear it so it no longer blocks the backlog, then iterate the backlog for more work. Note the kooky corner case where a multi-part transaction can actually have multiple outstanding tasks. At first glance you would think that because the relationship is request-response there can be only one outstanding task for a given multi-part transaction. That isn't true. A rollback can cause there to be a fragment task as well as a rollback task. The rollback is generated asynchronously by another partition. If we don't flush all the associated tasks now, then flush won't be called again, because it is waiting for the complete transaction task that is languishing in the queue to do the flush post multi-part. It can't be called eagerly because that would destructively flush single parts as well. */ if (m_backlog.isEmpty() || !m_backlog.getFirst().getTransactionState().isDone()) { return offered; } /* Add a guard to protect against the scenario where the backlog is flushed multiple times for the same txnId. */ if (m_backlog.getFirst().getTxnId() != txnId) { return offered; } m_backlog.removeFirst(); Iterator<TransactionTask> iter = m_backlog.iterator(); while (iter.hasNext()) { TransactionTask task = iter.next(); long lastQueuedTxnId = task.getTxnId(); if (task.needCoordination() && m_scoreboardEnabled) { coordinatedTaskQueueOffer(task); } else { taskQueueOffer(task); } ++offered; if (task.getTransactionState().isSinglePartition()) { /* single part can be immediately removed and offered */ iter.remove(); continue; } else { /* leave the mp fragment at the head of the backlog, but iterate and take care of the kooky case explained above */ while (iter.hasNext()) { task = iter.next(); if (task.getTxnId() == lastQueuedTxnId) { iter.remove(); if (task.needCoordination() && m_scoreboardEnabled) { coordinatedTaskQueueOffer(task); } else { taskQueueOffer(task); } ++offered; } } break; } } return offered; }
|
Try to offer as many runnable Tasks to the SiteTaskerQueue as possible .
| 657
| 19
|
155,227
|
public synchronized List<TransactionTask> getBacklogTasks() { List<TransactionTask> pendingTasks = new ArrayList<>(); Iterator<TransactionTask> iter = m_backlog.iterator(); /* skip the first fragment, which is the streaming snapshot */ TransactionTask mpTask = iter.next(); assert (!mpTask.getTransactionState().isSinglePartition()); while (iter.hasNext()) { TransactionTask task = iter.next(); /* skip all fragments of the current transaction */ if (task.getTxnId() == mpTask.getTxnId()) { continue; } assert (task.getTransactionState().isSinglePartition()); pendingTasks.add(task); } return pendingTasks; }
|
Called from streaming snapshot execution
| 175
| 6
|
155,228
|
public synchronized void removeMPReadTransactions() { TransactionTask task = m_backlog.peekFirst(); while (task != null && task.getTransactionState().isReadOnly()) { task.getTransactionState().setDone(); flush(task.getTxnId()); task = m_backlog.peekFirst(); } }
|
Flush MP read-only transactions out of the backlog.
| 86
| 8
|
155,229
|
public List<List<GeographyPointValue>> getRings() { /* Gets the loops that make up the polygon, with the outer loop first. Note that we need to convert from XYZPoint to GeographyPointValue. Include the loop back to the first vertex. Also, since WKT wants holes oriented Clockwise and S2 wants everything oriented CounterClockWise, reverse the order of holes. We take care to leave the first vertex the same. */ List<List<GeographyPointValue>> llLoops = new ArrayList<>(); boolean isShell = true; for (List<XYZPoint> xyzLoop : m_loops) { List<GeographyPointValue> llLoop = new ArrayList<>(); /* Add the first of xyzLoop first. */ llLoop.add(xyzLoop.get(0).toGeographyPointValue()); /* Add shells left to right, and holes right to left. Make sure not to add the first element we just added. */ int startIdx = (isShell ? 1 : xyzLoop.size() - 1); int endIdx = (isShell ? xyzLoop.size() : 0); int delta = (isShell ? 1 : -1); for (int idx = startIdx; idx != endIdx; idx += delta) { XYZPoint xyz = xyzLoop.get(idx); llLoop.add(xyz.toGeographyPointValue()); } /* Close the loop. */ llLoop.add(xyzLoop.get(0).toGeographyPointValue()); llLoops.add(llLoop); isShell = false; } return llLoops; }
|
Return the list of rings of a polygon. The list has the same values as the list of rings used to construct the polygon, or the sequence of WKT rings used to construct the polygon.
| 389
| 41
|
155,230
|
public String toWKT() { StringBuffer sb = new StringBuffer(); sb.append("POLYGON ("); boolean isFirstLoop = true; for (List<XYZPoint> loop : m_loops) { if (!isFirstLoop) { sb.append(", "); } sb.append("("); int startIdx = (isFirstLoop ? 1 : loop.size() - 1); int endIdx = (isFirstLoop ? loop.size() : 0); int increment = (isFirstLoop ? 1 : -1); sb.append(loop.get(0).toGeographyPointValue().formatLngLat()).append(", "); for (int idx = startIdx; idx != endIdx; idx += increment) { XYZPoint xyz = loop.get(idx); sb.append(xyz.toGeographyPointValue().formatLngLat()); sb.append(", "); } /* Repeat the start vertex to close the loop, as WKT requires. */ sb.append(loop.get(0).toGeographyPointValue().formatLngLat()); sb.append(")"); isFirstLoop = false; } sb.append(")"); return sb.toString(); }
|
Return a representation of this object as well-known text.
| 318
| 12
|
155,231
|
public int getLengthInBytes() { long length = polygonOverheadInBytes(); for (List<XYZPoint> loop : m_loops) { length += loopLengthInBytes(loop.size()); } return (int) length; }
|
Return the number of bytes in the serialization for this polygon. The returned value does not include the 4-byte length prefix that precedes variable-length types.
| 59
| 34
|
155,232
|
private static <T> void diagnoseLoop(List<T> loop, String excpMsgPrf) throws IllegalArgumentException { if (loop == null) { throw new IllegalArgumentException(excpMsgPrf + "a polygon must contain at least one ring " + "(with each ring at least 4 points, including repeated closing vertex)"); } /* 4 vertices = 3 unique vertices for the polygon + 1 end point which is the same as the start point */ if (loop.size() < 4) { throw new IllegalArgumentException(excpMsgPrf + "a polygon ring must contain at least 4 points " + "(including repeated closing vertex)"); } /* check if the end points of the loop are equal */ if (loop.get(0).equals(loop.get(loop.size() - 1)) == false) { throw new IllegalArgumentException(excpMsgPrf + "closing points of ring are not equal: \"" + loop.get(0).toString() + "\" != \"" + loop.get(loop.size() - 1).toString() + "\""); } }
|
A helper function to validate the loop structure. If the loop is invalid, it throws an IllegalArgumentException.
| 253
| 19
|
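diagnoseLoop enforces two ring invariants: at least 4 points, and a first vertex equal to the last (the repeated closing vertex). A tiny runnable illustration of those checks, not the VoltDB code itself:

```java
import java.util.Arrays;
import java.util.List;

// Demonstrates the two ring invariants checked by diagnoseLoop.
final class RingCheckDemo {
    static <T> boolean isValidRing(List<T> loop) {
        return loop != null
                && loop.size() >= 4                                  // 3 unique + closing vertex
                && loop.get(0).equals(loop.get(loop.size() - 1));    // closed ring
    }

    public static void main(String[] args) {
        System.out.println(isValidRing(Arrays.asList("A", "B", "C", "A"))); // true
        System.out.println(isValidRing(Arrays.asList("A", "B", "C")));      // false
    }
}
```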
155,233
|
@Deprecated public GeographyValue add(GeographyPointValue offset) { List<List<GeographyPointValue>> newLoops = new ArrayList<>(); for (List<XYZPoint> oneLoop : m_loops) { List<GeographyPointValue> loop = new ArrayList<>(); for (XYZPoint p : oneLoop) { loop.add(p.toGeographyPointValue().add(offset)); } loop.add(oneLoop.get(0).toGeographyPointValue().add(offset)); newLoops.add(loop); } return new GeographyValue(newLoops, true); }
|
Create a new GeographyValue which is offset from this one by the given point. The latitude and longitude values stay in range because we are using the normalizing operations in GeographyPointValue.
| 153
| 40
|
155,234
|
public void sync() { if (isClosed) { return; } synchronized (fileStreamOut) { if (needsSync) { if (busyWriting) { forceSync = true; return; } try { fileStreamOut.flush(); outDescriptor.sync(); syncCount++; } catch (IOException e) { Error.printSystemOut("flush() or sync() error: " + e.toString()); } needsSync = false; forceSync = false; } } }
|
Called internally or externally in write-delay intervals.
| 112
| 10
|
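The core step inside sync() is flush-then-fsync: push the buffered bytes to the OS, then force them to stable storage. A JDK-only sketch of that step with a hypothetical helper name:

```java
import java.io.BufferedOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;

// Flush the user-space buffer, then fsync the descriptor (illustrative).
final class DurableWrite {
    static void flushAndSync(BufferedOutputStream out, FileOutputStream fileOut)
            throws IOException {
        out.flush();            // buffered bytes -> OS page cache
        fileOut.getFD().sync(); // page cache -> disk
    }
}
```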
155,235
|
protected void openFile() { try { FileAccess fa = isDump ? FileUtil.getDefaultInstance() : database.getFileAccess(); OutputStream fos = fa.openOutputStreamElement(outFile); outDescriptor = fa.getFileSync(fos); fileStreamOut = new BufferedOutputStream(fos, 2 << 12); } catch (IOException e) { throw Error.error(ErrorCode.FILE_IO_ERROR, ErrorCode.M_Message_Pair, new Object[]{ e.toString(), outFile }); } }
|
The file is opened in append mode, although in current usage the file never pre-exists.
| 135
| 16
|
155,236
|
private Runnable createRunnableLoggingTask(final Level level, final Object message, final Throwable t) { /* While logging, the logger thread temporarily disguises itself as its caller. */ final String callerThreadName = Thread.currentThread().getName(); final Runnable runnableLoggingTask = new Runnable() { @Override public void run() { Thread loggerThread = Thread.currentThread(); loggerThread.setName(callerThreadName); try { m_logger.log(level, message, t); } catch (Throwable t) { System.err.println("Exception thrown in logging thread for " + callerThreadName + ":" + t); } finally { loggerThread.setName(ASYNCH_LOGGER_THREAD_NAME); } } }; return runnableLoggingTask; }
|
Generate a runnable task that logs one message in an exception-safe way.
| 192
| 18
|
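The disguise trick above captures the caller's thread name eagerly and has the worker thread wear it while the log call runs. A compact sketch of the same pattern with illustrative names (the real class also handles localized messages and uses its own logger):

```java
// Wrap a log call so the worker thread temporarily reports the caller's name.
final class NamedLoggingTasks {
    static Runnable disguised(Runnable logCall, String workerName) {
        final String callerName = Thread.currentThread().getName(); // captured now
        return () -> {
            Thread worker = Thread.currentThread();
            worker.setName(callerName);
            try {
                logCall.run();
            } catch (Throwable t) {
                System.err.println("Logging failed for " + callerName + ": " + t);
            } finally {
                worker.setName(workerName); // restore the worker's own name
            }
        };
    }
}
```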
155,237
|
private Runnable createRunnableL7dLoggingTask(final Level level, final String key, final Object[] params, final Throwable t) { /* While logging, the logger thread temporarily disguises itself as its caller. */ final String callerThreadName = Thread.currentThread().getName(); final Runnable runnableLoggingTask = new Runnable() { @Override public void run() { Thread loggerThread = Thread.currentThread(); loggerThread.setName(callerThreadName); try { m_logger.l7dlog(level, key, params, t); } catch (Throwable t) { System.err.println("Exception thrown in logging thread for " + callerThreadName + ":" + t); } finally { loggerThread.setName(ASYNCH_LOGGER_THREAD_NAME); } } }; return runnableLoggingTask; }
|
Generate a runnable task that logs one localized message in an exception-safe way.
| 206
| 19
|
155,238
|
public static void configure(String xmlConfig, File voltroot) { try { Class<?> loggerClz = Class.forName("org.voltcore.logging.VoltLog4jLogger"); assert (loggerClz != null); Method configureMethod = loggerClz.getMethod("configure", String.class, File.class); configureMethod.invoke(null, xmlConfig, voltroot); } catch (Exception e) { } }
|
Static method to change the Log4j config globally. For now, this fails if you're not using Log4j.
| 105
| 24
|
155,239
|
public T get(String name) { if (m_items == null) { return null; } return m_items.get(name.toUpperCase()); }
|
Get an item from the map by name
| 39
| 8
|
155,240
|
@Override public Iterator<T> iterator() { if (m_items == null) { m_items = new TreeMap<String, T>(); } return m_items.values().iterator(); }
|
Get an iterator for the items in the map
| 52
| 9
|
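Records 155,239 and 155,240 show a map keyed by upper-cased names. The JDK can express the same case-insensitive lookup with a comparator instead of manual normalization; an illustrative alternative, not the project's code:

```java
import java.util.Map;
import java.util.TreeMap;

// Case-insensitive lookup via a comparator rather than name.toUpperCase().
final class CaseInsensitiveMapDemo {
    public static void main(String[] args) {
        Map<String, Integer> items = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
        items.put("Alpha", 1);
        System.out.println(items.get("ALPHA")); // 1
        System.out.println(items.get("alpha")); // 1
    }
}
```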
155,241
|
private static void validateMigrateStmt(String sql, VoltXMLElement xmlSQL, Database db) { final Map<String, String> attributes = xmlSQL.attributes; assert attributes.size() == 1; final Table targetTable = db.getTables().get(attributes.get("table")); assert targetTable != null; final CatalogMap<TimeToLive> ttls = targetTable.getTimetolive(); if (ttls.isEmpty()) { throw new PlanningErrorException(String.format("%s: Cannot migrate from table %s because it does not have a TTL column", sql, targetTable.getTypeName())); } else { final Column ttl = ttls.iterator().next().getTtlcolumn(); final TupleValueExpression columnExpression = new TupleValueExpression(targetTable.getTypeName(), ttl.getName(), ttl.getIndex()); if (!ExpressionUtil.collectTerminals(ExpressionUtil.from(db, VoltXMLElementHelper.getFirstChild(VoltXMLElementHelper.getFirstChild(xmlSQL, "condition"), "operation"))).contains(columnExpression)) { throw new PlanningErrorException(String.format("%s: Cannot migrate from table %s because the WHERE clause does not contain TTL column %s", sql, targetTable.getTypeName(), ttl.getName())); } } }
|
Check that a MIGRATE FROM tbl WHERE ... statement is valid.
| 343
| 15
|
155,242
|
public String parameterize() { Set<Integer> paramIds = new HashSet<>(); ParameterizationInfo.findUserParametersRecursively(m_xmlSQL, paramIds); m_adhocUserParamsCount = paramIds.size(); m_paramzInfo = null; if (paramIds.size() == 0) { m_paramzInfo = ParameterizationInfo.parameterize(m_xmlSQL); } /* skip plans with pre-existing parameters and plans that don't parameterize; assume a user knows how to cache/optimize these */ if (m_paramzInfo != null) { /* if requested, output the second version of the parsed plan */ m_planSelector.outputParameterizedCompiledStatement(m_paramzInfo.getParameterizedXmlSQL()); return m_paramzInfo.getParameterizedXmlSQL().toMinString(); } /* fallback when parameterization is skipped or fails */ return m_xmlSQL.toMinString(); }
|
Auto-parameterize all of the literals in the parsed SQL statement.
| 225
| 15
|
155,243
|
public CompiledPlan plan() throws PlanningErrorException { /* reset any error message */ m_recentErrorMsg = null; /* What's going to happen next: if a parameterized statement exists, try to make a plan with it; on success return the plan; on failure, try the plan again without parameterization. */ if (m_paramzInfo != null) { try { /* compile the plan with new parameters */ CompiledPlan plan = compileFromXML(m_paramzInfo.getParameterizedXmlSQL(), m_paramzInfo.getParamLiteralValues()); if (plan != null) { if (plan.extractParamValues(m_paramzInfo)) { return plan; } } else if (DEBUGGING_STATIC_MODE_TO_RETRY_ON_ERROR) { compileFromXML(m_paramzInfo.getParameterizedXmlSQL(), m_paramzInfo.getParamLiteralValues()); } /* fall through to try replanning without parameterization */ } catch (Exception | StackOverflowError e) { /* Ignore any errors planning with parameters and fall through to re-planning without them. Note: expect real planning errors ignored here to be thrown again below. */ m_hasExceptionWhenParameterized = true; m_recentErrorMsg = null; m_partitioning.resetAnalysisState(); } } /* if parameterization isn't requested or if it failed, plan here */ CompiledPlan plan = compileFromXML(m_xmlSQL, null); if (plan == null) { if (DEBUGGING_STATIC_MODE_TO_RETRY_ON_ERROR) { plan = compileFromXML(m_xmlSQL, null); } throw new PlanningErrorException(m_recentErrorMsg); } return plan; }
|
Get the best plan for the given SQL statement, assuming the given costModel.
| 394
| 15
|
155,244
|
private void harmonizeCommonTableSchemas(CompiledPlan plan) { List<AbstractPlanNode> seqScanNodes = plan.rootPlanGraph.findAllNodesOfClass(SeqScanPlanNode.class); for (AbstractPlanNode planNode : seqScanNodes) { SeqScanPlanNode seqScanNode = (SeqScanPlanNode) planNode; StmtCommonTableScan scan = seqScanNode.getCommonTableScan(); if (scan != null) { scan.harmonizeOutputSchema(); } } }
|
Make sure that the base and recursive plans in common table scans have identical output schemas. This is important because otherwise we will get data corruption in the EE. We look for SeqScanPlanNodes, then look for a common table scan and ask the scan node to harmonize its schemas.
| 119
| 61
|
155,245
|
public void resetCapacity(int newCapacity, int newPolicy) throws IllegalArgumentException { if (newCapacity != 0 && hashIndex.elementCount > newCapacity) { int surplus = hashIndex.elementCount - newCapacity; surplus += (surplus >> 5); if (surplus > hashIndex.elementCount) { surplus = hashIndex.elementCount; } clear(surplus, (surplus >> 6)); } if (newCapacity != 0 && newCapacity < threshold) { rehash(newCapacity); if (newCapacity < hashIndex.elementCount) { newCapacity = maxCapacity; } } this.maxCapacity = newCapacity; this.purgePolicy = newPolicy; }
|
In rare circumstances resetCapacity may not succeed, in which case the capacity remains unchanged but the purge policy is still set to newPolicy.
| 158
| 23
|
155,246
|
protected void initParams(Database database, String baseFileName) { fileName = baseFileName + ".data.tmp"; this.database = database; fa = FileUtil.getDefaultInstance(); int cacheSizeScale = 10; cacheFileScale = 8; Error.printSystemOut("cache_size_scale: " + cacheSizeScale); maxCacheSize = 2048; int avgRowBytes = 1 << cacheSizeScale; maxCacheBytes = maxCacheSize * avgRowBytes; maxDataFileSize = (long) Integer.MAX_VALUE * 4; dataFile = null; }
|
Initial external parameters are set here. The size is fixed.
| 130
| 12
|
155,247
|
public synchronized void close(boolean write) { try { if (dataFile != null) { dataFile.close(); dataFile = null; fa.removeElement(fileName); } } catch (Throwable e) { database.logger.appLog.logContext(e, null); throw Error.error(ErrorCode.FILE_IO_ERROR, ErrorCode.M_DataFileCache_close, new Object[]{ e, fileName }); } }
|
Parameter write is always false. The backing file is simply closed and deleted.
| 103
| 15
|
155,248
|
private AbstractPlanNode recursivelyApply(AbstractPlanNode plan, int childIdx) { /* If this is an insert plan node, then try to inline it. There will only ever be one insert node, so if we can't inline it we just return the given plan. */ if (plan instanceof InsertPlanNode) { InsertPlanNode insertNode = (InsertPlanNode) plan; assert (insertNode.getChildCount() == 1); AbstractPlanNode abstractChild = insertNode.getChild(0); ScanPlanNodeWhichCanHaveInlineInsert targetNode = (abstractChild instanceof ScanPlanNodeWhichCanHaveInlineInsert) ? ((ScanPlanNodeWhichCanHaveInlineInsert) abstractChild) : null; /* If we have a sequential/index scan node without an inline aggregate node, then we can inline the insert node. If INSERT INTO and SELECT FROM have the same target table name, then it could be a recursive insert into select; currently, our scan executor implementations cannot handle it well (ENG-13036). */ if (targetNode != null && !insertNode.isUpsert() && !targetNode.hasInlineAggregateNode() && !targetNode.getTargetTableName().equalsIgnoreCase(insertNode.getTargetTableName())) { AbstractPlanNode parent = (insertNode.getParentCount() > 0) ? insertNode.getParent(0) : null; AbstractPlanNode abstractTargetNode = targetNode.getAbstractNode(); abstractTargetNode.addInlinePlanNode(insertNode); /* Don't call removeFromGraph; that screws up the order of the children. */ insertNode.clearChildren(); insertNode.clearParents(); /* Remove all the abstractTargetNode's parents. It used to be the insertNode, which is now dead to us. */ abstractTargetNode.clearParents(); if (parent != null) { parent.setAndLinkChild(childIdx, abstractTargetNode); } plan = abstractTargetNode; } return plan; } for (int idx = 0; idx < plan.getChildCount(); idx += 1) { AbstractPlanNode child = plan.getChild(idx); recursivelyApply(child, idx); } return plan; }
|
This helper function is called when we recurse down the childIdx-th child of a parent node.
| 508
| 22
|
155,249
|
static void updateTableNames(List<ParsedColInfo> src, String tblName) { src.forEach(ci -> ci.updateTableName(tblName, tblName).toTVE(ci.m_index, ci.m_index)); }
|
Update table names.
| 66
| 3
|
155,250
|
ParsedSelectStmt rewriteAsMV(Table view) { m_groupByColumns.clear(); m_distinctGroupByColumns = null; m_groupByExpressions.clear(); m_distinctProjectSchema = null; m_distinct = m_hasAggregateExpression = m_hasComplexGroupby = m_hasComplexAgg = false; /* Resets paramsBy* filters, assuming that it's equivalent to "SELECT * FROM MV". In future, this needs an update to accommodate revised filters (e.g. when one or more filters are removed). */ setParamsByIndex(new TreeMap<>()); m_paramsById.clear(); m_paramValues = null; /* m_sql does not need updating */ m_tableList.clear(); m_tableList.add(view); /* reset m_tableAliasMap, which keeps track of sub-queries */ m_tableAliasMap.clear(); m_tableAliasListAsJoinOrder.clear(); m_tableAliasListAsJoinOrder.add(view.getTypeName()); m_joinTree = new TableLeafNode(0, null, null, generateStmtTableScan(view)); /* update MaterializedViewFixInfo when the partition key comes from multiple tables */ prepareMVBasedQueryFix(); return this; }
|
Updates miscellaneous fields as part of rewriting as a materialized view.
| 303
| 14
|
155,251
|
public StmtTargetTableScan generateStmtTableScan(Table view) { StmtTargetTableScan st = new StmtTargetTableScan(view); m_displayColumns.forEach(ci -> st.resolveTVE((TupleValueExpression) (ci.m_expression))); defineTableScanByAlias(view.getTypeName(), st); return st; }
|
Generate table scan and add the scan to m_tableAliasMap
| 90
| 14
|
155,252
|
public void switchOptimalSuiteForAvgPushdown() { m_displayColumns = m_avgPushdownDisplayColumns; m_aggResultColumns = m_avgPushdownAggResultColumns; m_groupByColumns = m_avgPushdownGroupByColumns; m_distinctGroupByColumns = m_avgPushdownDistinctGroupByColumns; m_orderColumns = m_avgPushdownOrderColumns; m_projectSchema = m_avgPushdownProjectSchema; m_distinctProjectSchema = m_avgPushdownFinalProjectSchema; m_hasComplexAgg = true; m_having = m_avgPushdownHaving; }
|
Switch to the optimal column set for pushing down AVG.
| 161
| 8
|
155,253
|
private void prepareMVBasedQueryFix() { /* ENG-5386: edge-case queries returning correct answers with aggregation push-down do not need reaggregation work. */ if (m_hasComplexGroupby) { m_mvFixInfo.setEdgeCaseQueryNoFixNeeded(false); } /* Handle the joined query case. An MV partitioned table without its partition column can only join with replicated tables. For all tables in this query, the number of tables that need to be fixed should not exceed one. */ for (StmtTableScan mvTableScan : allScans()) { Set<SchemaColumn> mvNewScanColumns = new HashSet<>(); Collection<SchemaColumn> columns = mvTableScan.getScanColumns(); /* For a COUNT(*)-only scan, a table may have no scan columns. For a joined query without processed columns from table TB, TB has no scan columns. */ if (columns != null) { mvNewScanColumns.addAll(columns); } /* ENG-5669: HAVING aggregation and ORDER BY aggregation also need to be checked. */ if (m_mvFixInfo.processMVBasedQueryFix(mvTableScan, mvNewScanColumns, m_joinTree, m_aggResultColumns, groupByColumns())) { break; } } }
|
Prepare for the MV-based distributed query fix, but only if it might be required.
| 303
| 17
|
155,254
|
private void placeTVEsinColumns() { /* Build the association between the table columns and their indexes. */ Map<AbstractExpression, Integer> aggTableIndexMap = new HashMap<>(); Map<Integer, ParsedColInfo> indexToColumnMap = new HashMap<>(); int index = 0; for (ParsedColInfo col : m_aggResultColumns) { aggTableIndexMap.put(col.m_expression, index); if (col.m_alias == null) { /* hack: any unique string */ col.m_alias = "$$_" + col.m_expression.getExpressionType().symbol() + "_$$_" + index; } indexToColumnMap.put(index, col); index++; } /* Replace TVEs for group by columns. */ m_groupByExpressions = new HashMap<>(); for (ParsedColInfo groupbyCol : m_groupByColumns) { AbstractExpression expr = groupbyCol.m_expression; assert (aggTableIndexMap.get(expr) != null); expr = expr.replaceWithTVE(aggTableIndexMap, indexToColumnMap); m_groupByExpressions.put(groupbyCol.m_alias, expr); } if (m_having != null) { m_having = m_having.replaceWithTVE(aggTableIndexMap, indexToColumnMap); ExpressionUtil.finalizeValueTypes(m_having); } /* Replace TVEs for display columns. */ m_projectSchema = new NodeSchema(); for (ParsedColInfo col : m_displayColumns) { AbstractExpression expr = col.m_expression; if (hasComplexAgg()) { expr = expr.replaceWithTVE(aggTableIndexMap, indexToColumnMap); } m_projectSchema.addColumn(col.m_tableName, col.m_tableAlias, col.m_columnName, col.m_alias, expr, col.m_differentiator); } /* DISTINCT group by expressions are already TVEs when set. */ placeTVEsForOrderby(aggTableIndexMap, indexToColumnMap); }
|
Generate the new output schema and place TVEs for display columns if needed. Always place TVEs for order by columns.
| 486
| 25
|
155,255
|
private void insertAggExpressionsToAggResultColumns(List<AbstractExpression> aggColumns, ParsedColInfo cookedCol) { for (AbstractExpression expr : aggColumns) { assert (expr instanceof AggregateExpression); if (expr.hasSubquerySubexpression()) { throw new PlanningErrorException("SQL Aggregate function calls with subquery expression arguments are not allowed."); } ParsedColInfo col = new ParsedColInfo(); col.m_expression = expr.clone(); assert (col.m_expression instanceof AggregateExpression); if (col.m_expression.getExpressionType() == ExpressionType.AGGREGATE_AVG) { m_hasAverage = true; } if (aggColumns.size() == 1 && cookedCol.m_expression.equals(aggColumns.get(0))) { col.m_alias = cookedCol.m_alias; col.m_tableName = cookedCol.m_tableName; col.m_tableAlias = cookedCol.m_tableAlias; col.m_columnName = cookedCol.m_columnName; if (!m_aggResultColumns.contains(col)) { m_aggResultColumns.add(col); } return; } /* Try to check complexAggs earlier. */ m_hasComplexAgg = true; /* Aggregation columns use the hacky temp-table naming. */ col.m_tableName = TEMP_TABLE_NAME; col.m_tableAlias = TEMP_TABLE_NAME; col.m_columnName = ""; if (!m_aggResultColumns.contains(col)) { m_aggResultColumns.add(col); } ExpressionUtil.finalizeValueTypes(col.m_expression); } }
|
ParseDisplayColumns and ParseOrderColumns will call this function to add aggregation expressions to aggResultColumns.
| 399
| 25
|
155,256
|
private static void insertToColumnList(List<ParsedColInfo> columnList, List<ParsedColInfo> newCols) { for (ParsedColInfo col : newCols) { if (!columnList.contains(col)) { columnList.add(col); } } }
|
Concat elements to the XXXColumns list.
| 66
| 9
|
155,257
|
private void findAllTVEs(AbstractExpression expr, List<TupleValueExpression> tveList) { if (!isNewtoColumnList(m_aggResultColumns, expr)) { return; } if (expr instanceof TupleValueExpression) { tveList.add((TupleValueExpression) expr.clone()); return; } if (expr.getLeft() != null) { findAllTVEs(expr.getLeft(), tveList); } if (expr.getRight() != null) { findAllTVEs(expr.getRight(), tveList); } if (expr.getArgs() != null) { for (AbstractExpression ae : expr.getArgs()) { findAllTVEs(ae, tveList); } } }
|
Find all TVEs, except those inside an AggregationExpression.
| 191
| 11
|
155,258
|
private void verifyWindowFunctionExpressions() { /* Check for windowed expressions. */ if (m_windowFunctionExpressions.size() > 0) { if (m_windowFunctionExpressions.size() > 1) { throw new PlanningErrorException("Only one windowed function call may appear in a selection list."); } if (m_hasAggregateExpression) { throw new PlanningErrorException("Use of window functions (in an OVER clause) isn't supported with other aggregate functions on the SELECT list."); } if (m_windowFunctionExpressions.get(0).hasSubqueryArgs()) { throw new PlanningErrorException("Window function calls with subquery expression arguments are not allowed."); } WindowFunctionExpression windowFunctionExpression = m_windowFunctionExpressions.get(0); List<AbstractExpression> orderByExpressions = windowFunctionExpression.getOrderByExpressions(); ExpressionType exprType = windowFunctionExpression.getExpressionType(); String aggName = exprType.symbol().toUpperCase(); switch (exprType) { case AGGREGATE_WINDOWED_RANK : case AGGREGATE_WINDOWED_DENSE_RANK : if (orderByExpressions.size() == 0) { throw new PlanningErrorException("Windowed " + aggName + " function call expressions require an ORDER BY specification."); } VoltType valType = orderByExpressions.get(0).getValueType(); assert (valType != null); if (!valType.isAnyIntegerType() && (valType != VoltType.TIMESTAMP)) { throw new PlanningErrorException("Windowed function call expressions can have only integer or TIMESTAMP value types in the ORDER BY expression of their window."); } break; case AGGREGATE_WINDOWED_COUNT : if (windowFunctionExpression.getAggregateArguments().size() > 1) { throw new PlanningErrorException(String.format("Windowed COUNT must have either exactly one argument or else a star for an argument")); } /* Any type is ok, so we won't inspect the type. */ break; case AGGREGATE_WINDOWED_MAX : case AGGREGATE_WINDOWED_MIN : if (windowFunctionExpression.getAggregateArguments().size() != 1) { throw new PlanningErrorException(String.format("Windowed %s must have exactly one argument", aggName)); } /* Any type is ok, so we won't inspect the type. */ break; case AGGREGATE_WINDOWED_SUM : if (windowFunctionExpression.getAggregateArguments().size() != 1) { throw new PlanningErrorException(String.format("Windowed SUM must have exactly one numeric argument")); } AbstractExpression arg = windowFunctionExpression.getAggregateArguments().get(0); VoltType vt = arg.getValueType(); assert (vt != null); if (!vt.isNumber()) { throw new PlanningErrorException("Windowed SUM must have exactly one numeric argument"); } break; case AGGREGATE_WINDOWED_ROW_NUMBER : break; default : { String opName = (exprType == null) ? "NULL" : exprType.symbol(); throw new PlanningErrorException("Unknown windowed aggregate function type: " + opName); } } } }
|
Verify the validity of the windowed expressions.
| 785
| 10
|
155,259
|
private boolean canPushdownLimit() { boolean limitCanPushdown = (m_limitOffset.hasLimit() && !m_distinct); if (limitCanPushdown) { for (ParsedColInfo col : m_displayColumns) { AbstractExpression rootExpr = col.m_expression; if (rootExpr instanceof AggregateExpression) { if (((AggregateExpression) rootExpr).isDistinct()) { limitCanPushdown = false; break; } } } } return limitCanPushdown; }
|
Check if the LimitPlanNode can be pushed down. The LimitPlanNode may have a LIMIT clause only, an OFFSET clause only, or both. Offset-only cannot be pushed down.
| 123
| 38
|
155,260
|
@Override public boolean isOrderDeterministic() { if (!hasTopLevelScans()) { /* This currently applies to parent queries that do all their scanning in subqueries and so take on the order determinism of their subqueries. This might have to be rethought to allow ordering in parent queries to effect determinism of unordered "FROM CLAUSE" subquery results. */ return true; } if (hasAOneRowResult()) { return true; } if (!hasOrderByColumns()) { return false; } /* The nonOrdered expression list is used as a short-cut: if an expression has been determined to be non-ordered when encountered as a GROUP BY expression, it will also be non-ordered when encountered in the select list. */ ArrayList<AbstractExpression> nonOrdered = new ArrayList<>(); if (isGrouped()) { /* Does the ordering of a statement's GROUP BY columns ensure determinism? All display columns and order-by expressions are functionally dependent on the GROUP BY columns, even if the display column's values are not ordered or unique, so ordering by ALL of the GROUP BY columns is enough to get full determinism, EVEN if ordering by other (dependent) expressions, regardless of the placement of non-GROUP BY expressions in the ORDER BY list. */ if (orderByColumnsDetermineAllColumns(m_groupByColumns, nonOrdered)) { return true; } if (orderByColumnsDetermineAllDisplayColumns(nonOrdered)) { return true; } } else { if (orderByColumnsDetermineAllDisplayColumns(nonOrdered)) { return true; } if (orderByColumnsCoverUniqueKeys()) { return true; } } return false; }
|
Returns true if this select statement can be proved to always produce its result rows in the same order every time it is executed.
| 403
| 26
|
155,261
|
public boolean orderByColumnsDetermineAllDisplayColumnsForUnion(List<ParsedColInfo> orderColumns) { Set<AbstractExpression> orderExprs = new HashSet<>(); for (ParsedColInfo col : orderColumns) { orderExprs.add(col.m_expression); } for (ParsedColInfo col : m_displayColumns) { if (!orderExprs.contains(col.m_expression)) { return false; } } return true; }
|
This is a very simple version of the above method, for when an ORDER BY clause appears on a UNION. Does the ORDER BY clause reference every item on the display list? If so, then the order is deterministic.
| 117
| 44
|
155,262
|
public boolean isPartitionColumnInWindowedAggregatePartitionByList() { if (getWindowFunctionExpressions().size() == 0) { return false; } /* We can't really have more than one windowed aggregate expression. If we ever do, this should fail gracelessly. */ assert (getWindowFunctionExpressions().size() == 1); WindowFunctionExpression we = getWindowFunctionExpressions().get(0); List<AbstractExpression> partitionByExprs = we.getPartitionByExpressions(); boolean foundPartExpr = false; for (AbstractExpression ae : partitionByExprs) { if (!(ae instanceof TupleValueExpression)) { continue; } TupleValueExpression tve = (TupleValueExpression) ae; String tableAlias = tve.getTableAlias(); String columnName = tve.getColumnName(); StmtTableScan scanTable = getStmtTableScanByAlias(tableAlias); if (scanTable == null || scanTable.getPartitioningColumns() == null) { continue; } boolean foundPartCol = false; for (SchemaColumn pcol : scanTable.getPartitioningColumns()) { if (pcol != null && pcol.getColumnName().equals(columnName)) { foundPartCol = true; break; } } /* If we found a partition column, then we don't need to look at any other partition by expressions in this windowed expression. */ if (foundPartCol) { foundPartExpr = true; break; } } return foundPartExpr; }
|
Return true iff all the windowed partition expressions have a table partition column in their partition by list, and there is at least one such windowed partition expression. If there are no windowed expressions, we return false. Note that there can only be one windowed expression currently, so this is more general than it needs to be.
| 372
| 64
|
155,263
|
@Deprecated public static GeographyValue CreateRegularConvex(GeographyPointValue center, GeographyPointValue firstVertex, int numVertices, double sizeOfHole) { assert (0 <= sizeOfHole && sizeOfHole < 1.0); double phi = 360.0 / numVertices; GeographyPointValue holeFirstVertex = null; if (sizeOfHole > 0) { holeFirstVertex = firstVertex.scale(center, sizeOfHole); } List<GeographyPointValue> oneLoop = new ArrayList<>(); List<GeographyPointValue> hole = (sizeOfHole < 0 ? null : new ArrayList<>()); /* We will add the nth point at angle n*phi. For shells we want to add points in a CCW order, so phi must be a positive angle. For holes we want to add in a CW order, so phi must be a negative angle. */ for (int idx = 0; idx < numVertices; idx += 1) { int holeIdx = numVertices - idx; oneLoop.add(firstVertex.rotate(idx * phi, center)); if (sizeOfHole > 0) { hole.add(holeFirstVertex.rotate(-(holeIdx * phi), center)); } } /* Add the closing vertices. */ oneLoop.add(firstVertex); if (sizeOfHole > 0) { hole.add(holeFirstVertex); } List<List<GeographyPointValue>> loops = new ArrayList<>(); loops.add(oneLoop); if (sizeOfHole > 0) { loops.add(hole); } return new GeographyValue(loops); }
|
Create a regular convex polygon, with an optional hole.
| 402
| 12
|
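The construction above places the n-th vertex at angle n*phi around the center, with phi = 360/numVertices: a positive angle yields a counterclockwise shell, a negated angle a clockwise hole. A planar sketch of the same arithmetic, assuming a hypothetical Point type rather than VoltDB's geodesic GeographyPointValue:

    import java.util.*;

    final class RegularPolygonSketch {
        record Point(double x, double y) {}

        // Rotate p around center by the given angle in degrees.
        static Point rotate(Point p, Point center, double degrees) {
            double r = Math.toRadians(degrees);
            double dx = p.x() - center.x(), dy = p.y() - center.y();
            return new Point(center.x() + dx * Math.cos(r) - dy * Math.sin(r),
                             center.y() + dx * Math.sin(r) + dy * Math.cos(r));
        }

        // Build a closed counterclockwise shell of numVertices vertices.
        static List<Point> shell(Point center, Point first, int numVertices) {
            double phi = 360.0 / numVertices; // positive angle => CCW winding
            List<Point> loop = new ArrayList<>();
            for (int idx = 0; idx < numVertices; idx++) {
                loop.add(rotate(first, center, idx * phi));
            }
            loop.add(first); // closing vertex
            return loop;
        }
    }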
155,264
|
@ Deprecated public static GeographyValue reverseLoops ( GeographyValue goodPolygon ) { List < List < GeographyPointValue >> newLoops = new ArrayList <> ( ) ; List < List < GeographyPointValue > > oldLoops = goodPolygon . getRings ( ) ; for ( List < GeographyPointValue > loop : oldLoops ) { // Copy loop, but reverse the points. List < GeographyPointValue > newLoop = new ArrayList <> ( ) ; // Leave the first and last one fixed, but copy // all the others from the end. newLoop . add ( loop . get ( 0 ) ) ; for ( int idx = loop . size ( ) - 2 ; idx > 1 ; idx -= 1 ) { newLoop . add ( loop . get ( idx ) ) ; } newLoops . add ( newLoop ) ; } return new GeographyValue ( newLoops ) ; }
|
Reverse all the loops in a polygon . Don't change the order of the loops ; just reverse each loop .
| 203
| 24
|
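For comparison, a generic ring-reversal sketch that keeps the first vertex in place and re-adds the closing vertex; note that the deprecated helper above iterates only while idx > 1 and does not re-close the loop, so this is a cleaned-up variant, not a transcription:

    import java.util.*;

    final class RingReverse {
        // Reverse a closed ring's winding while keeping vertex 0 first.
        static <T> List<T> reverseRing(List<T> ring) {
            List<T> out = new ArrayList<>();
            out.add(ring.get(0));
            for (int idx = ring.size() - 2; idx >= 1; idx--) {
                out.add(ring.get(idx));
            }
            out.add(ring.get(0)); // re-close the ring
            return out;
        }
    }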
155,265
|
public void grant ( String granteeName , String roleName , Grantee grantor ) { Grantee grantee = get ( granteeName ) ; if ( grantee == null ) { throw Error . error ( ErrorCode . X_28501 , granteeName ) ; } if ( isImmutable ( granteeName ) ) { throw Error . error ( ErrorCode . X_28502 , granteeName ) ; } Grantee role = getRole ( roleName ) ; if ( role == null ) { throw Error . error ( ErrorCode . X_0P000 , roleName ) ; } if ( role == grantee ) { throw Error . error ( ErrorCode . X_0P501 , granteeName ) ; } // boucherb@users 20050515 // SQL 2003 Foundation, 4.34.3 // No cycles of role grants are allowed. if ( role . hasRole ( grantee ) ) { // boucherb@users /** @todo: Correct reporting of actual grant path */ throw Error . error ( ErrorCode . X_0P501 , roleName ) ; } if ( ! grantor . isGrantable ( role ) ) { throw Error . error ( ErrorCode . X_0L000 , grantor . getNameString ( ) ) ; } grantee . grant ( role ) ; grantee . updateAllRights ( ) ; if ( grantee . isRole ) { updateAllRights ( grantee ) ; } }
|
Grant a role to this Grantee .
| 308
| 8
|
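The interesting step above is the cycle guard: granting role R to grantee G must fail when G is already reachable from R through existing role grants (SQL 2003 Foundation, 4.34.3). A small sketch of that reachability check over a name-keyed grant graph (all names invented):

    import java.util.*;

    final class RoleGraph {
        private final Map<String, Set<String>> granted = new HashMap<>();

        // Breadth-first search: is target reachable from 'from' via role grants?
        boolean reachable(String from, String target) {
            Deque<String> work = new ArrayDeque<>();
            work.add(from);
            Set<String> seen = new HashSet<>();
            while (!work.isEmpty()) {
                String r = work.remove();
                if (r.equals(target)) return true;
                if (seen.add(r)) work.addAll(granted.getOrDefault(r, Set.of()));
            }
            return false;
        }

        void grant(String grantee, String role) {
            if (grantee.equals(role) || reachable(role, grantee)) {
                throw new IllegalArgumentException("role grant would create a cycle");
            }
            granted.computeIfAbsent(grantee, k -> new HashSet<>()).add(role);
        }
    }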
155,266
|
public void revoke ( String granteeName , String roleName , Grantee grantor ) { if ( ! grantor . isAdmin ( ) ) { throw Error . error ( ErrorCode . X_42507 ) ; } Grantee grantee = get ( granteeName ) ; if ( grantee == null ) { throw Error . error ( ErrorCode . X_28000 , granteeName ) ; } Grantee role = ( Grantee ) roleMap . get ( roleName ) ; grantee . revoke ( role ) ; grantee . updateAllRights ( ) ; if ( grantee . isRole ) { updateAllRights ( grantee ) ; } }
|
Revoke a role from a Grantee .
| 140
| 8
|
155,267
|
void removeEmptyRole ( Grantee role ) { for ( int i = 0 ; i < map . size ( ) ; i ++ ) { Grantee grantee = ( Grantee ) map . get ( i ) ; grantee . roles . remove ( role ) ; } }
|
Removes a role without any privileges from all grantees
| 57
| 11
|
155,268
|
public void removeDbObject ( HsqlName name ) { for ( int i = 0 ; i < map . size ( ) ; i ++ ) { Grantee g = ( Grantee ) map . get ( i ) ; g . revokeDbObject ( name ) ; } }
|
Removes all rights mappings for the database object identified by the dbobject argument from all Grantee objects in the set .
| 57
| 25
|
155,269
|
void updateAllRights ( Grantee role ) { for ( int i = 0 ; i < map . size ( ) ; i ++ ) { Grantee grantee = ( Grantee ) map . get ( i ) ; if ( grantee . isRole ) { grantee . updateNestedRoles ( role ) ; } } for ( int i = 0 ; i < map . size ( ) ; i ++ ) { Grantee grantee = ( Grantee ) map . get ( i ) ; if ( ! grantee . isRole ) { grantee . updateAllRights ( ) ; } } }
|
First updates all ROLE Grantee objects . Then updates all USER Grantee Objects .
| 127
| 18
|
155,270
|
public Grantee getRole ( String name ) { Grantee g = ( Grantee ) roleMap . get ( name ) ; if ( g == null ) { throw Error . error ( ErrorCode . X_0P000 , name ) ; } return g ; }
|
Returns Grantee for the named Role
| 55
| 7
|
155,271
|
private void connect ( Session session , boolean withReadOnlyData ) { // Open new cache: if ( ( dataSource . length ( ) == 0 ) || isConnected ) { // nothing to do return ; } PersistentStore store = database . persistentStoreCollection . getStore ( this ) ; this . store = store ; DataFileCache cache = null ; try { cache = ( TextCache ) database . logger . openTextCache ( this , dataSource , withReadOnlyData , isReversed ) ; store . setCache ( cache ) ; // read and insert all the rows from the source file Row row = null ; int nextpos = 0 ; if ( ( ( TextCache ) cache ) . ignoreFirst ) { nextpos += ( ( TextCache ) cache ) . readHeaderLine ( ) ; } while ( true ) { row = ( Row ) store . get ( nextpos , false ) ; if ( row == null ) { break ; } Object [ ] data = row . getData ( ) ; nextpos = row . getPos ( ) + row . getStorageSize ( ) ; ( ( RowAVLDiskData ) row ) . setNewNodes ( ) ; systemUpdateIdentityValue ( data ) ; enforceRowConstraints ( session , data ) ; for ( int i = 0 ; i < indexList . length ; i ++ ) { indexList [ i ] . insert ( null , store , row ) ; } } } catch ( Exception e ) { int linenumber = cache == null ? 0 : ( ( TextCache ) cache ) . getLineNumber ( ) ; clearAllData ( session ) ; if ( cache != null ) { database . logger . closeTextCache ( this ) ; store . release ( ) ; } // everything is in order here. // At this point table should either have a valid (old) data // source and cache or have an empty source and null cache. throw Error . error ( ErrorCode . TEXT_FILE , 0 , new Object [ ] { new Integer ( linenumber ) , e . getMessage ( ) } ) ; } isConnected = true ; isReadOnly = withReadOnlyData ; }
|
connects to the data source
| 450
| 6
|
155,272
|
public void disconnect ( ) { this . store = null ; PersistentStore store = database . persistentStoreCollection . getStore ( this ) ; store . release ( ) ; isConnected = false ; }
|
disconnects from the data source
| 42
| 7
|
155,273
|
private void openCache ( Session session , String dataSourceNew , boolean isReversedNew , boolean isReadOnlyNew ) { String dataSourceOld = dataSource ; boolean isReversedOld = isReversed ; boolean isReadOnlyOld = isReadOnly ; if ( dataSourceNew == null ) { dataSourceNew = "" ; } disconnect ( ) ; dataSource = dataSourceNew ; isReversed = ( isReversedNew && dataSource . length ( ) > 0 ) ; try { connect ( session , isReadOnlyNew ) ; } catch ( HsqlException e ) { dataSource = dataSourceOld ; isReversed = isReversedOld ; connect ( session , isReadOnlyOld ) ; throw e ; } }
|
This method does some of the work involved with managing the creation and opening of the cache ; the rest is done in Log . java and TextCache . java .
| 160
| 32
|
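The row above follows a save / try / restore pattern: remember the old source settings, switch to the new ones, and if connecting fails, put the old settings back and reconnect before rethrowing. A generic sketch of the pattern (resource handling elided):

    final class SwapWithRollback {
        private String source = "";

        void reopen(String newSource) {
            String oldSource = source;
            disconnect();
            source = newSource;
            try {
                connect(source);
            } catch (RuntimeException e) {
                source = oldSource; // restore the previous state
                connect(source);    // and reconnect with it
                throw e;
            }
        }

        private void connect(String s) { /* open the resource for s */ }
        private void disconnect()      { /* release the current resource */ }
    }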
155,274
|
protected void setDataSource ( Session session , String dataSourceNew , boolean isReversedNew , boolean createFile ) { if ( getTableType ( ) == Table . TEMP_TEXT_TABLE ) { ; } else { session . getGrantee ( ) . checkSchemaUpdateOrGrantRights ( getSchemaName ( ) . name ) ; } dataSourceNew = dataSourceNew . trim ( ) ; if ( createFile && FileUtil . getDefaultInstance ( ) . exists ( dataSourceNew ) ) { throw Error . error ( ErrorCode . TEXT_SOURCE_EXISTS , dataSourceNew ) ; } //-- Open if descending, direction changed, file changed, or not connected currently if ( isReversedNew || ( isReversedNew != isReversed ) || ! dataSource . equals ( dataSourceNew ) || ! isConnected ) { openCache ( session , dataSourceNew , isReversedNew , isReadOnly ) ; } if ( isReversed ) { isReadOnly = true ; } }
|
High level command to assign a data source to the table definition . Reassigns only if the data source or direction has changed .
| 224
| 27
|
155,275
|
void checkDataReadOnly ( ) { if ( dataSource . length ( ) == 0 ) { throw Error . error ( ErrorCode . TEXT_TABLE_UNKNOWN_DATA_SOURCE ) ; } if ( isReadOnly ) { throw Error . error ( ErrorCode . DATA_IS_READONLY ) ; } }
|
Used by INSERT , DELETE and UPDATE operations . Returns a more appropriate message when there is no data source .
| 67
| 25
|
155,276
|
@ Override public void addBatch ( ) throws SQLException { checkClosed ( ) ; if ( this . Query . isOfType ( VoltSQL . TYPE_EXEC , VoltSQL . TYPE_SELECT ) ) { throw SQLError . get ( SQLError . ILLEGAL_STATEMENT , this . Query . toSqlString ( ) ) ; } this . addBatch ( this . Query . getExecutableQuery ( this . parameters ) ) ; this . parameters = this . Query . getParameterArray ( ) ; }
|
Adds a set of parameters to this PreparedStatement object's batch of commands .
| 119
| 16
|
155,277
|
@ Override public boolean execute ( ) throws SQLException { checkClosed ( ) ; boolean result = this . execute ( this . Query . getExecutableQuery ( this . parameters ) ) ; this . parameters = this . Query . getParameterArray ( ) ; return result ; }
|
Executes the SQL statement in this PreparedStatement object which may be any kind of SQL statement .
| 60
| 20
|
155,278
|
@ Override public ResultSet executeQuery ( ) throws SQLException { checkClosed ( ) ; if ( ! this . Query . isOfType ( VoltSQL . TYPE_EXEC , VoltSQL . TYPE_SELECT ) ) { throw SQLError . get ( SQLError . ILLEGAL_STATEMENT , this . Query . toSqlString ( ) ) ; } ResultSet result = this . executeQuery ( this . Query . getExecutableQuery ( this . parameters ) ) ; this . parameters = this . Query . getParameterArray ( ) ; return result ; }
|
Executes the SQL query in this PreparedStatement object and returns the ResultSet object generated by the query .
| 126
| 22
|
155,279
|
@ Override public void setArray ( int parameterIndex , Array x ) throws SQLException { checkParameterBounds ( parameterIndex ) ; throw SQLError . noSupport ( ) ; }
|
Sets the designated parameter to the given java . sql . Array object .
| 42
| 15
|
155,280
|
@ Override public void setByte ( int parameterIndex , byte x ) throws SQLException { checkParameterBounds ( parameterIndex ) ; this . parameters [ parameterIndex - 1 ] = x ; }
|
Sets the designated parameter to the given Java byte value .
| 43
| 12
|
155,281
|
@ Override public void setBytes ( int parameterIndex , byte [ ] x ) throws SQLException { checkParameterBounds ( parameterIndex ) ; this . parameters [ parameterIndex - 1 ] = x ; }
|
Sets the designated parameter to the given Java array of bytes .
| 45
| 13
|
155,282
|
@ Override public void setCharacterStream ( int parameterIndex , Reader reader ) throws SQLException { checkParameterBounds ( parameterIndex ) ; throw SQLError . noSupport ( ) ; }
|
Sets the designated parameter to the given Reader object .
| 43
| 11
|
155,283
|
@ Override public void setDouble ( int parameterIndex , double x ) throws SQLException { checkParameterBounds ( parameterIndex ) ; this . parameters [ parameterIndex - 1 ] = x ; }
|
Sets the designated parameter to the given Java double value .
| 43
| 12
|
155,284
|
@ Override public void setFloat ( int parameterIndex , float x ) throws SQLException { checkParameterBounds ( parameterIndex ) ; this . parameters [ parameterIndex - 1 ] = ( double ) x ; }
|
Sets the designated parameter to the given Java float value .
| 46
| 12
|
155,285
|
@ Override public void setInt ( int parameterIndex , int x ) throws SQLException { checkParameterBounds ( parameterIndex ) ; this . parameters [ parameterIndex - 1 ] = x ; }
|
Sets the designated parameter to the given Java int value .
| 43
| 12
|
155,286
|
@ Override public void setLong ( int parameterIndex , long x ) throws SQLException { checkParameterBounds ( parameterIndex ) ; this . parameters [ parameterIndex - 1 ] = x ; }
|
Sets the designated parameter to the given Java long value .
| 43
| 12
|
155,287
|
@ Override public void setNString ( int parameterIndex , String value ) throws SQLException { checkParameterBounds ( parameterIndex ) ; throw SQLError . noSupport ( ) ; }
|
Sets the designated parameter to the given String object .
| 43
| 12
|
155,288
|
@ Override public void setNull ( int parameterIndex , int sqlType ) throws SQLException { checkParameterBounds ( parameterIndex ) ; switch ( sqlType ) { case Types . TINYINT : this . parameters [ parameterIndex - 1 ] = VoltType . NULL_TINYINT ; break ; case Types . SMALLINT : this . parameters [ parameterIndex - 1 ] = VoltType . NULL_SMALLINT ; break ; case Types . INTEGER : this . parameters [ parameterIndex - 1 ] = VoltType . NULL_INTEGER ; break ; case Types . BIGINT : this . parameters [ parameterIndex - 1 ] = VoltType . NULL_BIGINT ; break ; case Types . DOUBLE : this . parameters [ parameterIndex - 1 ] = VoltType . NULL_FLOAT ; break ; case Types . DECIMAL : this . parameters [ parameterIndex - 1 ] = VoltType . NULL_DECIMAL ; break ; case Types . TIMESTAMP : this . parameters [ parameterIndex - 1 ] = VoltType . NULL_TIMESTAMP ; break ; case Types . VARBINARY : case Types . VARCHAR : case Types . NVARCHAR : case Types . OTHER : case Types . NULL : this . parameters [ parameterIndex - 1 ] = VoltType . NULL_STRING_OR_VARBINARY ; break ; default : throw SQLError . get ( SQLError . ILLEGAL_ARGUMENT ) ; } }
|
Sets the designated parameter to SQL NULL .
| 322
| 9
|
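A usage sketch for the mapping above: each JDBC Types constant is translated to the matching VoltType null sentinel before the statement runs. The table and column names are invented for illustration:

    import java.sql.*;

    final class SetNullExample {
        static void insertWithNulls(Connection conn) throws SQLException {
            try (PreparedStatement ps = conn.prepareStatement(
                    "INSERT INTO t (id, price, note) VALUES (?, ?, ?)")) {
                ps.setLong(1, 42L);
                ps.setNull(2, Types.DOUBLE);  // bound as VoltType.NULL_FLOAT
                ps.setNull(3, Types.VARCHAR); // bound as VoltType.NULL_STRING_OR_VARBINARY
                ps.executeUpdate();
            }
        }
    }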
155,289
|
@ Override public void setObject ( int parameterIndex , Object x ) throws SQLException { checkParameterBounds ( parameterIndex ) ; this . parameters [ parameterIndex - 1 ] = x ; }
|
Sets the value of the designated parameter using the given object .
| 43
| 13
|
155,290
|
@ Override public void setShort ( int parameterIndex , short x ) throws SQLException { checkParameterBounds ( parameterIndex ) ; this . parameters [ parameterIndex - 1 ] = x ; }
|
Sets the designated parameter to the given Java short value .
| 43
| 12
|
155,291
|
@ Override public void setTimestamp ( int parameterIndex , Timestamp x , Calendar cal ) throws SQLException { checkParameterBounds ( parameterIndex ) ; throw SQLError . noSupport ( ) ; }
|
Sets the designated parameter to the given java . sql . Timestamp value using the given Calendar object .
| 47
| 21
|
155,292
|
@ Override public void setURL ( int parameterIndex , URL x ) throws SQLException { checkParameterBounds ( parameterIndex ) ; this . parameters [ parameterIndex - 1 ] = x == null ? VoltType . NULL_STRING_OR_VARBINARY : x . toString ( ) ; }
|
Sets the designated parameter to the given java . net . URL value .
| 67
| 15
|
155,293
|
public final AbstractImporter createImporter ( ImporterConfig config ) { AbstractImporter importer = create ( config ) ; importer . setImportServerAdapter ( m_importServerAdapter ) ; return importer ; }
|
Method that is used by the importer framework classes to create an importer instance and wire it correctly for use within the server .
| 46
| 26
|
155,294
|
private static final byte [ ] expandToLength16 ( byte scaledValue [ ] , final boolean isNegative ) { if ( scaledValue . length == 16 ) { return scaledValue ; } byte replacement [ ] = new byte [ 16 ] ; if ( isNegative ) { Arrays . fill ( replacement , ( byte ) - 1 ) ; } int shift = ( 16 - scaledValue . length ) ; for ( int ii = 0 ; ii < scaledValue . length ; ++ ii ) { replacement [ ii + shift ] = scaledValue [ ii ] ; } return replacement ; }
|
Converts a BigInteger's byte representation containing a scaled magnitude to a fixed - size 16 - byte array and sets the sign in the most significant byte's most significant bit .
| 120
| 32
|
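A worked example of the sign extension above: a short two's-complement magnitude is right-aligned into 16 bytes, and the padding is 0xFF for negative values so the numeric value is preserved:

    import java.math.BigInteger;
    import java.util.Arrays;

    final class SignExtend16 {
        static byte[] expandToLength16(byte[] scaled, boolean isNegative) {
            if (scaled.length == 16) return scaled;
            byte[] out = new byte[16];
            if (isNegative) Arrays.fill(out, (byte) -1); // 0xFF sign fill
            System.arraycopy(scaled, 0, out, 16 - scaled.length, scaled.length);
            return out;
        }

        public static void main(String[] args) {
            byte[] neg = BigInteger.valueOf(-5).toByteArray(); // single byte 0xFB
            byte[] wide = expandToLength16(neg, true);
            System.out.println(new BigInteger(wide)); // still prints -5
        }
    }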
155,295
|
public static BigDecimal deserializeBigDecimalFromString ( String decimal ) throws IOException { if ( decimal == null ) { return null ; } BigDecimal bd = new BigDecimal ( decimal ) ; // if the scale is too large, check for trailing zeros if ( bd . scale ( ) > kDefaultScale ) { bd = bd . stripTrailingZeros ( ) ; if ( bd . scale ( ) > kDefaultScale ) { bd = roundToScale ( bd , kDefaultScale , getRoundingMode ( ) ) ; } } // enforce scale 12 to make the precision check right if ( bd . scale ( ) < kDefaultScale ) { bd = bd . setScale ( kDefaultScale ) ; } if ( bd . precision ( ) > 38 ) { throw new RuntimeException ( "Decimal " + bd + " has more than " + kDefaultPrecision + " digits of precision." ) ; } return bd ; }
|
Deserialize a Volt fixed - precision , fixed - scale 16 - byte decimal from a String representation .
| 213
| 17
|
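A sketch of the same normalization with the constants written out: the scale is forced to 12 and the precision capped at 38, matching kDefaultScale and kDefaultPrecision in the source. The HALF_UP rounding mode is an assumption, since the row only shows a getRoundingMode() call:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    final class DecimalNormalize {
        static final int SCALE = 12, PRECISION = 38;

        static BigDecimal normalize(String s) {
            BigDecimal bd = new BigDecimal(s);
            if (bd.scale() > SCALE) {
                bd = bd.stripTrailingZeros();
                if (bd.scale() > SCALE) {
                    bd = bd.setScale(SCALE, RoundingMode.HALF_UP); // assumed mode
                }
            }
            if (bd.scale() < SCALE) {
                bd = bd.setScale(SCALE); // widening only, never rounds
            }
            if (bd.precision() > PRECISION) {
                throw new RuntimeException("Decimal " + bd + " has more than "
                        + PRECISION + " digits of precision.");
            }
            return bd;
        }
    }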
155,296
|
private static boolean isFileModifiedInCollectionPeriod ( File file ) { long diff = m_currentTimeMillis - file . lastModified ( ) ; if ( diff >= 0 ) { return TimeUnit . MILLISECONDS . toDays ( diff ) + 1 <= m_config . days ; } return false ; }
|
A diff value of 0 indicates the file was modified on the current day .
| 70
| 8
|
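The arithmetic above means a file modified at the current instant has diff == 0, so toDays(diff) + 1 == 1 and it falls inside even a 1-day window. A self-contained restatement:

    import java.util.concurrent.TimeUnit;

    final class CollectionWindow {
        static boolean withinDays(long nowMillis, long lastModifiedMillis, int days) {
            long diff = nowMillis - lastModifiedMillis;
            if (diff < 0) return false; // modification timestamp in the future
            return TimeUnit.MILLISECONDS.toDays(diff) + 1 <= days;
        }

        public static void main(String[] args) {
            long now = System.currentTimeMillis();
            System.out.println(withinDays(now, now, 1));                   // true: current day
            System.out.println(withinDays(now, now - 3 * 86_400_000L, 2)); // false: 3 days old
        }
    }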
155,297
|
public static boolean voltMutateToBigintType ( Expression maybeConstantNode , Expression parent , int childIndex ) { if ( maybeConstantNode . opType == OpTypes . VALUE && maybeConstantNode . dataType != null && maybeConstantNode . dataType . isBinaryType ( ) ) { ExpressionValue exprVal = ( ExpressionValue ) maybeConstantNode ; if ( exprVal . valueData == null ) { return false ; } BinaryData data = ( BinaryData ) exprVal . valueData ; parent . nodes [ childIndex ] = new ExpressionValue ( data . toLong ( ) , Type . SQL_BIGINT ) ; return true ; } return false ; }
|
Given an ExpressionValue that is a VARBINARY constant , convert it to a BIGINT constant . Returns true for a successful conversion and false otherwise .
| 146
| 30
|
155,298
|
private void getFKStatement ( StringBuffer a ) { if ( ! getName ( ) . isReservedName ( ) ) { a . append ( Tokens . T_CONSTRAINT ) . append ( ' ' ) ; a . append ( getName ( ) . statementName ) ; a . append ( ' ' ) ; } a . append ( Tokens . T_FOREIGN ) . append ( ' ' ) . append ( Tokens . T_KEY ) ; int [ ] col = getRefColumns ( ) ; getColumnList ( getRef ( ) , col , col . length , a ) ; a . append ( ' ' ) . append ( Tokens . T_REFERENCES ) . append ( ' ' ) ; a . append ( getMain ( ) . getName ( ) . getSchemaQualifiedStatementName ( ) ) ; col = getMainColumns ( ) ; getColumnList ( getMain ( ) , col , col . length , a ) ; if ( getDeleteAction ( ) != Constraint . NO_ACTION ) { a . append ( ' ' ) . append ( Tokens . T_ON ) . append ( ' ' ) . append ( Tokens . T_DELETE ) . append ( ' ' ) ; a . append ( getDeleteActionString ( ) ) ; } if ( getUpdateAction ( ) != Constraint . NO_ACTION ) { a . append ( ' ' ) . append ( Tokens . T_ON ) . append ( ' ' ) . append ( Tokens . T_UPDATE ) . append ( ' ' ) ; a . append ( getUpdateActionString ( ) ) ; } }
|
Generates the foreign key declaration for a given Constraint object .
| 343
| 14
|
155,299
|
private static void getColumnList ( Table t , int [ ] col , int len , StringBuffer a ) { a . append ( '(' ) ; for ( int i = 0 ; i < len ; i ++ ) { a . append ( t . getColumn ( col [ i ] ) . getName ( ) . statementName ) ; if ( i < len - 1 ) { a . append ( ',' ) ; } } a . append ( ')' ) ; }
|
Generates the parenthesized , comma - separated column name list for a table .
| 98
| 9
|
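Taken together, the two rows above emit a standard FOREIGN KEY clause with parenthesized, comma-separated column lists. A tiny sketch of the list helper plus an invented example of the text it supports:

    final class ColumnList {
        // Produces e.g. "(CUSTOMER_ID,STORE_ID)"; a full FK clause built with it
        // looks like: CONSTRAINT FK_X FOREIGN KEY (CUSTOMER_ID) REFERENCES
        // PUBLIC.CUSTOMERS (ID) ON DELETE CASCADE (all names invented).
        static String columnList(String[] cols) {
            StringBuilder a = new StringBuilder();
            a.append('(');
            for (int i = 0; i < cols.length; i++) {
                a.append(cols[i]);
                if (i < cols.length - 1) a.append(',');
            }
            a.append(')');
            return a.toString();
        }
    }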