idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
154,100
public void closePhysically ( ) throws SQLException { SQLException exception = null ; if ( ! isClosed && this . connection != null && ! this . connection . isClosed ( ) ) { try { this . connection . close ( ) ; } catch ( SQLException e ) { //catch and hold so that the rest of the finalizer is run too. Throw at the end if present. exception = e ; } } this . isClosed = true ; this . pooledConnection = null ; this . connection = null ; this . connectionDefaults = null ; this . connectionListeners . clear ( ) ; this . connectionListeners = null ; if ( exception != null ) { throw exception ; } }
Closes the connection physically . The pool is not notified of this .
154
14
154,101
public void startSnapshotWithTargets ( Collection < SnapshotDataTarget > targets , long now ) { // TRAIL [SnapSave:9] 5 [all SP] Start snapshot by putting task into the site queue. //Basically asserts that there are no tasks with null targets at this point //getTarget checks and crashes for ( SnapshotTableTask t : m_snapshotTableTasks . values ( ) ) { t . getTarget ( ) ; } ArrayList < SnapshotDataTarget > targetsToClose = Lists . newArrayList ( ) ; for ( final SnapshotDataTarget target : targets ) { if ( target . needsFinalClose ( ) ) { targetsToClose . add ( target ) ; } } m_snapshotTargets = targetsToClose ; // Queue the first snapshot task VoltDB . instance ( ) . schedulePriorityWork ( new Runnable ( ) { @ Override public void run ( ) { m_siteTaskerQueue . offer ( new SnapshotTask ( ) ) ; } } , ( m_quietUntil + ( 5 * m_snapshotPriority ) - now ) , 0 , TimeUnit . MILLISECONDS ) ; m_quietUntil += 5 * m_snapshotPriority ; }
This is called from the snapshot IO thread when the deferred setup is finished . It sets the data targets and queues a snapshot task onto the site thread .
264
30
154,102
private List < BBContainer > getOutputBuffers ( Collection < SnapshotTableTask > tableTasks , boolean noSchedule ) { final int desired = tableTasks . size ( ) ; while ( true ) { int available = m_availableSnapshotBuffers . get ( ) ; //Limit the number of buffers used concurrently if ( desired > available ) { return null ; } if ( m_availableSnapshotBuffers . compareAndSet ( available , available - desired ) ) break ; } List < BBContainer > outputBuffers = new ArrayList < BBContainer > ( tableTasks . size ( ) ) ; for ( int ii = 0 ; ii < tableTasks . size ( ) ; ii ++ ) { final BBContainer origin = DBBPool . allocateDirectAndPool ( m_snapshotBufferLength ) ; outputBuffers . add ( createNewBuffer ( origin , noSchedule ) ) ; } return outputBuffers ; }
Create an output buffer for each task .
201
8
154,103
public void write ( RowOutputInterface out , ResultMetaData meta ) throws IOException { beforeFirst ( ) ; out . writeLong ( id ) ; out . writeInt ( size ) ; out . writeInt ( 0 ) ; // offset out . writeInt ( size ) ; while ( hasNext ( ) ) { Object [ ] data = getNext ( ) ; out . writeData ( meta . getColumnCount ( ) , meta . columnTypes , data , null , null ) ; } beforeFirst ( ) ; }
reading and writing
107
3
154,104
public static ClientInterface create ( HostMessenger messenger , CatalogContext context , ReplicationRole replicationRole , Cartographer cartographer , InetAddress clientIntf , int clientPort , InetAddress adminIntf , int adminPort , SslContext SslContext ) throws Exception { /* * Construct the runnables so they have access to the list of connections */ final ClientInterface ci = new ClientInterface ( clientIntf , clientPort , adminIntf , adminPort , context , messenger , replicationRole , cartographer , SslContext ) ; return ci ; }
Static factory method to easily create a ClientInterface with the default settings .
119
14
154,105
public void initializeSnapshotDaemon ( HostMessenger messenger , GlobalServiceElector gse ) { m_snapshotDaemon . init ( this , messenger , new Runnable ( ) { @ Override public void run ( ) { bindAdapter ( m_snapshotDaemonAdapter , null ) ; } } , gse ) ; }
Initializes the snapshot daemon so that it s ready to take snapshots
72
13
154,106
public ClientInterfaceHandleManager bindAdapter ( final Connection adapter , final ClientInterfaceRepairCallback repairCallback ) { return bindAdapter ( adapter , repairCallback , false ) ; }
Tell the clientInterface about a connection adapter .
35
9
154,107
public void mayActivateSnapshotDaemon ( ) { SnapshotSchedule schedule = m_catalogContext . get ( ) . database . getSnapshotschedule ( ) . get ( "default" ) ; if ( schedule != null ) { final ListenableFuture < Void > future = m_snapshotDaemon . mayGoActiveOrInactive ( schedule ) ; future . addListener ( new Runnable ( ) { @ Override public void run ( ) { try { future . get ( ) ; } catch ( InterruptedException e ) { VoltDB . crashLocalVoltDB ( "Failed to make SnapshotDaemon active" , false , e ) ; } catch ( ExecutionException e ) { VoltDB . crashLocalVoltDB ( "Failed to make SnapshotDaemon active" , false , e ) ; } } } , CoreUtils . SAMETHREADEXECUTOR ) ; } }
in the cluster make our SnapshotDaemon responsible for snapshots
195
12
154,108
public void notifyOfCatalogUpdate ( ) { m_catalogContext . set ( VoltDB . instance ( ) . getCatalogContext ( ) ) ; /* * Update snapshot daemon settings. * * Don't do it if the system is still initializing (CL replay), * because snapshot daemon may call @SnapshotScan on activation and * it will mess replaying txns up. */ if ( VoltDB . instance ( ) . getMode ( ) != OperationMode . INITIALIZING ) { mayActivateSnapshotDaemon ( ) ; //add a notification to client right away StoredProcedureInvocation spi = new StoredProcedureInvocation ( ) ; spi . setProcName ( "@SystemCatalog" ) ; spi . setParams ( "PROCEDURES" ) ; spi . setClientHandle ( ASYNC_PROC_HANDLE ) ; notifyClients ( m_currentProcValues , m_currentProcSupplier , spi , OpsSelector . SYSTEMCATALOG ) ; } }
Set the flag that tells this client interface to update its catalog when it s threadsafe .
225
18
154,109
private final void checkForDeadConnections ( final long now ) { final ArrayList < Pair < Connection , Integer > > connectionsToRemove = new ArrayList < Pair < Connection , Integer > > ( ) ; for ( final ClientInterfaceHandleManager cihm : m_cihm . values ( ) ) { // Internal connections don't implement calculatePendingWriteDelta(), so check for real connection first if ( VoltPort . class == cihm . connection . getClass ( ) ) { final int delta = cihm . connection . writeStream ( ) . calculatePendingWriteDelta ( now ) ; if ( delta > CLIENT_HANGUP_TIMEOUT ) { connectionsToRemove . add ( Pair . of ( cihm . connection , delta ) ) ; } } } for ( final Pair < Connection , Integer > p : connectionsToRemove ) { Connection c = p . getFirst ( ) ; networkLog . warn ( "Closing connection to " + c + " because it hasn't read a response that was pending for " + p . getSecond ( ) + " milliseconds" ) ; c . unregister ( ) ; } }
Check for dead connections by providing each connection with the current time so it can calculate the delta between now and the time the oldest message was queued for sending .
237
32
154,110
protected void shutdown ( ) throws InterruptedException { if ( m_deadConnectionFuture != null ) { m_deadConnectionFuture . cancel ( false ) ; try { m_deadConnectionFuture . get ( ) ; } catch ( Throwable t ) { } } if ( m_topologyCheckFuture != null ) { m_topologyCheckFuture . cancel ( false ) ; try { m_topologyCheckFuture . get ( ) ; } catch ( Throwable t ) { } } if ( m_maxConnectionUpdater != null ) { m_maxConnectionUpdater . cancel ( false ) ; } if ( m_acceptor != null ) { m_acceptor . shutdown ( ) ; } if ( m_adminAcceptor != null ) { m_adminAcceptor . shutdown ( ) ; } if ( m_snapshotDaemon != null ) { m_snapshotDaemon . shutdown ( ) ; } if ( m_migratePartitionLeaderExecutor != null ) { m_migratePartitionLeaderExecutor . shutdown ( ) ; } m_notifier . shutdown ( ) ; }
all your read buffers events .. or something ..
234
9
154,111
public void sendEOLMessage ( int partitionId ) { final long initiatorHSId = m_cartographer . getHSIdForMaster ( partitionId ) ; Iv2EndOfLogMessage message = new Iv2EndOfLogMessage ( partitionId ) ; m_mailbox . send ( initiatorHSId , message ) ; }
Sends an end of log message to the master of that partition . This should only be called at the end of replay .
70
25
154,112
private ClientResponseImpl getMispartitionedErrorResponse ( StoredProcedureInvocation task , Procedure catProc , Exception ex ) { Object invocationParameter = null ; try { invocationParameter = task . getParameterAtIndex ( catProc . getPartitionparameter ( ) ) ; } catch ( Exception ex2 ) { } String exMsg = "Unknown" ; if ( ex != null ) { exMsg = ex . getMessage ( ) ; } String errorMessage = "Error sending procedure " + task . getProcName ( ) + " to the correct partition. Make sure parameter values are correct." + " Parameter value " + invocationParameter + ", partition column " + catProc . getPartitioncolumn ( ) . getName ( ) + " type " + catProc . getPartitioncolumn ( ) . getType ( ) + " Message: " + exMsg ; authLog . warn ( errorMessage ) ; ClientResponseImpl clientResponse = new ClientResponseImpl ( ClientResponse . UNEXPECTED_FAILURE , new VoltTable [ 0 ] , errorMessage , task . clientHandle ) ; return clientResponse ; }
Generate a mispartitioned response also log the message .
240
13
154,113
public boolean ceaseAllPublicFacingTrafficImmediately ( ) { try { if ( m_acceptor != null ) { // This call seems to block until the shutdown is done // which is good becasue we assume there will be no new // connections afterward m_acceptor . shutdown ( ) ; } if ( m_adminAcceptor != null ) { m_adminAcceptor . shutdown ( ) ; } } catch ( InterruptedException e ) { // this whole method is really a best effort kind of thing... log . error ( e ) ; // if we didn't succeed, let the caller know and take action return false ; } finally { m_isAcceptingConnections . set ( false ) ; // this feels like an unclean thing to do... but should work // for the purposes of cutting all responses right before we deliberately // end the process // m_cihm itself is thread-safe, and the regular shutdown code won't // care if it's empty... so... this. m_cihm . clear ( ) ; } return true ; }
This is not designed to be a safe shutdown . This is designed to stop sending messages to clients as fast as possible . It is currently called from VoltDB . crash ...
221
34
154,114
void processMigratePartitionLeaderTask ( MigratePartitionLeaderMessage message ) { synchronized ( m_lock ) { //start MigratePartitionLeader service if ( message . startMigratingPartitionLeaders ( ) ) { if ( m_migratePartitionLeaderExecutor == null ) { m_migratePartitionLeaderExecutor = Executors . newSingleThreadScheduledExecutor ( CoreUtils . getThreadFactory ( "MigratePartitionLeader" ) ) ; final int interval = Integer . parseInt ( System . getProperty ( "MIGRATE_PARTITION_LEADER_INTERVAL" , "1" ) ) ; final int delay = Integer . parseInt ( System . getProperty ( "MIGRATE_PARTITION_LEADER_DELAY" , "1" ) ) ; m_migratePartitionLeaderExecutor . scheduleAtFixedRate ( ( ) -> startMigratePartitionLeader ( message . isForStopNode ( ) ) , delay , interval , TimeUnit . SECONDS ) ; } hostLog . info ( "MigratePartitionLeader task is started." ) ; return ; } //stop MigratePartitionLeader service if ( m_migratePartitionLeaderExecutor != null ) { m_migratePartitionLeaderExecutor . shutdown ( ) ; m_migratePartitionLeaderExecutor = null ; } } hostLog . info ( "MigratePartitionLeader task is stopped." ) ; }
start or stop MigratePartitionLeader task
317
9
154,115
public VoltTable [ ] run ( SystemProcedureExecutionContext ctx ) { // Choose the lowest site ID on this host to actually flip the bit VoltDBInterface voltdb = VoltDB . instance ( ) ; OperationMode opMode = voltdb . getMode ( ) ; if ( ctx . isLowestSiteId ( ) ) { ZooKeeper zk = voltdb . getHostMessenger ( ) . getZK ( ) ; try { Stat stat ; OperationMode zkMode = null ; Code code ; do { stat = new Stat ( ) ; code = Code . BADVERSION ; try { byte [ ] data = zk . getData ( VoltZK . operationMode , false , stat ) ; zkMode = data == null ? opMode : OperationMode . valueOf ( data ) ; if ( zkMode == RUNNING ) { break ; } stat = zk . setData ( VoltZK . operationMode , RUNNING . getBytes ( ) , stat . getVersion ( ) ) ; code = Code . OK ; zkMode = RUNNING ; break ; } catch ( BadVersionException ex ) { code = ex . code ( ) ; } } while ( zkMode != RUNNING && code == Code . BADVERSION ) ; m_stat = stat ; voltdb . getHostMessenger ( ) . unpause ( ) ; voltdb . setMode ( RUNNING ) ; // for snmp SnmpTrapSender snmp = voltdb . getSnmpTrapSender ( ) ; if ( snmp != null ) { snmp . resume ( "Cluster resumed." ) ; } } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } VoltTable t = new VoltTable ( VoltSystemProcedure . STATUS_SCHEMA ) ; t . addRow ( VoltSystemProcedure . STATUS_OK ) ; return new VoltTable [ ] { t } ; }
Exit admin mode
417
3
154,116
private static File getNativeLibraryFile ( String libname ) { // for now, arch is always x86_64 String pathFormat = "/org/voltdb/native/%s/x86_64" ; String libPath = null ; String osName = System . getProperty ( "os.name" ) . toLowerCase ( ) ; if ( osName . contains ( "mac" ) ) { libPath = String . format ( pathFormat , "Mac" ) ; } else if ( osName . contains ( "linux" ) ) { libPath = String . format ( pathFormat , "Linux" ) ; } else { throw new RuntimeException ( "Unsupported system: " + osName ) ; } String libFileName = System . mapLibraryName ( libname ) ; if ( NativeLibraryLoader . class . getResource ( libPath + "/" + libFileName ) == null ) { // mapLibraryName does not give us the correct name on mac sometimes if ( osName . contains ( "mac" ) ) { libFileName = "lib" + libname + ".jnilib" ; } if ( NativeLibraryLoader . class . getResource ( libPath + "/" + libFileName ) == null ) { String msg = "Could not find library resource using path: " + libPath + "/" + libFileName ; s_hostLog . warn ( msg ) ; throw new RuntimeException ( msg ) ; } } File tmpFilePath = new File ( System . getProperty ( VOLT_TMP_DIR , System . getProperty ( "java.io.tmpdir" ) ) ) ; if ( s_hostLog . isDebugEnabled ( ) ) { s_hostLog . debug ( "Temp directory to which shared libs are extracted is: " + tmpFilePath . getAbsolutePath ( ) ) ; } try { return loadLibraryFile ( libPath , libFileName , tmpFilePath . getAbsolutePath ( ) ) ; } catch ( IOException e ) { s_hostLog . error ( "Error loading Volt library file from jar" , e ) ; throw new RuntimeException ( e ) ; } }
Returns the native library file copied into a readable location .
455
11
154,117
public static void writeString ( String value , ByteBuffer buf ) { if ( value == null ) { buf . putInt ( VoltType . NULL_STRING_LENGTH ) ; return ; } byte [ ] strbytes = value . getBytes ( Constants . UTF8ENCODING ) ; int len = strbytes . length ; buf . putInt ( len ) ; buf . put ( strbytes ) ; }
Write a string in the standard VoltDB way
87
9
154,118
public static void writeVarbinary ( byte [ ] bytes , ByteBuffer buf ) throws IOException { if ( bytes == null ) { buf . putInt ( VoltType . NULL_STRING_LENGTH ) ; return ; } buf . putInt ( bytes . length ) ; buf . put ( bytes ) ; }
Write a set of bytes in the standard VoltDB way
65
11
154,119
private static long getMaxBidId ( Client client ) { long currentMaxBidId = 0 ; try { VoltTable vt = client . callProcedure ( "@AdHoc" , "select max(id) from bids" ) . getResults ( ) [ 0 ] ; vt . advanceRow ( ) ; currentMaxBidId = vt . getLong ( 0 ) ; if ( vt . wasNull ( ) ) { currentMaxBidId = 0 ; } } catch ( IOException | ProcCallException e ) { e . printStackTrace ( ) ; } return currentMaxBidId ; }
Find the current highest bid id in the bids table . We ll start generating new bids at this number plus one .
134
23
154,120
@ Override public void run ( ) { long bidId = m_bidId ++ ; long advertiserId = Math . abs ( m_rand . nextLong ( ) ) % NUM_ADVERTISERS ; GeographyValue bidRegion = Regions . pickRandomRegion ( ) ; TimestampType bidStartTime = new TimestampType ( ) ; TimestampType bidEndTime = new TimestampType ( bidStartTime . getTime ( ) + AdBrokerBenchmark . BID_DURATION_SECONDS * 1000000 ) ; // Amount of bid: a hundredth of a penny up to around a tenth of a penny. double amount = 0.00001 + 0.01 * m_rand . nextDouble ( ) ; DecimalFormat df = new DecimalFormat ( "#.####" ) ; amount = Double . valueOf ( df . format ( amount ) ) ; try { m_client . callProcedure ( new NullCallback ( ) , "bids.Insert" , bidId , advertiserId , bidRegion , bidStartTime , bidEndTime , amount ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; } }
This is the run method for this Runnable subclass .
249
12
154,121
String getName ( ) { int idx = mName . lastIndexOf ( ' ' ) ; return ( idx > 0 ) ? mName . substring ( idx ) : mName ; }
Strip the other pathname components and return the basename
43
12
154,122
public AbstractExpression singlePartitioningExpression ( ) { AbstractExpression e = singlePartitioningExpressionForReport ( ) ; if ( e != null && isUsefulPartitioningExpression ( e ) ) { return e ; } return null ; }
smart accessor - only returns a value if it was unique and is useful
56
15
154,123
void analyzeTablePartitioning ( Collection < StmtTableScan > collection ) throws PlanningErrorException { m_countOfPartitionedTables = 0 ; // Do we have a need for a distributed scan at all? // Iterate over the tables to collect partition columns. for ( StmtTableScan tableScan : collection ) { if ( ! tableScan . getIsReplicated ( ) ) { ++ m_countOfPartitionedTables ; } } // Initial guess -- as if no equality filters. m_countOfIndependentlyPartitionedTables = m_countOfPartitionedTables ; }
This simple analysis counts the number of partitioned tables in the join tree of a query and initializes a guess for the count of independently partitioned tables .
130
31
154,124
public void resetAnalysisState ( ) { m_countOfIndependentlyPartitionedTables = - 1 ; m_countOfPartitionedTables = - 1 ; m_fullColumnName = null ; m_inferredExpression . clear ( ) ; m_inferredParameterIndex = - 1 ; m_inferredValue = null ; m_isDML = false ; setJoinValid ( true ) ; setJoinInvalidReason ( null ) ; m_partitionColForDML = null ; }
Sometimes when we fail to plan a statement we try again with different inputs using the same StatementPartitioning object . In this case it s incumbent on callers to reset the cached analysis state set by calling this method .
109
44
154,125
public boolean callProcedure ( Invocation invocation , ProcedureCallback callback ) { try { boolean result = m_importServerAdapter . callProcedure ( this , m_backPressurePredicate , callback , invocation . getProcedure ( ) , invocation . getParams ( ) ) ; reportStat ( result , invocation . getProcedure ( ) ) ; return result ; } catch ( Exception ex ) { rateLimitedLog ( Level . ERROR , ex , "%s: Error trying to import" , getName ( ) ) ; reportFailureStat ( invocation . getProcedure ( ) ) ; return false ; } }
This should be used importer implementations to execute a stored procedure .
131
13
154,126
@ Override public void rateLimitedLog ( Level level , Throwable cause , String format , Object ... args ) { m_logger . rateLimitedLog ( LOG_SUPPRESSION_INTERVAL_SECONDS , level , cause , format , args ) ; }
This rate limited log must be used by the importers to log messages that may happen frequently and must be rate limited .
56
24
154,127
protected void trace ( Throwable t , String msgFormat , Object ... args ) { m_logger . trace ( String . format ( msgFormat , args ) , t ) ; }
Log a TRACE level log message .
38
8
154,128
@ Override public void warn ( Throwable t , String msgFormat , Object ... args ) { m_logger . warn ( String . format ( msgFormat , args ) , t ) ; }
Log a WARN level log message .
41
7
154,129
public void add ( Right right ) { if ( isFull ) { return ; } if ( right . isFull ) { clear ( ) ; isFull = true ; return ; } isFullSelect |= right . isFullSelect ; isFullInsert |= right . isFullInsert ; isFullUpdate |= right . isFullUpdate ; isFullReferences |= right . isFullReferences ; isFullDelete |= right . isFullDelete ; if ( isFullSelect ) { selectColumnSet = null ; } else if ( right . selectColumnSet != null ) { if ( selectColumnSet == null ) { selectColumnSet = new OrderedHashSet ( ) ; } selectColumnSet . addAll ( right . selectColumnSet ) ; } if ( isFullInsert ) { insertColumnSet = null ; } else if ( right . insertColumnSet != null ) { if ( insertColumnSet == null ) { insertColumnSet = new OrderedHashSet ( ) ; } insertColumnSet . addAll ( right . insertColumnSet ) ; } if ( isFullUpdate ) { updateColumnSet = null ; } else if ( right . updateColumnSet != null ) { if ( updateColumnSet == null ) { updateColumnSet = new OrderedHashSet ( ) ; } updateColumnSet . addAll ( right . updateColumnSet ) ; } if ( isFullReferences ) { referencesColumnSet = null ; } else if ( right . referencesColumnSet != null ) { if ( referencesColumnSet == null ) { referencesColumnSet = new OrderedHashSet ( ) ; } referencesColumnSet . addAll ( right . referencesColumnSet ) ; } if ( isFullTrigger ) { triggerColumnSet = null ; } else if ( right . triggerColumnSet != null ) { if ( triggerColumnSet == null ) { triggerColumnSet = new OrderedHashSet ( ) ; } triggerColumnSet . addAll ( right . triggerColumnSet ) ; } }
Supports column level GRANT
408
6
154,130
public void remove ( SchemaObject object , Right right ) { if ( right . isFull ) { clear ( ) ; return ; } if ( isFull ) { isFull = false ; isFullSelect = isFullInsert = isFullUpdate = isFullReferences = isFullDelete = true ; } if ( right . isFullDelete ) { isFullDelete = false ; } if ( ! isFullSelect && selectColumnSet == null ) { } else if ( right . isFullSelect ) { isFullSelect = false ; selectColumnSet = null ; } else if ( right . selectColumnSet != null ) { if ( isFullSelect ) { isFullSelect = false ; selectColumnSet = ( ( Table ) object ) . getColumnNameSet ( ) ; } selectColumnSet . removeAll ( right . selectColumnSet ) ; if ( selectColumnSet . isEmpty ( ) ) { selectColumnSet = null ; } } if ( ! isFullInsert && insertColumnSet == null ) { } else if ( right . isFullInsert ) { isFullInsert = false ; insertColumnSet = null ; } else if ( right . insertColumnSet != null ) { if ( isFullInsert ) { isFullInsert = false ; insertColumnSet = ( ( Table ) object ) . getColumnNameSet ( ) ; } insertColumnSet . removeAll ( right . insertColumnSet ) ; if ( insertColumnSet . isEmpty ( ) ) { insertColumnSet = null ; } } if ( ! isFullUpdate && updateColumnSet == null ) { } else if ( right . isFullUpdate ) { isFullUpdate = false ; updateColumnSet = null ; } else if ( right . updateColumnSet != null ) { if ( isFullUpdate ) { isFullUpdate = false ; updateColumnSet = ( ( Table ) object ) . getColumnNameSet ( ) ; } updateColumnSet . removeAll ( right . updateColumnSet ) ; if ( updateColumnSet . isEmpty ( ) ) { updateColumnSet = null ; } } if ( ! isFullReferences && referencesColumnSet == null ) { } else if ( right . isFullReferences ) { isFullReferences = false ; referencesColumnSet = null ; } else if ( right . referencesColumnSet != null ) { if ( isFullReferences ) { isFullReferences = false ; referencesColumnSet = ( ( Table ) object ) . getColumnNameSet ( ) ; } referencesColumnSet . removeAll ( right . referencesColumnSet ) ; if ( referencesColumnSet . 
isEmpty ( ) ) { referencesColumnSet = null ; } } if ( ! isFullTrigger && triggerColumnSet == null ) { } else if ( right . isFullTrigger ) { isFullTrigger = false ; triggerColumnSet = null ; } else if ( right . triggerColumnSet != null ) { if ( isFullTrigger ) { isFullTrigger = false ; triggerColumnSet = ( ( Table ) object ) . getColumnNameSet ( ) ; } triggerColumnSet . removeAll ( right . triggerColumnSet ) ; if ( triggerColumnSet . isEmpty ( ) ) { triggerColumnSet = null ; } } }
supports column level REVOKE
659
7
154,131
static boolean containsAllColumns ( OrderedHashSet columnSet , Table table , boolean [ ] columnCheckList ) { for ( int i = 0 ; i < columnCheckList . length ; i ++ ) { if ( columnCheckList [ i ] ) { if ( columnSet == null ) { return false ; } if ( columnSet . contains ( table . getColumn ( i ) . getName ( ) ) ) { continue ; } return false ; } } return true ; }
Supports column level checks
100
5
154,132
String getTableRightsSQL ( Table table ) { StringBuffer sb = new StringBuffer ( ) ; if ( isFull ) { return Tokens . T_ALL ; } if ( isFullSelect ) { sb . append ( Tokens . T_SELECT ) ; sb . append ( ' ' ) ; } else if ( selectColumnSet != null ) { sb . append ( Tokens . T_SELECT ) ; getColumnList ( table , selectColumnSet , sb ) ; sb . append ( ' ' ) ; } if ( isFullInsert ) { sb . append ( Tokens . T_INSERT ) ; sb . append ( ' ' ) ; } else if ( insertColumnSet != null ) { sb . append ( Tokens . T_INSERT ) ; getColumnList ( table , insertColumnSet , sb ) ; sb . append ( ' ' ) ; } if ( isFullUpdate ) { sb . append ( Tokens . T_UPDATE ) ; sb . append ( ' ' ) ; } else if ( updateColumnSet != null ) { sb . append ( Tokens . T_UPDATE ) ; getColumnList ( table , updateColumnSet , sb ) ; sb . append ( ' ' ) ; } if ( isFullDelete ) { sb . append ( Tokens . T_DELETE ) ; sb . append ( ' ' ) ; } if ( isFullReferences ) { sb . append ( Tokens . T_REFERENCES ) ; sb . append ( ' ' ) ; } else if ( referencesColumnSet != null ) { sb . append ( Tokens . T_REFERENCES ) ; sb . append ( ' ' ) ; } if ( isFullTrigger ) { sb . append ( Tokens . T_TRIGGER ) ; sb . append ( ' ' ) ; } else if ( triggerColumnSet != null ) { sb . append ( Tokens . T_TRIGGER ) ; sb . append ( ' ' ) ; } return sb . toString ( ) . substring ( 0 , sb . length ( ) - 1 ) ; }
supports column level GRANT
451
6
154,133
public synchronized int next ( ) { while ( nextPort <= MAX_STATIC_PORT ) { int port = nextPort ++ ; if ( MiscUtils . isBindable ( port ) ) { return port ; } } throw new RuntimeException ( "Exhausted all possible ports" ) ; }
Return the next bindable port
62
6
154,134
static Range < Long > range ( long start , long end ) { return Range . closed ( start , end ) . canonical ( DiscreteDomain . longs ( ) ) ; }
Returns a canonical range that can be added to the internal range set . Only ranges returned by this method can be added to the range set otherwise range operations like contains may yield unexpected results . Consult the Guava doc on Range for details .
37
47
154,135
private static long start ( Range < Long > range ) { if ( range . lowerBoundType ( ) == BoundType . OPEN ) { return DiscreteDomain . longs ( ) . next ( range . lowerEndpoint ( ) ) ; } else { return range . lowerEndpoint ( ) ; } }
Get the start of the range . Always use this method to get the start of a range because it respects the bound type .
63
25
154,136
private static long end ( Range < Long > range ) { if ( range . upperBoundType ( ) == BoundType . OPEN ) { return DiscreteDomain . longs ( ) . previous ( range . upperEndpoint ( ) ) ; } else { return range . upperEndpoint ( ) ; } }
Get the end of the range . Always use this method to get the end of a range because it respects the bound type .
63
25
154,137
public void append ( long start , long end ) { assert ( start <= end && ( m_map . isEmpty ( ) || start > end ( m_map . span ( ) ) ) ) ; addRange ( start , end ) ; }
Appends a range to the tracker . The range has to be after the last sequence number of the tracker .
51
22
154,138
public int truncate ( long newTruncationPoint ) { int truncated = 0 ; if ( m_map . isEmpty ( ) ) { m_map . add ( range ( newTruncationPoint , newTruncationPoint ) ) ; m_hasSentinel = true ; return truncated ; } if ( newTruncationPoint < getFirstSeqNo ( ) ) { return truncated ; } // Sentinel doesn't count as valid sequence if ( m_hasSentinel ) { truncated -= 1 ; } final Iterator < Range < Long > > iter = m_map . asRanges ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { final Range < Long > next = iter . next ( ) ; if ( end ( next ) < newTruncationPoint ) { truncated += end ( next ) - start ( next ) + 1 ; iter . remove ( ) ; } else if ( next . contains ( newTruncationPoint ) ) { truncated += newTruncationPoint - start ( next ) + 1 ; iter . remove ( ) ; m_map . add ( range ( newTruncationPoint , end ( next ) ) ) ; m_hasSentinel = true ; return truncated ; } else { break ; } } if ( ! m_map . contains ( newTruncationPoint ) ) { m_map . add ( range ( newTruncationPoint , newTruncationPoint ) ) ; m_hasSentinel = true ; } return truncated ; }
Truncate the tracker to the given safe point . After truncation the new safe point will be the first sequence number of the tracker . If the new safe point is before the first sequence number of the tracker it s a no - op . If the map is empty truncation point will be the new safe point of tracker .
325
66
154,139
public void truncateAfter ( long newTruncationPoint ) { if ( size ( ) == 0 ) { m_map . add ( range ( newTruncationPoint , newTruncationPoint ) ) ; m_hasSentinel = true ; return ; } if ( newTruncationPoint > getLastSeqNo ( ) ) { return ; } final Iterator < Range < Long > > iter = m_map . asDescendingSetOfRanges ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { final Range < Long > next = iter . next ( ) ; if ( start ( next ) > newTruncationPoint ) { iter . remove ( ) ; } else if ( next . contains ( newTruncationPoint ) ) { iter . remove ( ) ; m_map . add ( range ( start ( next ) , newTruncationPoint ) ) ; return ; } else { break ; } } if ( m_map . isEmpty ( ) ) { m_map . add ( range ( newTruncationPoint , newTruncationPoint ) ) ; m_hasSentinel = true ; } }
Truncate the tracker to the given truncation point . After truncation any ranges after the new truncation point will be removed . If the new safe point is after the last sequence number of the tracker it s a no - op . If the map is empty truncation point will be the new safe point of tracker .
244
65
154,140
public Pair < Long , Long > getRangeContaining ( long seq ) { Range < Long > range = m_map . rangeContaining ( seq ) ; if ( range != null ) { return new Pair < Long , Long > ( start ( range ) , end ( range ) ) ; } return null ; }
Get range that contains given sequence number
65
7
154,141
public Pair < Long , Long > getFirstGap ( ) { if ( m_map . isEmpty ( ) || size ( ) < 2 ) { return null ; } Iterator < Range < Long > > iter = m_map . asRanges ( ) . iterator ( ) ; long start = end ( iter . next ( ) ) + 1 ; long end = start ( iter . next ( ) ) - 1 ; return new Pair < Long , Long > ( start , end ) ; }
Find range of the first gap if it exists . If there is only one entry range after the first entry is NOT a gap .
103
26
154,142
public int sizeInSequence ( ) { int sequence = 0 ; if ( m_map . isEmpty ( ) ) { return sequence ; } final Iterator < Range < Long > > iter = m_map . asRanges ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { Range < Long > range = iter . next ( ) ; sequence += end ( range ) - start ( range ) + 1 ; } if ( m_hasSentinel ) { sequence -= 1 ; } return sequence ; }
Get total number of sequence from the tracker .
110
9
154,143
void sendEvent ( CallEvent call ) throws NoConnectionsException , IOException , ProcCallException { if ( call . endTS == null ) { assert ( call . startTS != null ) ; // null callback isn't ideal for production code, but errors are tracked // here through client stats so we'll let it slide client . callProcedure ( new NullCallback ( ) , "BeginCall" , call . agentId , call . phoneNoStr ( ) , call . callId , call . startTS ) ; } else { assert ( call . startTS == null ) ; client . callProcedure ( new NullCallback ( ) , "EndCall" , call . agentId , call . phoneNoStr ( ) , call . callId , call . endTS ) ; } }
Send a call event to either BeginCall or EndCall based on the event .
165
16
154,144
public void write ( char [ ] c , int off , int len ) { ensureRoom ( len * 2 ) ; for ( int i = off ; i < len ; i ++ ) { int v = c [ i ] ; buffer [ count ++ ] = ( byte ) ( v >>> 8 ) ; buffer [ count ++ ] = ( byte ) v ; } }
additional public methods not in similar java . util classes
75
11
154,145
long executeSQL ( boolean isFinal ) throws VoltAbortException { long count = 0 ; VoltTable [ ] results = voltExecuteSQL ( isFinal ) ; for ( VoltTable result : results ) { long dmlUpdated = result . asScalarLong ( ) ; if ( dmlUpdated == 0 ) { throw new VoltAbortException ( "Insert failed for tuple." ) ; } if ( dmlUpdated > 1 ) { throw new VoltAbortException ( "Insert modified more than one tuple." ) ; } ++ count ; } return count ; }
Execute a set of queued inserts . Ensure each insert successfully inserts one row . Throw exception if not .
118
22
154,146
/**
 * Reports whether the given method is a {@code close} method declared on
 * a JDBC Connection or Statement subtype. Used only during static
 * initialization.
 */
protected static boolean _isCloseSurrogateMethod(final Class clazz, final Method method) {
    if (!"close".equals(method.getName())) {
        return false;
    }
    final boolean poolSensitiveType = Connection.class.isAssignableFrom(clazz)
            || Statement.class.isAssignableFrom(clazz);
    return poolSensitiveType;
}
Simple test used only during static initialization .
69
8
154,147
// Maps a JDBC delegate to the interface array its surrogate dynamic proxy
// must implement, so pooling-sensitive methods are hidden from clients.
// Returns null for unrecognized delegates.
protected static Class[] _computeProxiedInterface(Object delegate) {
    // NOTE: Order is important for XXXStatement.
    // (CallableStatement extends PreparedStatement extends Statement, so
    // the most specific subtype must be matched first.)
    if (delegate instanceof Array) {
        return arrayInterface;
    } else if (delegate instanceof Connection) {
        return connectionInterface;
    } else if (delegate instanceof CallableStatement) {
        return callableStatementInterface;
    } else if (delegate instanceof DatabaseMetaData) {
        return databaseMetaDataInterface;
    } else if (delegate instanceof PreparedStatement) {
        return preparedStatementInterface;
    } else if (delegate instanceof ResultSet) {
        return resultSetInterface;
    } else if (delegate instanceof Statement) {
        return statementInterface;
    } else {
        return null;
    }
}
Given a delegate retrieves the interface that must be implemented by a surrogate dynamic proxy to ensure pooling sensitive methods of the delegate are not exposed directly to clients .
144
32
154,148
// Returns the delegate Connection to its pool for reuse, or physically
// closes it when no pool is available.
protected void closeConnectionSurrogate() throws Throwable {
    ConnectionPool connectionPool = this.connectionPool;
    if (connectionPool == null) {
        // CHECKME: policy?
        // pool has "disapeared" or was never provided (why?): should
        // "really" close the connection since it will no be reused.
        Connection connection = (Connection) this.delegate;
        try {
            connection.close();
        } catch (SQLException ex) {
            // deliberately ignored: best-effort close of an orphaned connection
        }
    } else {
        Connection connection = (Connection) this.delegate;
        StatementPool statementPool = this.statementPool;
        connectionPool.checkIn(connection, statementPool);
    }
}
Does work toward enabling reuse of the delegate when it is a Connection .
135
14
154,149
// Serializes the consumer's last-seen SP/MP unique ids plus every
// per-cluster, per-partition DR id tracker into a JSON string.
public static String jsonifyClusterTrackers(Pair<Long, Long> lastConsumerUniqueIds,
        Map<Integer, Map<Integer, DRSiteDrIdTracker>> allProducerTrackers) throws JSONException {
    JSONStringer stringer = new JSONStringer();
    stringer.object();
    stringer.keySymbolValuePair("lastConsumerSpUniqueId", lastConsumerUniqueIds.getFirst());
    stringer.keySymbolValuePair("lastConsumerMpUniqueId", lastConsumerUniqueIds.getSecond());
    stringer.key("trackers").object();
    if (allProducerTrackers != null) {
        for (Map.Entry<Integer, Map<Integer, DRSiteDrIdTracker>> clusterTrackers : allProducerTrackers.entrySet()) {
            // outer object keyed by producer cluster id
            stringer.key(Integer.toString(clusterTrackers.getKey())).object();
            for (Map.Entry<Integer, DRSiteDrIdTracker> e : clusterTrackers.getValue().entrySet()) {
                // inner object keyed by producer partition id
                stringer.key(e.getKey().toString());
                stringer.value(e.getValue().toJSON());
            }
            stringer.endObject();
        }
    }
    stringer.endObject();
    stringer.endObject();
    return stringer.toString();
}
Serialize the cluster trackers into JSON .
320
9
154,150
// Deserializes the per-cluster, per-partition DR id trackers produced by
// jsonifyClusterTrackers; optionally resets last-received log ids.
public static Map<Integer, Map<Integer, DRSiteDrIdTracker>> dejsonifyClusterTrackers(
        final String jsonData, boolean resetLastReceivedLogIds) throws JSONException {
    Map<Integer, Map<Integer, DRSiteDrIdTracker>> producerTrackers = new HashMap<>();
    JSONObject clusterData = new JSONObject(jsonData);
    final JSONObject trackers = clusterData.getJSONObject("trackers");
    Iterator<String> clusterIdKeys = trackers.keys();
    while (clusterIdKeys.hasNext()) {
        final String clusterIdStr = clusterIdKeys.next();
        final int clusterId = Integer.parseInt(clusterIdStr);
        final JSONObject trackerData = trackers.getJSONObject(clusterIdStr);
        Iterator<String> srcPidKeys = trackerData.keys();
        while (srcPidKeys.hasNext()) {
            final String srcPidStr = srcPidKeys.next();
            final int srcPid = Integer.valueOf(srcPidStr);
            final JSONObject ids = trackerData.getJSONObject(srcPidStr);
            final DRSiteDrIdTracker tracker = new DRSiteDrIdTracker(ids, resetLastReceivedLogIds);
            Map<Integer, DRSiteDrIdTracker> clusterTrackers =
                    producerTrackers.computeIfAbsent(clusterId, k -> new HashMap<>());
            clusterTrackers.put(srcPid, tracker);
        }
    }
    return producerTrackers;
}
Deserialize the trackers retrieved from each consumer partition.
349
12
154,151
// Merges the trackers of the additional map into the base map: missing
// cluster/partition entries are adopted wholesale, overlapping partition
// trackers are merged in place.
public static void mergeTrackers(Map<Integer, Map<Integer, DRSiteDrIdTracker>> base,
        Map<Integer, Map<Integer, DRSiteDrIdTracker>> add) {
    for (Map.Entry<Integer, Map<Integer, DRSiteDrIdTracker>> clusterEntry : add.entrySet()) {
        final Map<Integer, DRSiteDrIdTracker> baseClusterEntry = base.get(clusterEntry.getKey());
        if (baseClusterEntry == null) {
            base.put(clusterEntry.getKey(), clusterEntry.getValue());
        } else {
            for (Map.Entry<Integer, DRSiteDrIdTracker> partitionEntry : clusterEntry.getValue().entrySet()) {
                final DRConsumerDrIdTracker basePartitionTracker =
                        baseClusterEntry.get(partitionEntry.getKey());
                if (basePartitionTracker == null) {
                    baseClusterEntry.put(partitionEntry.getKey(), partitionEntry.getValue());
                } else {
                    basePartitionTracker.mergeTracker(partitionEntry.getValue());
                }
            }
        }
    }
}
Merge trackers in the additional map into the base map .
259
13
154,152
// Appends a JSON array whose elements are rendered via their JSONString
// implementations; returns this writer for chaining.
public JSONWriter array(Iterable<? extends JSONString> iter) throws JSONException {
    array();
    for (JSONString element : iter) {
        value(element);
    }
    endArray();
    return this;
}
Append an array value based on a custom JSONString implementation .
47
13
154,153
public JSONWriter keySymbolValuePair ( String aKey , String aValue ) throws JSONException { assert ( aKey != null ) ; assert ( m_mode == ' ' ) ; // The key should not have already been seen in this scope. assert ( m_scopeStack [ m_top ] . add ( aKey ) ) ; try { m_writer . write ( m_expectingComma ? ",\"" : "\"" ) ; m_writer . write ( aKey ) ; if ( aValue == null ) { m_writer . write ( "\":null" ) ; } else { m_writer . write ( "\":\"" ) ; m_writer . write ( JSONObject . quotable ( aValue ) ) ; m_writer . write ( ' ' ) ; } } catch ( IOException e ) { throw new JSONException ( e ) ; } m_expectingComma = true ; return this ; }
Write a JSON key - value pair in one optimized step that assumes that the key is a symbol composed of normal characters requiring no escaping and asserts that keys are non - null and unique within an object ONLY if asserts are enabled . This method is most suitable in the common case where the caller is making a hard - coded series of calls with the same hard - coded strings for keys . Any sequencing errors can be detected in debug runs with asserts enabled .
199
89
154,154
/**
 * Inflates a deflate/zlib-compressed byte array and returns the raw bytes.
 *
 * NOTE(review): despite the "gunzip" name this uses InflaterOutputStream,
 * which reads zlib/deflate data, not the gzip container format — callers
 * presumably compress with DeflaterOutputStream; confirm before renaming.
 *
 * Fix: the streams are now closed via try-with-resources so the
 * Inflater's native resources are released even when write() throws.
 */
public static byte[] gunzipBytes(byte[] compressedBytes) throws IOException {
    ByteArrayOutputStream bos =
            new ByteArrayOutputStream((int) (compressedBytes.length * 1.5));
    try (InflaterOutputStream dos = new InflaterOutputStream(bos)) {
        dos.write(compressedBytes);
    }
    return bos.toByteArray();
}
to avoid linking all that jazz into the client code
87
10
154,155
// Returns the existing group row data matching the given key, or null for
// the first row of a group. Special case: a simple (ungrouped) aggregate
// cannot use an index lookup, so its single group's data is cached directly.
public Object[] getGroupData(Object[] data) {
    if (isSimpleAggregate) {
        if (simpleAggregateData == null) {
            simpleAggregateData = data;
            return null;
        }
        return simpleAggregateData;
    }
    RowIterator it = groupIndex.findFirstRow(session, store, data);
    if (it.hasNext()) {
        Row row = it.getNextRow();
        if (isAggregate) {
            // mark changed so updated aggregate values are persisted
            row.setChanged();
        }
        return row.getData();
    }
    return null;
}
Special case for isSimpleAggregate cannot use index lookup .
117
12
154,156
// Adds a constraint; a PRIMARY KEY constraint is always placed first in
// the list, all others are appended, then cached lists are refreshed.
public void addConstraint(Constraint c) {
    int index = c.getConstraintType() == Constraint.PRIMARY_KEY ? 0
                                                                : constraintList.length;
    constraintList = (Constraint[]) ArrayUtil.toAdjustedArray(constraintList, c, index, 1);
    updateConstraintLists();
}
Adds a constraint .
82
4
154,157
/**
 * Returns the UNIQUE or PRIMARY KEY constraint whose column signature
 * matches the given columns, or null when none matches.
 */
Constraint getUniqueConstraintForColumns(int[] cols) {
    for (Constraint candidate : constraintList) {
        if (candidate.isUniqueWithColumns(cols)) {
            return candidate;
        }
    }
    return null;
}
Returns the UNIQUE or PK constraint with the given column signature .
76
14
154,158
// Returns the UNIQUE or PK constraint matching the main-table column
// signature; when the column sets match but in a different order, permutes
// refTableCols in place to line up with the constraint's column order.
Constraint getUniqueConstraintForColumns(int[] mainTableCols, int[] refTableCols) {
    for (int i = 0, size = constraintList.length; i < size; i++) {
        Constraint c = constraintList[i];
        // A VoltDB extension -- Don't consider non-column expression indexes for this purpose
        if (c.hasExprs()) {
            continue;
        }
        // End of VoltDB extension
        int type = c.getConstraintType();
        if (type != Constraint.UNIQUE && type != Constraint.PRIMARY_KEY) {
            continue;
        }
        int[] constraintCols = c.getMainColumns();
        if (constraintCols.length != mainTableCols.length) {
            continue;
        }
        if (ArrayUtil.areEqual(constraintCols, mainTableCols, mainTableCols.length, true)) {
            return c;
        }
        if (ArrayUtil.areEqualSets(constraintCols, mainTableCols)) {
            // same columns, different order: reorder refTableCols so each
            // entry corresponds to the constraint's column order
            int[] newRefTableCols = new int[mainTableCols.length];
            for (int j = 0; j < mainTableCols.length; j++) {
                int pos = ArrayUtil.find(constraintCols, mainTableCols[j]);
                newRefTableCols[pos] = refTableCols[j];
            }
            for (int j = 0; j < mainTableCols.length; j++) {
                refTableCols[j] = newRefTableCols[j];
            }
            return c;
        }
    }
    return null;
}
Returns the UNIQUE or PK constraint with the given column signature . Modifies the composition of refTableCols if necessary .
360
26
154,159
/**
 * Returns any foreign key constraint equivalent to the given column sets
 * on the main table, or null when none exists.
 */
Constraint getFKConstraintForColumns(Table tableMain, int[] mainCols, int[] refCols) {
    for (Constraint candidate : constraintList) {
        if (candidate.isEquivalent(tableMain, mainCols, this, refCols)) {
            return candidate;
        }
    }
    return null;
}
Returns any foreign key constraint equivalent to the column sets
96
10
154,160
/**
 * Returns the UNIQUE or PRIMARY KEY constraint backed by the given index,
 * or null when the index backs no such constraint.
 */
public Constraint getUniqueOrPKConstraintForIndex(Index index) {
    for (Constraint candidate : constraintList) {
        if (candidate.getMainIndex() != index) {
            continue;
        }
        final int type = candidate.getConstraintType();
        if (type == Constraint.UNIQUE || type == Constraint.PRIMARY_KEY) {
            return candidate;
        }
    }
    return null;
}
Returns any unique Constraint using this index
113
9
154,161
/**
 * Returns the position of the next constraint of the given type, starting
 * the scan at {@code from}; -1 when none is found.
 */
int getNextConstraintIndex(int from, int type) {
    for (int pos = from; pos < constraintList.length; pos++) {
        if (constraintList[pos].getConstraintType() == type) {
            return pos;
        }
    }
    return -1;
}
Returns the next constraint of a given type
73
8
154,162
// Table-level DDL checks before adding a column at table creation time
// (not used by ALTER COLUMN): rejects duplicate names (X_42504) and a
// second identity column (X_42525), recording the identity column/sequence.
public void addColumn(ColumnSchema column) {
    String name = column.getName().name;
    if (findColumn(name) >= 0) {
        throw Error.error(ErrorCode.X_42504, name);
    }
    if (column.isIdentity()) {
        if (identityColumn != -1) {
            throw Error.error(ErrorCode.X_42525, name);
        }
        identityColumn = getColumnCount();
        identitySequence = column.getIdentitySequence();
    }
    addColumnNoCheck(column);
}
Performs the table level checks and adds a column to the table at the DDL level . Only used at table creation not at alter column .
121
29
154,163
// Verifies two equal-length column arrays are pairwise type-compatible;
// throws X_42562 on the first mismatching type comparison group.
void checkColumnsMatch(int[] col, Table other, int[] othercol) {
    for (int i = 0; i < col.length; i++) {
        Type type = colTypes[col[i]];
        Type otherType = other.colTypes[othercol[i]];
        if (type.typeComparisonGroup != otherType.typeComparisonGroup) {
            throw Error.error(ErrorCode.X_42562);
        }
    }
}
Match two valid equal length columns arrays for type of columns
101
11
154,164
/**
 * Returns the constraints that depend on exactly the given column and no
 * other column.
 */
OrderedHashSet getDependentConstraints(int colIndex) {
    OrderedHashSet result = new OrderedHashSet();
    for (Constraint candidate : constraintList) {
        if (candidate.hasColumnOnly(colIndex)) {
            result.add(candidate);
        }
    }
    return result;
}
Returns list of constraints dependent only on one column
89
9
154,165
/**
 * Returns the constraints that involve the given column together with at
 * least one other column.
 */
OrderedHashSet getContainingConstraints(int colIndex) {
    OrderedHashSet result = new OrderedHashSet();
    for (Constraint candidate : constraintList) {
        if (candidate.hasColumnPlus(colIndex)) {
            result.add(candidate);
        }
    }
    return result;
}
Returns list of constraints dependent on more than one column
89
10
154,166
// Returns the MAIN (referencing FK) constraints that depend on the given
// PK or UNIQUE constraint.
OrderedHashSet getDependentConstraints(Constraint constraint) {
    OrderedHashSet set = new OrderedHashSet();
    for (int i = 0, size = constraintList.length; i < size; i++) {
        Constraint c = constraintList[i];
        if (c.getConstraintType() == Constraint.MAIN) {
            // reference comparison of names — presumably HsqlName instances
            // are shared singletons here; NOTE(review): confirm == (not
            // equals) is intended
            if (c.core.uniqueName == constraint.getName()) {
                set.add(c);
            }
        }
    }
    return set;
}
Returns list of MAIN constraints dependent on this PK or UNIQUE constraint
115
15
154,167
// Used for column defaults and nullability: throws X_42533 when the column
// participates in an FK whose UPDATE or DELETE referential action matches
// the given action type.
void checkColumnInFKConstraint(int colIndex, int actionType) {
    for (int i = 0, size = constraintList.length; i < size; i++) {
        Constraint c = constraintList[i];
        if (c.getConstraintType() == Constraint.FOREIGN_KEY && c.hasColumn(colIndex)
                && (actionType == c.getUpdateAction() || actionType == c.getDeleteAction())) {
            HsqlName name = c.getName();
            throw Error.error(ErrorCode.X_42533, name.getSchemaQualifiedStatementName());
        }
    }
}
Used for column defaults and nullability . Checks whether column is in an FK with a given referential action type .
148
25
154,168
/**
 * Returns the index of the named column; throws X_42501 when the column
 * does not exist.
 */
public int getColumnIndex(String name) {
    final int index = findColumn(name);
    if (index < 0) {
        throw Error.error(ErrorCode.X_42501, name);
    }
    return index;
}
Returns the index of given column name or throws if not found
48
12
154,169
// Sets the SQL default expression for a column and refreshes the cached
// per-column type/default metadata.
void setDefaultExpression(int columnIndex, Expression def) {
    ColumnSchema column = getColumn(columnIndex);
    column.setDefaultExpression(def);
    setColumnTypeVars(columnIndex);
}
Sets the SQL default value for a columm .
47
11
154,170
/**
 * Recomputes hasDefaultValues: true when any column carries a default
 * expression.
 */
void resetDefaultsFlag() {
    boolean anyDefault = false;
    for (int i = 0; i < colDefaults.length && !anyDefault; i++) {
        anyDefault = colDefaults[i] != null;
    }
    hasDefaultValues = anyDefault;
}
sets the flag for the presence of any default expression
52
10
154,171
/**
 * Returns the best existing index led by the given column, or null when
 * no such index exists.
 */
Index getIndexForColumn(int col) {
    final int pos = bestIndexForColumn[col];
    if (pos == -1) {
        return null;
    }
    return this.indexList[pos];
}
Finds an existing index for a column
38
8
154,172
// Creates a single- or multi-column primary key and its backing index,
// sets the primary-key type/sequence arrays derived from colTypes, and
// finalises table creation. Throws U_S0500 if a PK already exists.
public void createPrimaryKey(HsqlName indexName, int[] columns, boolean columnsNotNull) {
    if (primaryKeyCols != null) {
        throw Error.runtimeError(ErrorCode.U_S0500, "Table");
    }
    if (columns == null) {
        columns = ValuePool.emptyIntArray;
    } else {
        for (int i = 0; i < columns.length; i++) {
            getColumn(columns[i]).setPrimaryKey(true);
        }
    }
    primaryKeyCols = columns;
    setColumnStructures();
    primaryKeyTypes = new Type[primaryKeyCols.length];
    ArrayUtil.projectRow(colTypes, primaryKeyCols, primaryKeyTypes);
    primaryKeyColsSequence = new int[primaryKeyCols.length];
    ArrayUtil.fillSequence(primaryKeyColsSequence);
    HsqlName name = indexName;
    if (name == null) {
        // auto-generate an index name when none is supplied
        name = database.nameManager.newAutoName("IDX", getSchemaName(), getName(), SchemaObject.INDEX);
    }
    createPrimaryIndex(primaryKeyCols, primaryKeyTypes, name);
    setBestRowIdentifiers();
}
Creates a single- or multi-column primary key and index. Sets the colTypes array. Finalises the creation of the table.
268
31
154,173
// Adds a trigger, positioning it directly after the trigger named by
// otherName (when given) in both the flat trigger list and the per-event
// list selected by td.vectorIndex.
void addTrigger(TriggerDef td, HsqlName otherName) {
    int index = triggerList.length;
    if (otherName != null) {
        int pos = getTriggerIndex(otherName.name);
        if (pos != -1) {
            index = pos + 1;
        }
    }
    triggerList = (TriggerDef[]) ArrayUtil.toAdjustedArray(triggerList, td, index, 1);
    TriggerDef[] list = triggerLists[td.vectorIndex];
    index = list.length;
    if (otherName != null) {
        for (int i = 0; i < list.length; i++) {
            TriggerDef trigger = list[i];
            if (trigger.name.name.equals(otherName.name)) {
                index = i + 1;
                break;
            }
        }
    }
    list = (TriggerDef[]) ArrayUtil.toAdjustedArray(list, td, index, 1);
    triggerLists[td.vectorIndex] = list;
}
Adds a trigger .
211
4
154,174
/**
 * Returns the named trigger, scanning newest-first; null when absent.
 */
TriggerDef getTrigger(String name) {
    int pos = triggerList.length;
    while (--pos >= 0) {
        TriggerDef candidate = triggerList[pos];
        if (candidate.name.name.equals(name)) {
            return candidate;
        }
    }
    return null;
}
Returns a trigger .
60
4
154,175
// Drops the named trigger: terminates it and removes it from the flat list
// and from its per-event list.
void removeTrigger(String name) {
    TriggerDef td = null;
    for (int i = 0; i < triggerList.length; i++) {
        td = triggerList[i];
        if (td.name.name.equals(name)) {
            td.terminate();
            triggerList = (TriggerDef[]) ArrayUtil.toAdjustedArray(triggerList, null, i, -1);
            break;
        }
    }
    // NOTE(review): if no name matched, td still holds the last examined
    // trigger (non-null), so the second loop runs against the wrong event
    // list — harmless only because the name won't match there; confirm
    if (td == null) {
        return;
    }
    int index = td.vectorIndex;
    // look in each trigger list of each type of trigger
    for (int j = 0; j < triggerLists[index].length; j++) {
        td = triggerLists[index][j];
        if (td.name.name.equals(name)) {
            td.terminate();
            triggerLists[index] = (TriggerDef[]) ArrayUtil.toAdjustedArray(triggerLists[index], null, j, -1);
            break;
        }
    }
}
Drops a trigger .
218
5
154,176
void releaseTriggers ( ) { // look in each trigger list of each type of trigger for ( int i = 0 ; i < TriggerDef . NUM_TRIGS ; i ++ ) { for ( int j = 0 ; j < triggerLists [ i ] . length ; j ++ ) { triggerLists [ i ] [ j ] . terminate ( ) ; } triggerLists [ i ] = TriggerDef . emptyArray ; } }
Drops all triggers .
93
5
154,177
int getIndexIndex ( String indexName ) { Index [ ] indexes = indexList ; for ( int i = 0 ; i < indexes . length ; i ++ ) { if ( indexName . equals ( indexes [ i ] . getName ( ) . name ) ) { return i ; } } // no such index return - 1 ; }
Returns the index of the Index object of the given name or - 1 if not found .
70
18
154,178
/**
 * Returns the Index object of the given name, or null when not found.
 */
Index getIndex(String indexName) {
    final int pos = getIndexIndex(indexName);
    if (pos < 0) {
        return null;
    }
    return indexList[pos];
}
Returns the Index object of the given name or null if not found .
42
14
154,179
/**
 * Returns the position of the named constraint within constraintList, or
 * -1 when no such constraint exists.
 */
int getConstraintIndex(String constraintName) {
    int pos = 0;
    for (Constraint candidate : constraintList) {
        if (candidate.getName().name.equals(constraintName)) {
            return pos;
        }
        pos++;
    }
    return -1;
}
Return the position of the constraint within the list
66
9
154,180
/**
 * Returns the named constraint, or null when it does not exist.
 */
public Constraint getConstraint(String constraintName) {
    final int pos = getConstraintIndex(constraintName);
    if (pos < 0) {
        return null;
    }
    return constraintList[pos];
}
Returns the named constraint.
43
6
154,181
// Creates an index automatically for system tables, returning null on any
// failure (deliberate best-effort: the swallowed Throwable means system-
// table indexing is optional).
Index createIndexForColumns(int[] columns) {
    HsqlName indexName = database.nameManager.newAutoName("IDX_T", getSchemaName(), getName(), SchemaObject.INDEX);
    try {
        Index index = createAndAddIndexStructure(indexName, columns, null, null, false, false, false, false);
        return index;
    } catch (Throwable t) {
        return null;
    }
}
Used to create an index automatically for system tables .
101
10
154,182
// Enforces per-row constraints: converts values to declared type limits
// (SQL92 13.8), checks domain CHECK constraints, and rejects nulls in
// NOT NULL columns with X_23503.
void enforceRowConstraints(Session session, Object[] data) {
    for (int i = 0; i < defaultColumnMap.length; i++) {
        Type type = colTypes[i];
        data[i] = type.convertToTypeLimits(session, data[i]);
        if (type.isDomainType()) {
            Constraint[] constraints = type.userTypeModifier.getConstraints();
            for (int j = 0; j < constraints.length; j++) {
                constraints[j].checkCheckConstraint(session, this, data[i]);
            }
        }
        if (data[i] == null) {
            if (colNotNull[i]) {
                Constraint c = getNotNullConstraintForColumn(i);
                if (c == null) {
                    if (getColumn(i).isPrimaryKey()) {
                        c = this.getPrimaryConstraint();
                    }
                }
                // NOTE(review): c can still be null here (NOT NULL column
                // with neither a constraint nor PK membership) -> NPE;
                // confirm that state is unreachable
                String[] info = new String[]{ c.getName().name, tableName.name };
                throw Error.error(ErrorCode.X_23503, ErrorCode.CONSTRAINT, info);
            }
        }
    }
}
Enforce max field sizes according to SQL column definition . SQL92 13 . 8
263
16
154,183
// Finds an existing index whose leading column is cols[0]; for subquery,
// system, view and temp tables an index is created on demand instead.
Index getIndexForColumns(int[] cols) {
    int i = bestIndexForColumn[cols[0]];
    if (i > -1) {
        return indexList[i];
    }
    switch (tableType) {
        case TableBase.SYSTEM_SUBQUERY :
        case TableBase.SYSTEM_TABLE :
        case TableBase.VIEW_TABLE :
        case TableBase.TEMP_TABLE : {
            Index index = createIndexForColumns(cols);
            return index;
        }
    }
    return null;
}
Finds an existing index for a column group
110
9
154,184
// Finds the existing index that matches the most columns of the set; a
// full match returns immediately. For subquery/system/view/temp tables an
// index is created when nothing matches at all.
Index getIndexForColumns(OrderedIntHashSet set) {
    int maxMatchCount = 0;
    Index selected = null;
    if (set.isEmpty()) {
        return null;
    }
    for (int i = 0, count = indexList.length; i < count; i++) {
        Index currentindex = getIndex(i);
        int[] indexcols = currentindex.getColumns();
        int matchCount = set.getOrderedMatchCount(indexcols);
        if (matchCount == 0) {
            continue;
        }
        if (matchCount == indexcols.length) {
            // every column of this index is covered: best possible match
            return currentindex;
        }
        if (matchCount > maxMatchCount) {
            maxMatchCount = matchCount;
            selected = currentindex;
        }
    }
    if (selected != null) {
        return selected;
    }
    switch (tableType) {
        case TableBase.SYSTEM_SUBQUERY :
        case TableBase.SYSTEM_TABLE :
        case TableBase.VIEW_TABLE :
        case TableBase.TEMP_TABLE : {
            selected = createIndexForColumns(set.toArray());
        }
    }
    return selected;
}
Finds an existing index for a column set or create one for temporary tables
234
15
154,185
// Returns the file pointers (positions) of the root nodes of this table's
// indexes; -1 for an index without an accessor.
public final int[] getIndexRootsArray() {
    PersistentStore store = database.persistentStoreCollection.getStore(this);
    int[] roots = new int[getIndexCount()];
    for (int i = 0; i < getIndexCount(); i++) {
        CachedObject accessor = store.getAccessor(indexList[i]);
        roots[i] = accessor == null ? -1 : accessor.getPos();
    }
    return roots;
}
Return the list of file pointers to root nodes for this table s indexes .
109
15
154,186
// Parses a whitespace-separated list of index root positions from s and
// applies them; only valid for CACHED tables (throws X_42501 otherwise).
void setIndexRoots(Session session, String s) {
    if (!isCached) {
        throw Error.error(ErrorCode.X_42501, tableName.name);
    }
    ParserDQL p = new ParserDQL(session, new Scanner(s));
    int[] roots = new int[getIndexCount()];
    p.read();
    for (int i = 0; i < getIndexCount(); i++) {
        int v = p.readInteger();
        roots[i] = v;
    }
    setIndexRoots(roots);
}
Sets the index roots and next identity .
130
9
154,187
// Removes a (non-PK) index from a MEMORY or TEXT table: drops it from
// indexList, renumbers the remaining indexes and resets accessor keys.
public void dropIndex(Session session, String indexname) {
    // find the array index for indexname and remove
    int todrop = getIndexIndex(indexname);
    indexList = (Index[]) ArrayUtil.toAdjustedArray(indexList, null, todrop, -1);
    for (int i = 0; i < indexList.length; i++) {
        indexList[i].setPosition(i);
    }
    setBestRowIdentifiers();
    if (store != null) {
        store.resetAccessorKeys(indexList);
    }
}
Performs Table structure modification and changes to the index nodes to remove a given index from a MEMORY or TEXT table . Not for PK index .
123
29
154,188
// Mid-level insert: fills the identity column, fires BEFORE INSERT
// triggers, performs constraint checks, then does the low-level insert.
void insertRow(Session session, PersistentStore store, Object[] data) {
    setIdentityColumn(session, data);
    if (triggerLists[Trigger.INSERT_BEFORE].length != 0) {
        fireBeforeTriggers(session, Trigger.INSERT_BEFORE, null, data, null);
    }
    // a view has no storage of its own
    if (isView) {
        return;
    }
    checkRowDataInsert(session, data);
    insertNoCheck(session, store, data);
}
Mid level method for inserting rows . Performs constraint checks and fires row level triggers .
103
17
154,189
// Multi-row insert used by CREATE TABLE ... AS SELECT: resizes each source
// row to this table's column count and inserts it with no checks.
void insertIntoTable(Session session, Result result) {
    PersistentStore store = session.sessionData.getRowStore(this);
    RowSetNavigator nav = result.initialiseNavigator();
    while (nav.hasNext()) {
        Object[] data = nav.getNext();
        Object[] newData = (Object[]) ArrayUtil.resizeArrayIfDifferent(data, getColumnCount());
        insertData(store, newData);
    }
}
Multi - row insert method . Used for CREATE TABLE AS ... queries .
107
15
154,190
// Low-level insert: UNIQUE/PRIMARY KEY constraints are enforced when the
// row is added to the indexes; the insert is registered with the session.
private Row insertNoCheck(Session session, PersistentStore store, Object[] data) {
    Row row = (Row) store.getNewCachedObject(session, data);
    store.indexRow(session, row);
    session.addInsertAction(this, row);
    return row;
}
Low level method for row insert . UNIQUE or PRIMARY constraints are enforced by attempting to add the row to the indexes .
64
27
154,191
/**
 * System-table bulk insert: no checks, no identity columns. Returns the
 * number of rows inserted.
 */
public int insertSys(PersistentStore store, Result ins) {
    int inserted = 0;
    RowSetNavigator nav = ins.getNavigator();
    while (nav.hasNext()) {
        insertSys(store, nav.getNext());
        inserted++;
    }
    return inserted;
}
Used for system table inserts . No checks . No identity columns .
64
13
154,192
// Subquery insert: no checks, no identity columns; each source row is
// resized to this table's column count before insertion.
void insertResult(PersistentStore store, Result ins) {
    RowSetNavigator nav = ins.initialiseNavigator();
    while (nav.hasNext()) {
        Object[] data = nav.getNext();
        Object[] newData = (Object[]) ArrayUtil.resizeArrayIfDifferent(data, getColumnCount());
        insertData(store, newData);
    }
}
Used for subquery inserts . No checks . No identity columns .
90
13
154,193
// Not for general use: unconditional insert used by ScriptReader when
// replaying a .script file; keeps the identity sequence in step first.
public void insertFromScript(PersistentStore store, Object[] data) {
    systemUpdateIdentityValue(data);
    insertData(store, data);
}
Not for general use . Used by ScriptReader to unconditionally insert a row into the table when the . script file is read .
35
26
154,194
// Advances the identity sequence past the identity value carried in the
// row, when this table has an identity column and the value is non-null.
protected void systemUpdateIdentityValue(Object[] data) {
    if (identityColumn != -1) {
        Number id = (Number) data[identityColumn];
        if (id != null) {
            identitySequence.systemUpdate(id.longValue());
        }
    }
}
If there is an identity column in the table sets the max identity value .
60
15
154,195
// Mid-level delete: fires BEFORE DELETE triggers but performs no
// referential-integrity checks.
void deleteNoRefCheck(Session session, Row row) {
    Object[] data = row.getData();
    fireBeforeTriggers(session, Trigger.DELETE_BEFORE, data, null, null);
    // a view has no storage of its own
    if (isView) {
        return;
    }
    deleteNoCheck(session, row);
}
Mid level row delete method . Fires triggers but no integrity constraint checks .
68
14
154,196
// Low-level delete: registers a delete action with the session unless the
// row is already deleted in this session's view.
private void deleteNoCheck(Session session, Row row) {
    if (row.isDeleted(session)) {
        return;
    }
    session.addDeleteAction(this, row);
}
Low level row delete method . Removes the row from the indexes and from the Cache .
40
18
154,197
// Deletes a single row while replaying log statements. Locates the row by
// primary key when one exists, otherwise by the best index (stopping at
// the end of the matching range) or by a full scan; no-op when not found.
public void deleteNoCheckFromLog(Session session, Object[] data) {
    Row row = null;
    PersistentStore store = session.sessionData.getRowStore(this);
    if (hasPrimaryKey()) {
        RowIterator it = getPrimaryIndex().findFirstRow(session, store, data, primaryKeyColsSequence);
        row = it.getNextRow();
    } else if (bestIndex == null) {
        // no usable index: full scan comparing every column
        RowIterator it = rowIterator(session);
        while (true) {
            row = it.getNextRow();
            if (row == null) {
                break;
            }
            if (IndexAVL.compareRows(row.getData(), data, defaultColumnMap, colTypes) == 0) {
                break;
            }
        }
    } else {
        RowIterator it = bestIndex.findFirstRow(session, store, data);
        while (true) {
            row = it.getNextRow();
            if (row == null) {
                break;
            }
            Object[] rowdata = row.getData();
            // reached end of range
            if (bestIndex.compareRowNonUnique(data, bestIndex.getColumns(), rowdata) != 0) {
                row = null;
                break;
            }
            if (IndexAVL.compareRows(rowdata, data, defaultColumnMap, colTypes) == 0) {
                break;
            }
        }
    }
    if (row == null) {
        return;
    }
    deleteNoCheck(session, row);
}
For log statements . Delete a single row .
315
9
154,198
// VoltDB extension: replaces any existing TTL definition with a new one
// driven by the named column, batch size, frequency and target stream.
public void addTTL(int ttlValue, String ttlUnit, String ttlColumn, int batchSize, int maxFrequency, String streamName) {
    dropTTL();
    timeToLive = new TimeToLiveVoltDB(ttlValue, ttlUnit, getColumn(findColumn(ttlColumn)), batchSize, maxFrequency, streamName);
}
A VoltDB extension to support TTL
85
7
154,199
/**
 * Returns the start offset recorded for the given header field.
 *
 * Fix: replaced the deprecated {@code new Integer(field)} boxing with
 * {@code Integer.valueOf(field)}, which uses the shared integer cache.
 *
 * @throws IllegalArgumentException when the field is not a known header key
 */
static public int getStart(int field) {
    Integer iObject = (Integer) starts.get(Integer.valueOf(field));
    if (iObject == null) {
        throw new IllegalArgumentException(
                RB.singleton.getString(RB.UNEXPECTED_HEADER_KEY, field));
    }
    return iObject.intValue();
}
Indicates a programming error, not some problem with a Header or with generating or reading a Header.
78
13