Columns:
  idx           int64   range 0 .. 165k
  question      string  lengths 73 .. 4.15k
  target        string  lengths 5 .. 918
  len_question  int64   range 21 .. 890
  len_target    int64   range 3 .. 255
154,400
private Expression readCoalesceExpression ( ) { Expression c = null ; // turn into a CASEWHEN read ( ) ; readThis ( Tokens . OPENBRACKET ) ; Expression leaf = null ; while ( true ) { Expression current = XreadValueExpression ( ) ; if ( leaf != null && token . tokenType == Tokens . CLOSEBRACKET ) { readThis ( Tokens . CLOSEBRACKET ) ; leaf . setLeftNode ( current ) ; break ; } Expression condition = new ExpressionLogical ( OpTypes . IS_NULL , current ) ; Expression alternatives = new ExpressionOp ( OpTypes . ALTERNATIVE , new ExpressionValue ( ( Object ) null , ( Type ) null ) , current ) ; Expression casewhen = new ExpressionOp ( OpTypes . CASEWHEN , condition , alternatives ) ; if ( c == null ) { c = casewhen ; } else { leaf . setLeftNode ( casewhen ) ; } leaf = alternatives ; readThis ( Tokens . COMMA ) ; } return c ; }
Reads a COALESCE or IFNULL expression
222
10
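A sketch of what the rewrite above produces; the helper below is a plain-Java analogue of the generated CASEWHEN tree, illustrative only and not HSQLDB API:

// COALESCE(a, b, c) is parsed into nested CASEWHEN nodes, roughly:
//   CASEWHEN(a IS NULL, ALTERNATIVE(CASEWHEN(b IS NULL, ALTERNATIVE(c, b)), a))
// which evaluates to the first non-null argument:
static Object coalesce(Object... args) {
    for (Object arg : args) {
        if (arg != null) {
            return arg;   // first non-null argument wins
        }
    }
    return null;          // all arguments were null
}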
154,401
StatementDMQL compileCursorSpecification ( ) { QueryExpression queryExpression = XreadQueryExpression ( ) ; queryExpression . setAsTopLevel ( ) ; queryExpression . resolve ( session ) ; if ( token . tokenType == Tokens . FOR ) { read ( ) ; if ( token . tokenType == Tokens . READ ) { read ( ) ; readThis ( Tokens . ONLY ) ; } else { readThis ( Tokens . UPDATE ) ; if ( token . tokenType == Tokens . OF ) { readThis ( Tokens . OF ) ; OrderedHashSet colNames = readColumnNameList ( null , false ) ; } } } StatementDMQL cs = new StatementQuery ( session , queryExpression , compileContext ) ; return cs ; }
Retrieves a SELECT or other query expression Statement from this parse context .
160
15
154,402
public long toLong ( ) { byte [ ] data = getBytes ( ) ; if ( data == null || data . length <= 0 || data . length > 8 ) { // Assume that we're in a numeric context and that the user // made a typo entering a hex string. throw Error . error ( ErrorCode . X_42585 ) ; // malformed numeric constant } byte [ ] dataWithLeadingZeros = new byte [ ] { 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 } ; int lenDiff = 8 - data . length ; for ( int j = lenDiff ; j < 8 ; ++ j ) { dataWithLeadingZeros [ j ] = data [ j - lenDiff ] ; } BigInteger bi = new BigInteger ( dataWithLeadingZeros ) ; return bi . longValue ( ) ; }
Given a sequence of bytes that would otherwise be a VARBINARY constant , return a long value , with the understanding that the caller has determined that this value is in a numeric context .
181
36
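A minimal standalone sketch of the same left-padding conversion (names are illustrative, not the HSQLDB class):

import java.math.BigInteger;

static long varbinaryToLong(byte[] data) {
    if (data == null || data.length == 0 || data.length > 8) {
        throw new IllegalArgumentException("malformed numeric constant");
    }
    byte[] padded = new byte[8];                                   // leading zero bytes
    System.arraycopy(data, 0, padded, 8 - data.length, data.length);
    return new BigInteger(padded).longValue();                     // big-endian value
}
// Example: varbinaryToLong(new byte[] { 0x01, 0x02 }) == 258     // X'0102'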
154,403
public static void main ( String args [ ] ) { Statement stmts [ ] = null ; try { stmts = getStatements ( args [ 0 ] ) ; } catch ( Throwable e ) { System . out . println ( e . getMessage ( ) ) ; return ; } for ( Statement s : stmts ) { System . out . print ( s . statement ) ; } }
Run the parser as a stand - alone tool , sending output to standard out .
84
15
154,404
void watchPartition ( int pid , ExecutorService es , boolean shouldBlock ) throws InterruptedException , ExecutionException { String dir = LeaderElector . electionDirForPartition ( VoltZK . leaders_initiators , pid ) ; m_callbacks . put ( pid , new PartitionCallback ( pid ) ) ; BabySitter babySitter ; if ( shouldBlock ) { babySitter = BabySitter . blockingFactory ( m_zk , dir , m_callbacks . get ( pid ) , es ) . getFirst ( ) ; } else { babySitter = BabySitter . nonblockingFactory ( m_zk , dir , m_callbacks . get ( pid ) , es ) ; } m_partitionWatchers . put ( pid , babySitter ) ; }
Watch the partition ZK dir in the leader appointer .
172
13
154,405
private int getInitialPartitionCount ( ) throws IllegalAccessException { AppointerState currentState = m_state . get ( ) ; if ( currentState != AppointerState . INIT && currentState != AppointerState . CLUSTER_START ) { throw new IllegalAccessException ( "Getting cached partition count after cluster " + "startup" ) ; } return m_initialPartitionCount ; }
Gets the initial cluster partition count on startup . This can only be called during initialization . Calling this after initialization throws because the partition count may not reflect the actual partition count in the cluster .
89
38
154,406
public void updatePartitionLeader ( int partitionId , long newMasterHISD , boolean isLeaderMigrated ) { PartitionCallback cb = m_callbacks . get ( partitionId ) ; if ( cb != null && cb . m_currentLeader != newMasterHISD ) { cb . m_previousLeader = cb . m_currentLeader ; cb . m_currentLeader = newMasterHISD ; cb . m_isLeaderMigrated = isLeaderMigrated ; } }
update the partition callback with the current master and replica
112
10
154,407
public int compare ( String a , String b ) { int i ; if ( collator == null ) { i = a . compareTo ( b ) ; } else { i = collator . compare ( a , b ) ; } return ( i == 0 ) ? 0 : ( i < 0 ? - 1 : 1 ) ; }
returns - 1 , 0 , or + 1
69
8
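The final ternary normalizes whatever magnitude the underlying comparator returns down to the -1/0/+1 contract; a hedged usage sketch:

int raw = "apple".compareTo("banana");                 // any negative value, magnitude unspecified
int normalized = (raw == 0) ? 0 : (raw < 0 ? -1 : 1);  // exactly -1 here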
154,408
int get ( int rowSize ) { if ( lookup . size ( ) == 0 ) { return - 1 ; } int index = lookup . findFirstGreaterEqualKeyIndex ( rowSize ) ; if ( index == - 1 ) { return - 1 ; } // statistics for successful requests only - to be used later for midSize requestCount ++ ; requestSize += rowSize ; int length = lookup . getValue ( index ) ; int difference = length - rowSize ; int key = lookup . getKey ( index ) ; lookup . remove ( index ) ; if ( difference >= midSize ) { int pos = key + ( rowSize / scale ) ; lookup . add ( pos , difference ) ; } else { lostFreeBlockSize += difference ; } return key ; }
Returns the position of a free block or - 1 .
160
10
154,409
public void seek ( long position ) throws IOException { if ( ! readOnly && file . length ( ) < position ) { long tempSize = position - file . length ( ) ; if ( tempSize > 1 << 18 ) { tempSize = 1 << 18 ; } byte [ ] temp = new byte [ ( int ) tempSize ] ; try { long pos = file . length ( ) ; for ( ; pos < position - tempSize ; pos += tempSize ) { file . seek ( pos ) ; file . write ( temp , 0 , ( int ) tempSize ) ; } file . seek ( pos ) ; file . write ( temp , 0 , ( int ) ( position - pos ) ) ; realPosition = position ; } catch ( IOException e ) { appLog . logContext ( e , null ) ; throw e ; } } seekPosition = position ; }
Some JVMs do not allow seek beyond end of file , so zeros are written first in that case . Reported by bohgammer
181
28
154,410
void getBytes ( byte [ ] output ) { if ( m_totalAvailable < output . length ) { throw new IllegalStateException ( "Requested " + output . length + " bytes; only have " + m_totalAvailable + " bytes; call tryRead() first" ) ; } int bytesCopied = 0 ; while ( bytesCopied < output . length ) { BBContainer firstC = m_readBBContainers . peekFirst ( ) ; if ( firstC == null ) { // Steal the write buffer m_poolBBContainer . b ( ) . flip ( ) ; m_readBBContainers . add ( m_poolBBContainer ) ; firstC = m_poolBBContainer ; m_poolBBContainer = null ; } ByteBuffer first = firstC . b ( ) ; assert first . remaining ( ) > 0 ; // Copy bytes from first into output int bytesRemaining = first . remaining ( ) ; int bytesToCopy = output . length - bytesCopied ; if ( bytesToCopy > bytesRemaining ) bytesToCopy = bytesRemaining ; first . get ( output , bytesCopied , bytesToCopy ) ; bytesCopied += bytesToCopy ; m_totalAvailable -= bytesToCopy ; if ( first . remaining ( ) == 0 ) { // read an entire block: move it to the empty buffers list m_readBBContainers . poll ( ) ; firstC . discard ( ) ; } } }
Move all bytes in current read buffers to the output array ; free read buffers back to the thread local memory pool .
308
20
154,411
void reindex ( Session session , Index index ) { setAccessor ( index , null ) ; RowIterator it = table . rowIterator ( session ) ; while ( it . hasNext ( ) ) { Row row = it . getNextRow ( ) ; // may need to clear the node before insert index . insert ( session , this , row ) ; } }
for result tables
75
3
154,412
public XAConnection getXAConnection ( ) throws SQLException { // Comment out before public release: System . err . print ( "Executing " + getClass ( ) . getName ( ) + ".getXAConnection()..." ) ; try { Class . forName ( driver ) . newInstance ( ) ; } catch ( ClassNotFoundException e ) { throw new SQLException ( "Error opening connection: " + e . getMessage ( ) ) ; } catch ( IllegalAccessException e ) { throw new SQLException ( "Error opening connection: " + e . getMessage ( ) ) ; } catch ( InstantiationException e ) { throw new SQLException ( "Error opening connection: " + e . getMessage ( ) ) ; } JDBCConnection connection = ( JDBCConnection ) DriverManager . getConnection ( url , connProperties ) ; // Comment out before public release: System . err . print ( "New phys: " + connection ) ; JDBCXAResource xaResource = new JDBCXAResource ( connection , this ) ; JDBCXAConnectionWrapper xaWrapper = new JDBCXAConnectionWrapper ( connection , xaResource , connectionDefaults ) ; JDBCXAConnection xaConnection = new JDBCXAConnection ( xaWrapper , xaResource ) ; xaWrapper . setPooledConnection ( xaConnection ) ; return xaConnection ; }
Get new PHYSICAL connection to be managed by a connection manager .
324
14
154,413
public XAConnection getXAConnection ( String user , String password ) throws SQLException { validateSpecifiedUserAndPassword ( user , password ) ; return getXAConnection ( ) ; }
Gets a new physical connection after validating the given username and password .
48
15
154,414
static String id ( Object o ) { if ( o == null ) return "(null)" ; Thread t = Thread . currentThread ( ) ; StringBuilder sb = new StringBuilder ( 128 ) ; sb . append ( "(T[" ) . append ( t . getName ( ) ) . append ( "]@" ) ; sb . append ( Long . toString ( t . getId ( ) , Character . MAX_RADIX ) ) ; sb . append ( ":O[" ) . append ( o . getClass ( ) . getSimpleName ( ) ) ; sb . append ( "]@" ) ; sb . append ( Long . toString ( System . identityHashCode ( o ) , Character . MAX_RADIX ) ) ; sb . append ( ")" ) ; return sb . toString ( ) ; }
Tracing utility method useful for debugging
182
7
154,415
public void registerCallback ( String importer , ChannelChangeCallback callback ) { Preconditions . checkArgument ( importer != null && ! importer . trim ( ) . isEmpty ( ) , "importer is null or empty" ) ; callback = checkNotNull ( callback , "callback is null" ) ; if ( m_done . get ( ) ) return ; int [ ] stamp = new int [ ] { 0 } ; NavigableMap < String , ChannelChangeCallback > prev = null ; NavigableMap < String , ChannelChangeCallback > next = null ; ImmutableSortedMap . Builder < String , ChannelChangeCallback > mbldr = null ; synchronized ( m_undispatched ) { do { prev = m_callbacks . get ( stamp ) ; mbldr = ImmutableSortedMap . naturalOrder ( ) ; mbldr . putAll ( Maps . filterKeys ( prev , not ( equalTo ( importer ) ) ) ) ; mbldr . put ( importer , callback ) ; next = mbldr . build ( ) ; } while ( ! m_callbacks . compareAndSet ( prev , next , stamp [ 0 ] , stamp [ 0 ] + 1 ) ) ; NavigableSet < String > registered = next . navigableKeySet ( ) ; NavigableSet < String > unregistered = m_unregistered . getReference ( ) ; Iterator < ImporterChannelAssignment > itr = m_undispatched . iterator ( ) ; while ( itr . hasNext ( ) ) { final ImporterChannelAssignment assignment = itr . next ( ) ; if ( registered . contains ( assignment . getImporter ( ) ) ) { final ChannelChangeCallback dispatch = next . get ( assignment . getImporter ( ) ) ; m_buses . submit ( new DistributerRunnable ( ) { @ Override public void susceptibleRun ( ) throws Exception { dispatch . onChange ( assignment ) ; } } ) ; itr . remove ( ) ; } else if ( unregistered . contains ( assignment . getImporter ( ) ) ) { itr . remove ( ) ; if ( ! assignment . getAdded ( ) . isEmpty ( ) ) { LOG . warn ( "(" + m_hostId + ") discarding assignment to unregistered importer " + assignment ) ; } } } } }
Registers a callback for the given importer .
507
4
154,416
public void unregisterCallback ( String importer ) { if ( importer == null || ! m_callbacks . getReference ( ) . containsKey ( importer ) || m_unregistered . getReference ( ) . contains ( importer ) ) { return ; } if ( m_done . get ( ) ) return ; int [ ] rstamp = new int [ ] { 0 } ; NavigableMap < String , ChannelChangeCallback > rprev = null ; NavigableMap < String , ChannelChangeCallback > rnext = null ; int [ ] ustamp = new int [ ] { 0 } ; NavigableSet < String > uprev = null ; NavigableSet < String > unext = null ; synchronized ( m_undispatched ) { do { rprev = m_callbacks . get ( rstamp ) ; rnext = ImmutableSortedMap . < String , ChannelChangeCallback > naturalOrder ( ) . putAll ( Maps . filterKeys ( rprev , not ( equalTo ( importer ) ) ) ) . build ( ) ; } while ( rprev . containsKey ( importer ) && ! m_callbacks . compareAndSet ( rprev , rnext , rstamp [ 0 ] , rstamp [ 0 ] + 1 ) ) ; do { uprev = m_unregistered . get ( ustamp ) ; unext = ImmutableSortedSet . < String > naturalOrder ( ) . addAll ( Sets . filter ( uprev , not ( equalTo ( importer ) ) ) ) . add ( importer ) . build ( ) ; } while ( ! uprev . contains ( importer ) && m_unregistered . compareAndSet ( uprev , unext , ustamp [ 0 ] , ustamp [ 0 ] + 1 ) ) ; Iterator < ImporterChannelAssignment > itr = m_undispatched . iterator ( ) ; while ( itr . hasNext ( ) ) { final ImporterChannelAssignment assignment = itr . next ( ) ; if ( unext . contains ( assignment . getImporter ( ) ) ) { itr . remove ( ) ; } } } }
Unregisters the callback assigned to given importer . Once it is unregistered it can no longer be re - registered
465
24
154,417
public void shutdown ( ) { if ( m_done . compareAndSet ( false , true ) ) { m_es . shutdown ( ) ; m_buses . shutdown ( ) ; DeleteNode deleteHost = new DeleteNode ( joinZKPath ( HOST_DN , m_hostId ) ) ; DeleteNode deleteCandidate = new DeleteNode ( m_candidate ) ; try { m_es . awaitTermination ( 365 , TimeUnit . DAYS ) ; } catch ( InterruptedException e ) { throw loggedDistributerException ( e , "interrupted while waiting for executor termination" ) ; } try { m_buses . awaitTermination ( 365 , TimeUnit . DAYS ) ; } catch ( InterruptedException e ) { throw loggedDistributerException ( e , "interrupted while waiting for executor termination" ) ; } deleteHost . onComplete ( ) ; deleteCandidate . onComplete ( ) ; } }
Sets the done flag , shuts down its executor thread , and deletes its own host and candidate nodes
199
20
154,418
@ Subscribe public void undispatched ( DeadEvent e ) { if ( ! m_done . get ( ) && e . getEvent ( ) instanceof ImporterChannelAssignment ) { ImporterChannelAssignment assignment = ( ImporterChannelAssignment ) e . getEvent ( ) ; synchronized ( m_undispatched ) { NavigableSet < String > registered = m_callbacks . getReference ( ) . navigableKeySet ( ) ; NavigableSet < String > unregistered = m_unregistered . getReference ( ) ; if ( registered . contains ( assignment . getImporter ( ) ) ) { m_eb . post ( assignment ) ; } else if ( ! assignment . getAdded ( ) . isEmpty ( ) && unregistered . contains ( assignment . getImporter ( ) ) ) { LOG . warn ( "(" + m_hostId + ") discarding assignment to unregistered importer " + assignment ) ; } else { m_undispatched . add ( assignment ) ; } } } }
Keeps assignments for unregistered importers
219
8
154,419
public Object next ( ) { // for chained iterators if ( chained ) { if ( it1 == null ) { if ( it2 == null ) { throw new NoSuchElementException ( ) ; } if ( it2 . hasNext ( ) ) { return it2 . next ( ) ; } it2 = null ; return next ( ) ; } else { if ( it1 . hasNext ( ) ) { return it1 . next ( ) ; } it1 = null ; return next ( ) ; } } // for other iterators if ( hasNext ( ) ) { return elements [ i ++ ] ; } throw new NoSuchElementException ( ) ; }
Returns the next element .
137
5
154,420
public List < Long > getSitesForPartitions ( int [ ] partitions ) { ArrayList < Long > all_sites = new ArrayList < Long > ( ) ; for ( int p : partitions ) { List < Long > sites = getSitesForPartition ( p ) ; for ( long site : sites ) { all_sites . add ( site ) ; } } return all_sites ; }
Get the ids of all sites that contain a copy of ANY of the given partitions .
85
18
154,421
public long [ ] getSitesForPartitionsAsArray ( int [ ] partitions ) { ArrayList < Long > all_sites = new ArrayList < Long > ( ) ; for ( int p : partitions ) { List < Long > sites = getSitesForPartition ( p ) ; for ( long site : sites ) { all_sites . add ( site ) ; } } return longListToArray ( all_sites ) ; }
Get the ids of all live sites that contain a copy of ANY of the given partitions .
92
19
154,422
protected void readCompressedBlocks ( int blocks ) throws IOException { int bytesSoFar = 0 ; int requiredBytes = 512 * blocks ; // This method works with individual bytes! int i ; while ( bytesSoFar < requiredBytes ) { i = readStream . read ( readBuffer , bytesSoFar , requiredBytes - bytesSoFar ) ; if ( i < 0 ) { // A VoltDB extension to disable tagging eof as an error. return ; /* disable 3 lines ... throw new EOFException( RB.singleton.getString( RB.DECOMPRESS_RANOUT, bytesSoFar, requiredBytes)); ... disabled 3 lines */ // End of VoltDB extension } bytesRead += i ; bytesSoFar += i ; } }
Work - around for the problem that compressed InputReaders don't fill the read buffer before returning .
156
20
154,423
static < T > T [ ] arraysCopyOf ( T [ ] original , int newLength ) { T [ ] copy = newArray ( original , newLength ) ; System . arraycopy ( original , 0 , copy , 0 , Math . min ( original . length , newLength ) ) ; return copy ; }
GWT safe version of Arrays . copyOf .
65
11
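Usage sketch (array contents assumed); growing pads with null and shrinking truncates, matching Arrays.copyOf:

String[] src = { "a", "b", "c" };
String[] grown = arraysCopyOf(src, 5);   // { "a", "b", "c", null, null }
String[] cut   = arraysCopyOf(src, 2);   // { "a", "b" }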
154,424
public static void initialize ( Class < ? extends TheHashinator > hashinatorImplementation , byte config [ ] ) { TheHashinator hashinator = constructHashinator ( hashinatorImplementation , config , false ) ; m_pristineHashinator = hashinator ; m_cachedHashinators . put ( 0L , hashinator ) ; instance . set ( Pair . of ( 0L , hashinator ) ) ; }
Initialize TheHashinator with the specified implementation class and configuration . The starting version number will be 0 .
87
21
154,425
public static TheHashinator getHashinator ( Class < ? extends TheHashinator > hashinatorImplementation , byte config [ ] , boolean cooked ) { return constructHashinator ( hashinatorImplementation , config , cooked ) ; }
Get a TheHashinator instance based on known implementation and configuration . Used by client after asking server what it is running .
47
24
154,426
public static TheHashinator constructHashinator ( Class < ? extends TheHashinator > hashinatorImplementation , byte configBytes [ ] , boolean cooked ) { try { Constructor < ? extends TheHashinator > constructor = hashinatorImplementation . getConstructor ( byte [ ] . class , boolean . class ) ; return constructor . newInstance ( configBytes , cooked ) ; } catch ( Exception e ) { Throwables . propagate ( e ) ; } return null ; }
Helper method to do the reflection boilerplate to call the constructor of the selected hashinator and convert the exceptions to runtime exceptions .
97
25
154,427
static public long computeConfigurationSignature ( byte [ ] config ) { PureJavaCrc32C crc = new PureJavaCrc32C ( ) ; crc . update ( config ) ; return crc . getValue ( ) ; }
It computes a signature from the given configuration bytes
51
10
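PureJavaCrc32C is a bundled implementation; on Java 9+ an equivalent signature can be sketched with the JDK's own CRC32C (assumed to produce the same checksum, which should be verified before swapping it in):

import java.util.zip.CRC32C;

static long computeConfigurationSignature(byte[] config) {
    CRC32C crc = new CRC32C();               // JDK CRC-32C checksum
    crc.update(config, 0, config.length);
    return crc.getValue();
}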
154,428
public static int getPartitionForParameter ( VoltType partitionType , Object invocationParameter ) { return instance . get ( ) . getSecond ( ) . getHashedPartitionForParameter ( partitionType , invocationParameter ) ; }
Given the type of the targeting partition parameter and an object , coerce the object to the correct type and hash it . NOTE NOTE NOTE NOTE! THIS SHOULD BE THE ONLY WAY THAT YOU FIGURE OUT THE PARTITIONING FOR A PARAMETER! ON SERVER
47
53
154,429
public static Pair < ? extends UndoAction , TheHashinator > updateHashinator ( Class < ? extends TheHashinator > hashinatorImplementation , long version , byte configBytes [ ] , boolean cooked ) { //Use a cached/canonical hashinator if possible TheHashinator existingHashinator = m_cachedHashinators . get ( version ) ; if ( existingHashinator == null ) { existingHashinator = constructHashinator ( hashinatorImplementation , configBytes , cooked ) ; TheHashinator tempVal = m_cachedHashinators . putIfAbsent ( version , existingHashinator ) ; if ( tempVal != null ) { existingHashinator = tempVal ; } } //Do a CAS loop to maintain a global instance while ( true ) { final Pair < Long , ? extends TheHashinator > snapshot = instance . get ( ) ; if ( version > snapshot . getFirst ( ) ) { final Pair < Long , ? extends TheHashinator > update = Pair . of ( version , existingHashinator ) ; if ( instance . compareAndSet ( snapshot , update ) ) { if ( ! m_elasticallyModified ) { if ( ! update . getSecond ( ) . pIsPristine ( ) ) { // This is not a lock protected (atomic) but it should be fine because // release() should only be called by the one thread that successfully // updated the hashinator hostLogger . debug ( "The Hashinator has been elastically modified." ) ; m_elasticallyModified = true ; } } // Note: Only undo is ever called and only from a failure in @BalancePartitions return Pair . of ( new UndoAction ( ) { @ Override public void release ( ) { } @ Override public void undo ( ) { boolean rolledBack = instance . compareAndSet ( update , snapshot ) ; if ( ! rolledBack ) { hostLogger . info ( "Didn't roll back hashinator because it wasn't set to expected hashinator" ) ; } } } , existingHashinator ) ; } } else { return Pair . of ( new UndoAction ( ) { @ Override public void release ( ) { } @ Override public void undo ( ) { } } , existingHashinator ) ; } } }
Update the hashinator in a thread safe manner with a newer version of the hash function . A version number must be provided and the new config will only be used if it is greater than the current version of the hash function .
473
45
154,430
public static Map < Integer , Integer > getRanges ( int partition ) { return instance . get ( ) . getSecond ( ) . pGetRanges ( partition ) ; }
Get the ranges the given partition is assigned to .
37
10
154,431
public static HashinatorSnapshotData serializeConfiguredHashinator ( ) throws IOException { Pair < Long , ? extends TheHashinator > currentInstance = instance . get ( ) ; byte [ ] cookedData = currentInstance . getSecond ( ) . getCookedBytes ( ) ; return new HashinatorSnapshotData ( cookedData , currentInstance . getFirst ( ) ) ; }
Get optimized configuration data for wire serialization .
80
9
154,432
public static Pair < ? extends UndoAction , TheHashinator > updateConfiguredHashinator ( long version , byte config [ ] ) { return updateHashinator ( getConfiguredHashinatorClass ( ) , version , config , true ) ; }
Update the current configured hashinator class . Used by snapshot restore .
51
13
154,433
public static VoltTable getPartitionKeys ( TheHashinator hashinator , VoltType type ) { // get partitionKeys response table so we can copy it final VoltTable partitionKeys ; switch ( type ) { case INTEGER : partitionKeys = hashinator . m_integerPartitionKeys . get ( ) ; break ; case STRING : partitionKeys = hashinator . m_stringPartitionKeys . get ( ) ; break ; case VARBINARY : partitionKeys = hashinator . m_varbinaryPartitionKeys . get ( ) ; break ; default : return null ; } // return a clone because if the table is used at all in the voltdb process, // (like by an NT procedure), // you can corrupt the various offsets and positions in the underlying buffer return partitionKeys . semiDeepCopy ( ) ; }
Get a VoltTable containing the partition keys for each partition that can be found for the given hashinator . May be missing some partitions during elastic rebalance when the partitions don't own enough of the ring to be probed
172
45
154,434
public void append ( long startDrId , long endDrId , long spUniqueId , long mpUniqueId ) { assert ( startDrId <= endDrId && ( m_map . isEmpty ( ) || startDrId > end ( m_map . span ( ) ) ) ) ; addRange ( startDrId , endDrId , spUniqueId , mpUniqueId ) ; }
Appends a range to the tracker . The range has to be after the last DrId of the tracker .
83
22
154,435
public void truncate ( long newTruncationPoint ) { if ( newTruncationPoint < getFirstDrId ( ) ) { return ; } final Iterator < Range < Long > > iter = m_map . asRanges ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { final Range < Long > next = iter . next ( ) ; if ( end ( next ) < newTruncationPoint ) { iter . remove ( ) ; } else if ( next . contains ( newTruncationPoint ) ) { iter . remove ( ) ; m_map . add ( range ( newTruncationPoint , end ( next ) ) ) ; return ; } else { break ; } } m_map . add ( range ( newTruncationPoint , newTruncationPoint ) ) ; }
Truncate the tracker to the given safe point . After truncation the new safe point will be the first DrId of the tracker . If the new safe point is before the first DrId of the tracker it's a no - op .
175
49
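A worked trace of the truncation semantics, with illustrative DrId ranges:

// tracker holds [5,10] and [15,20]; truncate(17):
//   [5,10]  ends before 17  -> removed
//   [15,20] contains 17     -> replaced by [17,20], method returns
//   getFirstDrId() is now 17 (the new safe point)
// truncate(3) on the same tracker is a no-op, since 3 < getFirstDrId().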
154,436
public void mergeTracker ( DRConsumerDrIdTracker tracker ) { final long newSafePoint = Math . max ( tracker . getSafePointDrId ( ) , getSafePointDrId ( ) ) ; m_map . addAll ( tracker . m_map ) ; truncate ( newSafePoint ) ; m_lastSpUniqueId = Math . max ( m_lastSpUniqueId , tracker . m_lastSpUniqueId ) ; m_lastMpUniqueId = Math . max ( m_lastMpUniqueId , tracker . m_lastMpUniqueId ) ; }
Merge the given tracker with the current tracker . Ranges can overlap . After the merge the current tracker will be truncated to the larger safe point .
123
31
154,437
private JSONObject readJSONObjFromWire ( MessagingChannel messagingChannel ) throws IOException , JSONException { ByteBuffer messageBytes = messagingChannel . readMessage ( ) ; JSONObject jsObj = new JSONObject ( new String ( messageBytes . array ( ) , StandardCharsets . UTF_8 ) ) ; return jsObj ; }
Read a length prefixed JSON message
70
7
154,438
private JSONObject processJSONResponse ( MessagingChannel messagingChannel , Set < String > activeVersions , boolean checkVersion ) throws IOException , JSONException { // read the json response from socketjoiner with version info JSONObject jsonResponse = readJSONObjFromWire ( messagingChannel ) ; if ( ! checkVersion ) { return jsonResponse ; } VersionChecker versionChecker = m_acceptor . getVersionChecker ( ) ; String remoteVersionString = jsonResponse . getString ( VERSION_STRING ) ; String remoteBuildString = jsonResponse . getString ( BUILD_STRING ) ; boolean remoteAcceptsLocalVersion = jsonResponse . getBoolean ( VERSION_COMPATIBLE ) ; if ( remoteVersionString . equals ( versionChecker . getVersionString ( ) ) ) { if ( ! versionChecker . getBuildString ( ) . equals ( remoteBuildString ) ) { // ignore test/eclipse build string so tests still work if ( ! versionChecker . getBuildString ( ) . equals ( "VoltDB" ) && ! remoteBuildString . equals ( "VoltDB" ) ) { org . voltdb . VoltDB . crashLocalVoltDB ( "For VoltDB version " + versionChecker . getVersionString ( ) + " git tag/hash is not identical across the cluster. Node join failed.\n" + " joining build string: " + versionChecker . getBuildString ( ) + "\n" + " existing build string: " + remoteBuildString , false , null ) ; return null ; } } } else if ( ! remoteAcceptsLocalVersion ) { if ( ! versionChecker . isCompatibleVersionString ( remoteVersionString ) ) { org . voltdb . VoltDB . crashLocalVoltDB ( "Cluster contains nodes running VoltDB version " + remoteVersionString + " which is incompatibile with local version " + versionChecker . getVersionString ( ) + ".\n" , false , null ) ; return null ; } } //Do this only after we think we are compatible. activeVersions . add ( remoteVersionString ) ; return jsonResponse ; }
Read version info from a socket and check compatibility . After verifying versions , return whether a paused start is indicated : true if paused start , otherwise normal start .
456
28
154,439
private SocketChannel createLeaderSocket ( SocketAddress hostAddr , ConnectStrategy mode ) throws IOException { SocketChannel socket ; int connectAttempts = 0 ; do { try { socket = SocketChannel . open ( ) ; socket . socket ( ) . connect ( hostAddr , 5000 ) ; } catch ( java . net . ConnectException | java . nio . channels . UnresolvedAddressException | java . net . NoRouteToHostException | java . net . PortUnreachableException e ) { // reset the socket to null for loop purposes socket = null ; if ( mode == ConnectStrategy . PROBE ) { return null ; } if ( ( ++ connectAttempts % 8 ) == 0 ) { LOG . warn ( "Joining primary failed: " + e + " retrying.." ) ; } try { Thread . sleep ( 250 ) ; // milliseconds } catch ( InterruptedException dontcare ) { } } } while ( socket == null ) ; return socket ; }
Create socket to the leader node
205
6
154,440
private SocketChannel connectToHost ( SocketAddress hostAddr ) throws IOException { SocketChannel socket = null ; while ( socket == null ) { try { socket = SocketChannel . open ( hostAddr ) ; } catch ( java . net . ConnectException e ) { LOG . warn ( "Joining host failed: " + e . getMessage ( ) + " retrying.." ) ; try { Thread . sleep ( 250 ) ; // milliseconds } catch ( InterruptedException dontcare ) { } } } return socket ; }
Create socket to the given host
110
6
154,441
private RequestHostIdResponse requestHostId ( MessagingChannel messagingChannel , Set < String > activeVersions ) throws Exception { VersionChecker versionChecker = m_acceptor . getVersionChecker ( ) ; activeVersions . add ( versionChecker . getVersionString ( ) ) ; JSONObject jsObj = new JSONObject ( ) ; jsObj . put ( TYPE , ConnectionType . REQUEST_HOSTID . name ( ) ) ; // put the version compatibility status in the json jsObj . put ( VERSION_STRING , versionChecker . getVersionString ( ) ) ; // Advertise the port we are going to listen on based on config jsObj . put ( PORT , m_internalPort ) ; // If config specified an internal interface use that. // Otherwise the leader will echo back what we connected on if ( ! m_internalInterface . isEmpty ( ) ) { jsObj . put ( ADDRESS , m_internalInterface ) ; } // communicate configuration and node state jsObj = m_acceptor . decorate ( jsObj , Optional . empty ( ) ) ; jsObj . put ( MAY_EXCHANGE_TS , true ) ; byte jsBytes [ ] = jsObj . toString ( 4 ) . getBytes ( StandardCharsets . UTF_8 ) ; ByteBuffer requestHostIdBuffer = ByteBuffer . allocate ( 4 + jsBytes . length ) ; requestHostIdBuffer . putInt ( jsBytes . length ) ; requestHostIdBuffer . put ( jsBytes ) . flip ( ) ; messagingChannel . writeMessage ( requestHostIdBuffer ) ; // read the json response from socketjoiner with version info and validate it JSONObject leaderInfo = processJSONResponse ( messagingChannel , activeVersions , true ) ; // read the json response sent by HostMessenger with HostID JSONObject jsonObj = readJSONObjFromWire ( messagingChannel ) ; return new RequestHostIdResponse ( leaderInfo , jsonObj ) ; }
Connection handshake to the leader ask the leader to assign a host Id for current node .
412
17
154,442
public void addFragment ( byte [ ] planHash , int outputDepId , ByteBuffer parameterSet ) { addFragment ( planHash , null , outputDepId , parameterSet ) ; }
Add a pre - planned fragment .
41
7
154,443
public void addCustomFragment ( byte [ ] planHash , int outputDepId , ByteBuffer parameterSet , byte [ ] fragmentPlan , String stmtText ) { FragmentData item = new FragmentData ( ) ; item . m_planHash = planHash ; item . m_outputDepId = outputDepId ; item . m_parameterSet = parameterSet ; item . m_fragmentPlan = fragmentPlan ; item . m_stmtText = stmtText . getBytes ( ) ; m_items . add ( item ) ; }
Add an unplanned fragment .
119
6
154,444
public static FragmentTaskMessage createWithOneFragment ( long initiatorHSId , long coordinatorHSId , long txnId , long uniqueId , boolean isReadOnly , byte [ ] planHash , int outputDepId , ParameterSet params , boolean isFinal , boolean isForReplay , boolean isNPartTxn , long timestamp ) { ByteBuffer parambytes = null ; if ( params != null ) { parambytes = ByteBuffer . allocate ( params . getSerializedSize ( ) ) ; try { params . flattenToBuffer ( parambytes ) ; parambytes . flip ( ) ; } catch ( IOException e ) { VoltDB . crashLocalVoltDB ( "Failed to serialize parameter for fragment: " + params . toString ( ) , true , e ) ; } } FragmentTaskMessage ret = new FragmentTaskMessage ( initiatorHSId , coordinatorHSId , txnId , uniqueId , isReadOnly , isFinal , isForReplay , isNPartTxn , timestamp ) ; ret . addFragment ( planHash , outputDepId , parambytes ) ; return ret ; }
Convenience factory method to replace constructor that includes arrays of stuff .
239
14
154,445
public void setEmptyForRestart ( int outputDepId ) { m_emptyForRestart = true ; ParameterSet blank = ParameterSet . emptyParameterSet ( ) ; ByteBuffer mt = ByteBuffer . allocate ( blank . getSerializedSize ( ) ) ; try { blank . flattenToBuffer ( mt ) ; } catch ( IOException ioe ) { // Shouldn't ever happen, just bail out to not-obviously equivalent behavior mt = ByteBuffer . allocate ( 2 ) ; mt . putShort ( ( short ) 0 ) ; } addFragment ( EMPTY_HASH , outputDepId , mt ) ; }
fragment with the provided outputDepId
135
9
154,446
public static < R , C , V > ImmutableTable < R , C , V > copyOf ( Table < ? extends R , ? extends C , ? extends V > table ) { if ( table instanceof ImmutableTable ) { @ SuppressWarnings ( "unchecked" ) ImmutableTable < R , C , V > parameterizedTable = ( ImmutableTable < R , C , V > ) table ; return parameterizedTable ; } else { int size = table . size ( ) ; switch ( size ) { case 0 : return of ( ) ; case 1 : Cell < ? extends R , ? extends C , ? extends V > onlyCell = Iterables . getOnlyElement ( table . cellSet ( ) ) ; return ImmutableTable . < R , C , V > of ( onlyCell . getRowKey ( ) , onlyCell . getColumnKey ( ) , onlyCell . getValue ( ) ) ; default : ImmutableSet . Builder < Cell < R , C , V > > cellSetBuilder = new ImmutableSet . Builder < Cell < R , C , V > > ( size ) ; for ( Cell < ? extends R , ? extends C , ? extends V > cell : table . cellSet ( ) ) { /* * Must cast to be able to create a Cell<R, C, V> rather than a * Cell<? extends R, ? extends C, ? extends V> */ cellSetBuilder . add ( cellOf ( ( R ) cell . getRowKey ( ) , ( C ) cell . getColumnKey ( ) , ( V ) cell . getValue ( ) ) ) ; } return RegularImmutableTable . forCells ( cellSetBuilder . build ( ) ) ; } } }
Returns an immutable copy of the provided table .
364
9
154,447
private void replaceSocket ( Socket newSocket ) { synchronized ( m_socketLock ) { closeSocket ( m_socket ) ; if ( m_eos . get ( ) ) { closeSocket ( newSocket ) ; m_socket = null ; } else { m_socket = newSocket ; } } }
Set the socket to newSocket unless we re shutting down . The most reliable way to ensure the importer thread exits is to close its socket .
64
29
154,448
public synchronized void dumpWatches ( PrintWriter pwriter , boolean byPath ) { if ( byPath ) { for ( Entry < String , HashSet < Watcher > > e : watchTable . entrySet ( ) ) { pwriter . println ( e . getKey ( ) ) ; for ( Watcher w : e . getValue ( ) ) { pwriter . print ( "\t0x" ) ; pwriter . print ( Long . toHexString ( ( ( ServerCnxn ) w ) . getSessionId ( ) ) ) ; pwriter . print ( "\n" ) ; } } } else { for ( Entry < Watcher , HashSet < String > > e : watch2Paths . entrySet ( ) ) { pwriter . print ( "0x" ) ; pwriter . println ( Long . toHexString ( ( ( ServerCnxn ) e . getKey ( ) ) . getSessionId ( ) ) ) ; for ( String path : e . getValue ( ) ) { pwriter . print ( "\t" ) ; pwriter . println ( path ) ; } } } }
String representation of watches . Warning may be large!
240
10
154,449
void setBaseValues ( CatalogMap < ? extends CatalogType > parentMap , String name ) { if ( name == null ) { throw new CatalogException ( "Null value where it shouldn't be." ) ; } m_parentMap = parentMap ; m_typename = name ; }
This is my lazy hack to avoid using reflection to instantiate records .
61
14
154,450
public void validate ( ) throws IllegalArgumentException , IllegalAccessException { for ( Field field : getClass ( ) . getDeclaredFields ( ) ) { if ( CatalogType . class . isAssignableFrom ( field . getType ( ) ) ) { CatalogType ct = ( CatalogType ) field . get ( this ) ; assert ( ct . getCatalog ( ) == getCatalog ( ) ) : ct . getCatalogPath ( ) + " has wrong catalog" ; } if ( CatalogReference . class . isAssignableFrom ( field . getType ( ) ) ) { @ SuppressWarnings ( "unchecked" ) CatalogReference < ? extends CatalogType > cr = ( CatalogReference < ? extends CatalogType > ) field . get ( this ) ; if ( cr . m_value != null ) { assert ( cr . m_value . getCatalog ( ) == getCatalog ( ) ) : cr . m_value . getCatalogPath ( ) + " has wrong catalog" ; } } if ( CatalogMap . class . isAssignableFrom ( field . getClass ( ) ) ) { @ SuppressWarnings ( "unchecked" ) CatalogMap < ? extends CatalogType > cm = ( CatalogMap < ? extends CatalogType > ) field . get ( this ) ; for ( CatalogType ct : cm ) { assert ( ct . getCatalog ( ) == getCatalog ( ) ) : ct . getCatalogPath ( ) + " has wrong catalog" ; ct . validate ( ) ; } } } }
Fails an assertion if any child of this object doesn t think it s part of the same catalog .
328
21
154,451
private void writeExternalStreamStates ( JSONStringer stringer ) throws JSONException { stringer . key ( DISABLED_EXTERNAL_STREAMS ) . array ( ) ; for ( int partition : m_disabledExternalStreams ) { stringer . value ( partition ) ; } stringer . endArray ( ) ; }
Writes external streams state for partitions into snapshot digest .
70
11
154,452
public static VoltTable unionTables ( Collection < VoltTable > operands ) { VoltTable result = null ; // Locate the first non-null table to get the schema for ( VoltTable vt : operands ) { if ( vt != null ) { result = new VoltTable ( vt . getTableSchema ( ) ) ; result . setStatusCode ( vt . getStatusCode ( ) ) ; break ; } } if ( result != null ) { result . addTables ( operands ) ; result . resetRowPosition ( ) ; } return result ; }
Utility to aggregate a list of tables sharing a schema . Common for sysprocs to do this to aggregate results .
122
24
154,453
public static boolean tableContainsString ( VoltTable t , String s , boolean caseSenstive ) { if ( t . getRowCount ( ) == 0 ) { return false ; } if ( ! caseSenstive ) { s = s . toLowerCase ( ) ; } VoltTableRow row = t . fetchRow ( 0 ) ; do { for ( int i = 0 ; i < t . getColumnCount ( ) ; i ++ ) { if ( t . getColumnType ( i ) == VoltType . STRING ) { String value = row . getString ( i ) ; if ( value == null ) { continue ; } if ( ! caseSenstive ) { value = value . toLowerCase ( ) ; } if ( value . contains ( s ) ) { return true ; } } } } while ( row . advanceRow ( ) ) ; return false ; }
Return true if any string field in the table contains param s .
185
13
154,454
public static Object [ ] tableRowAsObjects ( VoltTableRow row ) { Object [ ] result = new Object [ row . getColumnCount ( ) ] ; for ( int i = 0 ; i < row . getColumnCount ( ) ; i ++ ) { result [ i ] = row . get ( i , row . getColumnType ( i ) ) ; } return result ; }
Get a VoltTableRow as an array of Objects of the right type
81
14
154,455
public static Stream < VoltTableRow > stream ( VoltTable table ) { return StreamSupport . stream ( new VoltTableSpliterator ( table , 0 , table . getRowCount ( ) ) , false ) ; }
Not yet public API for VoltTable and Java 8 streams
44
11
154,456
public void register ( ZKMBeanInfo bean , ZKMBeanInfo parent ) throws JMException { assert bean != null ; String path = null ; if ( parent != null ) { path = mapBean2Path . get ( parent ) ; assert path != null ; } path = makeFullPath ( path , parent ) ; mapBean2Path . put ( bean , path ) ; mapName2Bean . put ( bean . getName ( ) , bean ) ; if ( bean . isHidden ( ) ) return ; MBeanServer mbs = ManagementFactory . getPlatformMBeanServer ( ) ; ObjectName oname = makeObjectName ( path , bean ) ; try { mbs . registerMBean ( bean , oname ) ; } catch ( JMException e ) { LOG . warn ( "Failed to register MBean " + bean . getName ( ) ) ; throw e ; } }
Registers a new MBean with the platform MBean server .
197
15
154,457
private void unregister ( String path , ZKMBeanInfo bean ) throws JMException { if ( path == null ) return ; if ( ! bean . isHidden ( ) ) { MBeanServer mbs = ManagementFactory . getPlatformMBeanServer ( ) ; try { mbs . unregisterMBean ( makeObjectName ( path , bean ) ) ; } catch ( JMException e ) { LOG . warn ( "Failed to unregister MBean " + bean . getName ( ) ) ; throw e ; } } }
Unregister the MBean identified by the path .
116
11
154,458
public void unregister ( ZKMBeanInfo bean ) { if ( bean == null ) return ; String path = mapBean2Path . get ( bean ) ; try { unregister ( path , bean ) ; } catch ( InstanceNotFoundException e ) { LOG . warn ( "InstanceNotFoundException during unregister usually means more than one Zookeeper server has been running in a single JVM" ) ; LOG . warn ( "InstanceNotFoundException during unregister can be safely ignored during automated tests." ) ; } catch ( JMException e ) { LOG . warn ( "Error during unregister" , e ) ; } mapBean2Path . remove ( bean ) ; mapName2Bean . remove ( bean . getName ( ) ) ; }
Unregister MBean .
162
6
154,459
public void unregisterAll ( ) { for ( Map . Entry < ZKMBeanInfo , String > e : mapBean2Path . entrySet ( ) ) { try { unregister ( e . getValue ( ) , e . getKey ( ) ) ; } catch ( JMException e1 ) { LOG . warn ( "Error during unregister" , e1 ) ; } } mapBean2Path . clear ( ) ; mapName2Bean . clear ( ) ; }
Unregister all currently registered MBeans
104
8
154,460
public String makeFullPath ( String prefix , String ... name ) { StringBuilder sb = new StringBuilder ( prefix == null ? "/" : ( prefix . equals ( "/" ) ? prefix : prefix + "/" ) ) ; boolean first = true ; for ( String s : name ) { if ( s == null ) continue ; if ( ! first ) { sb . append ( "/" ) ; } else first = false ; sb . append ( s ) ; } return sb . toString ( ) ; }
Generate a filesystem - like path .
109
8
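Usage sketch; null name segments are skipped and a "/" prefix is not doubled:

makeFullPath(null, "a", "b");     // -> "/a/b"
makeFullPath("/", "x");           // -> "/x"
makeFullPath("/a", "b", null);    // -> "/a/b"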
154,461
protected ObjectName makeObjectName ( String path , ZKMBeanInfo bean ) throws MalformedObjectNameException { if ( path == null ) return null ; StringBuilder beanName = new StringBuilder ( CommonNames . DOMAIN + ":" ) ; int counter = 0 ; counter = tokenize ( beanName , path , counter ) ; tokenize ( beanName , bean . getName ( ) , counter ) ; beanName . deleteCharAt ( beanName . length ( ) - 1 ) ; try { return new ObjectName ( beanName . toString ( ) ) ; } catch ( MalformedObjectNameException e ) { LOG . warn ( "Invalid name \"" + beanName . toString ( ) + "\" for class " + bean . getClass ( ) . toString ( ) ) ; throw e ; } }
Builds an MBean path and creates an ObjectName instance using the path .
173
17
154,462
@ Override public final int getType ( ) { if ( userTypeModifier == null ) { throw Error . runtimeError ( ErrorCode . U_S0500 , "Type" ) ; } return userTypeModifier . getType ( ) ; }
interface specific methods
54
3
154,463
public Object castToType ( SessionInterface session , Object a , Type type ) { return convertToType ( session , a , type ) ; }
Explicit casts are handled by this method . SQL standard 6 . 12 rules for enforcement of size precision and scale are implemented . For CHARACTER values it performs truncation in all cases of long strings .
30
41
154,464
public Object convertToTypeJDBC ( SessionInterface session , Object a , Type type ) { return convertToType ( session , a , type ) ; }
Convert type for JDBC . Same as convertToType but supports non - standard SQL conversions supported by JDBC
32
23
154,465
public static int getJDBCTypeCode ( int type ) { switch ( type ) { case Types . SQL_BLOB : return Types . BLOB ; case Types . SQL_CLOB : return Types . CLOB ; case Types . SQL_BIGINT : return Types . BIGINT ; case Types . SQL_BINARY : return Types . BINARY ; case Types . SQL_VARBINARY : return Types . VARBINARY ; case Types . SQL_BIT : case Types . SQL_BIT_VARYING : return Types . BIT ; default : return type ; } }
translate an internal type number to JDBC type number if a type is not supported internally it is returned without translation
125
23
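Usage sketch of the translation: internal SQL_* codes collapse onto the standard JDBC constants, and any code missing from the switch passes through unchanged:

getJDBCTypeCode(Types.SQL_VARBINARY);    // -> Types.VARBINARY
getJDBCTypeCode(Types.SQL_BIT_VARYING);  // -> Types.BIT
getJDBCTypeCode(12345);                  // -> 12345 (no translation)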
154,466
public static Type getType ( int type , int collation , long precision , int scale ) { switch ( type ) { case Types . SQL_ALL_TYPES : return SQL_ALL_TYPES ; // return SQL_ALL_TYPES; // needs changes to Expression type resolution case Types . SQL_CHAR : case Types . SQL_VARCHAR : case Types . VARCHAR_IGNORECASE : case Types . SQL_CLOB : return CharacterType . getCharacterType ( type , precision ) ; case Types . SQL_INTEGER : return SQL_INTEGER ; case Types . SQL_SMALLINT : return SQL_SMALLINT ; case Types . SQL_BIGINT : return SQL_BIGINT ; case Types . TINYINT : return TINYINT ; case Types . SQL_FLOAT : if ( precision > 53 ) { throw Error . error ( ErrorCode . X_42592 , "" + precision ) ; } // $FALL-THROUGH$ case Types . SQL_REAL : case Types . SQL_DOUBLE : return SQL_DOUBLE ; case Types . SQL_NUMERIC : case Types . SQL_DECIMAL : if ( precision == 0 ) { precision = NumberType . defaultNumericPrecision ; } // A VoltDB extension to disable variable scale decimals scale = 12 ; // End of VoltDB extension return NumberType . getNumberType ( type , precision , scale ) ; case Types . SQL_BOOLEAN : return SQL_BOOLEAN ; case Types . SQL_BINARY : case Types . SQL_VARBINARY : case Types . SQL_BLOB : return BinaryType . getBinaryType ( type , precision ) ; case Types . SQL_BIT : case Types . SQL_BIT_VARYING : return BitType . getBitType ( type , precision ) ; case Types . SQL_DATE : case Types . SQL_TIME : case Types . SQL_TIME_WITH_TIME_ZONE : case Types . SQL_TIMESTAMP : case Types . SQL_TIMESTAMP_WITH_TIME_ZONE : return DateTimeType . getDateTimeType ( type , scale ) ; case Types . SQL_INTERVAL_YEAR : case Types . SQL_INTERVAL_YEAR_TO_MONTH : case Types . SQL_INTERVAL_MONTH : case Types . SQL_INTERVAL_DAY : case Types . SQL_INTERVAL_DAY_TO_HOUR : case Types . SQL_INTERVAL_DAY_TO_MINUTE : case Types . SQL_INTERVAL_DAY_TO_SECOND : case Types . SQL_INTERVAL_HOUR : case Types . SQL_INTERVAL_HOUR_TO_MINUTE : case Types . SQL_INTERVAL_HOUR_TO_SECOND : case Types . SQL_INTERVAL_MINUTE : case Types . SQL_INTERVAL_MINUTE_TO_SECOND : case Types . SQL_INTERVAL_SECOND : return IntervalType . getIntervalType ( type , precision , scale ) ; case Types . VOLT_GEOGRAPHY_POINT : return VOLT_GEOGRAPHY_POINT ; case Types . VOLT_GEOGRAPHY : return new VoltGeographyType ( precision ) ; case Types . OTHER : return OTHER ; default : throw Error . runtimeError ( ErrorCode . U_S0500 , "Type" ) ; } }
Enforces precision and scale limits on type
751
8
154,467
public boolean handleEvent ( Event e ) { switch ( e . id ) { case Event . SCROLL_LINE_UP : case Event . SCROLL_LINE_DOWN : case Event . SCROLL_PAGE_UP : case Event . SCROLL_PAGE_DOWN : case Event . SCROLL_ABSOLUTE : iX = sbHoriz . getValue ( ) ; iY = iRowHeight * sbVert . getValue ( ) ; repaint ( ) ; return true ; } return super . handleEvent ( e ) ; }
would require browsers to use the Java plugin .
122
9
154,468
public static byte [ ] getConfigureBytes ( int partitionCount , int tokenCount ) { Preconditions . checkArgument ( partitionCount > 0 ) ; Preconditions . checkArgument ( tokenCount > partitionCount ) ; Buckets buckets = new Buckets ( partitionCount , tokenCount ) ; ElasticHashinator hashinator = new ElasticHashinator ( buckets . getTokens ( ) ) ; return hashinator . getConfigBytes ( ) ; }
Convenience method for generating a deterministic token distribution for the ring based on a given partition count and tokens per partition . Each partition will have N tokens placed randomly on the ring .
93
37
154,469
private byte [ ] toBytes ( ) { ByteBuffer buf = ByteBuffer . allocate ( 4 + ( m_tokenCount * 8 ) ) ; buf . putInt ( m_tokenCount ) ; int lastToken = Integer . MIN_VALUE ; for ( int ii = 0 ; ii < m_tokenCount ; ii ++ ) { final long ptr = m_tokens + ( ii * 8 ) ; final int token = Bits . unsafe . getInt ( ptr ) ; Preconditions . checkArgument ( token >= lastToken ) ; lastToken = token ; final int pid = Bits . unsafe . getInt ( ptr + 4 ) ; buf . putInt ( token ) ; buf . putInt ( pid ) ; } return buf . array ( ) ; }
Serializes the configuration into bytes also updates the currently cached m_configBytes .
159
16
154,470
public ElasticHashinator addTokens ( NavigableMap < Integer , Integer > tokensToAdd ) { // figure out the interval long interval = deriveTokenInterval ( m_tokensMap . get ( ) . keySet ( ) ) ; Map < Integer , Integer > tokens = Maps . newTreeMap ( ) ; for ( Map . Entry < Integer , Integer > e : m_tokensMap . get ( ) . entrySet ( ) ) { if ( tokensToAdd . containsKey ( e . getKey ( ) ) ) { continue ; } // see if we are moving an intermediate token forward if ( isIntermediateToken ( e . getKey ( ) , interval ) ) { Map . Entry < Integer , Integer > floorEntry = tokensToAdd . floorEntry ( e . getKey ( ) ) ; // If the two tokens belong to the same partition and bucket, we are moving the one on the ring // forward, so remove it from the ring if ( floorEntry != null && floorEntry . getValue ( ) . equals ( e . getValue ( ) ) && containingBucket ( floorEntry . getKey ( ) , interval ) == containingBucket ( e . getKey ( ) , interval ) ) { continue ; } } tokens . put ( e . getKey ( ) , e . getValue ( ) ) ; } tokens . putAll ( tokensToAdd ) ; return new ElasticHashinator ( ImmutableSortedMap . copyOf ( tokens ) ) ; }
Add the given tokens to the ring and generate the new hashinator . The current hashinator is not changed .
310
22
154,471
@ Override public Map < Integer , Integer > pPredecessors ( int partition ) { Map < Integer , Integer > predecessors = new TreeMap < Integer , Integer > ( ) ; UnmodifiableIterator < Map . Entry < Integer , Integer > > iter = m_tokensMap . get ( ) . entrySet ( ) . iterator ( ) ; Set < Integer > pTokens = new HashSet < Integer > ( ) ; while ( iter . hasNext ( ) ) { Map . Entry < Integer , Integer > next = iter . next ( ) ; if ( next . getValue ( ) == partition ) { pTokens . add ( next . getKey ( ) ) ; } } for ( Integer token : pTokens ) { Map . Entry < Integer , Integer > predecessor = null ; if ( token != null ) { predecessor = m_tokensMap . get ( ) . headMap ( token ) . lastEntry ( ) ; // If null, it means partition is the first one on the ring, so predecessor // should be the last entry on the ring because it wraps around. if ( predecessor == null ) { predecessor = m_tokensMap . get ( ) . lastEntry ( ) ; } } if ( predecessor != null && predecessor . getValue ( ) != partition ) { predecessors . put ( predecessor . getKey ( ) , predecessor . getValue ( ) ) ; } } return predecessors ; }
Find the predecessors of the given partition on the ring . This method runs in linear time use with caution when the set of partitions is large .
294
28
154,472
@ Override public Pair < Integer , Integer > pPredecessor ( int partition , int token ) { Integer partForToken = m_tokensMap . get ( ) . get ( token ) ; if ( partForToken != null && partForToken == partition ) { Map . Entry < Integer , Integer > predecessor = m_tokensMap . get ( ) . headMap ( token ) . lastEntry ( ) ; if ( predecessor == null ) { predecessor = m_tokensMap . get ( ) . lastEntry ( ) ; } if ( predecessor . getKey ( ) != token ) { return Pair . of ( predecessor . getKey ( ) , predecessor . getValue ( ) ) ; } else { // given token is the only one on the ring, umpossible throw new RuntimeException ( "There is only one token on the hash ring" ) ; } } else { // given token doesn't map to partition throw new IllegalArgumentException ( "The given token " + token + " does not map to partition " + partition ) ; } }
Find the predecessor of the given token on the ring .
223
11
154,473
@ Override public Map < Integer , Integer > pGetRanges ( int partition ) { Map < Integer , Integer > ranges = new TreeMap < Integer , Integer > ( ) ; Integer first = null ; // start of the very first token on the ring Integer start = null ; // start of a range UnmodifiableIterator < Map . Entry < Integer , Integer > > iter = m_tokensMap . get ( ) . entrySet ( ) . iterator ( ) ; // Iterate through the token map to find the ranges assigned to // the given partition while ( iter . hasNext ( ) ) { Map . Entry < Integer , Integer > next = iter . next ( ) ; int token = next . getKey ( ) ; int pid = next . getValue ( ) ; if ( first == null ) { first = token ; } // if start is not null, there's an open range, now is // the time to close it. // else there is no open range, keep on going. if ( start != null ) { //Range end is inclusive so do token - 1 ranges . put ( start , token - 1 ) ; start = null ; } if ( pid == partition ) { // if start is null, there's no open range, start one. start = token ; } } // if there is an open range when we get here // It is the last token which implicity ends at the next max value if ( start != null ) { assert first != null ; ranges . put ( start , Integer . MAX_VALUE ) ; } return ranges ; }
This runs in linear time with respect to the number of tokens on the ring .
325
16
154,474
private byte [ ] toCookedBytes ( ) { // Allocate for a int pair per token/partition ID entry, plus a size. ByteBuffer buf = ByteBuffer . allocate ( 4 + ( m_tokenCount * 8 ) ) ; buf . putInt ( m_tokenCount ) ; // Keep tokens and partition ids separate to aid compression. for ( int zz = 3 ; zz >= 0 ; zz -- ) { int lastToken = Integer . MIN_VALUE ; for ( int ii = 0 ; ii < m_tokenCount ; ii ++ ) { int token = Bits . unsafe . getInt ( m_tokens + ( ii * 8 ) ) ; Preconditions . checkArgument ( token >= lastToken ) ; lastToken = token ; token = token >>> ( zz * 8 ) ; token = token & 0xFF ; buf . put ( ( byte ) token ) ; } } for ( int ii = 0 ; ii < m_tokenCount ; ii ++ ) { buf . putInt ( Bits . unsafe . getInt ( m_tokens + ( ii * 8 ) + 4 ) ) ; } try { return CompressionService . gzipBytes ( buf . array ( ) ) ; } catch ( IOException e ) { throw new RuntimeException ( "Failed to compress bytes" , e ) ; } }
Returns compressed config bytes .
284
5
154,475
private static synchronized void trackAllocatedHashinatorBytes ( long bytes ) { final long allocated = m_allocatedHashinatorBytes . addAndGet ( bytes ) ; if ( allocated > HASHINATOR_GC_THRESHHOLD ) { hostLogger . warn ( allocated + " bytes of hashinator data has been allocated" ) ; if ( m_emergencyGCThread == null || m_emergencyGCThread . getState ( ) == State . TERMINATED ) { m_emergencyGCThread = new Thread ( new Runnable ( ) { @ Override public void run ( ) { hostLogger . warn ( "Invoking System.gc() to recoup hashinator bytes" ) ; System . gc ( ) ; try { Thread . sleep ( 2000 ) ; } catch ( InterruptedException e ) { } hostLogger . info ( m_allocatedHashinatorBytes . get ( ) + " bytes of hashinator allocated after GC" ) ; } } , "Hashinator GC thread" ) ; m_emergencyGCThread . start ( ) ; } } }
Track allocated bytes and invoke System . gc to encourage reclamation if it is growing large
229
18
154,476
private static long deriveTokenInterval ( ImmutableSortedSet < Integer > tokens ) { long interval = 0 ; int count = 4 ; int prevToken = Integer . MIN_VALUE ; UnmodifiableIterator < Integer > tokenIter = tokens . iterator ( ) ; while ( tokenIter . hasNext ( ) && count -- > 0 ) { int nextToken = tokenIter . next ( ) ; interval = Math . max ( interval , nextToken - prevToken ) ; prevToken = nextToken ; } return interval ; }
Figure out the token interval from the first 3 ranges assuming that there is at most one token that doesn t fall onto the bucket boundary at any given time . The largest range will be the hashinator s bucket size .
108
43
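A worked trace with illustrative tokens; prevToken starts at Integer.MIN_VALUE, so the first gap is measured from the ring origin:

// tokens: {MIN, MIN+100, MIN+250, MIN+300}   (MIN = Integer.MIN_VALUE)
// gaps examined over the 4 loop iterations: 0, 100, 150, 50
// interval = max(gaps) = 150, taken as the hashinator's bucket size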
154,477
private static int containingBucket ( int token , long interval ) { return ( int ) ( ( ( ( long ) token - Integer . MIN_VALUE ) / interval ) * interval + Integer . MIN_VALUE ) ; }
Calculate the boundary of the bucket that countain the given token given the token interval .
46
19
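A standalone sketch of the same bucket arithmetic (illustrative name, not the VoltDB class); buckets of the given interval tile the ring starting at Integer.MIN_VALUE:

static int bucketStart(int token, long interval) {
    // offset from the ring origin, floored to a bucket multiple, shifted back
    return (int) ((((long) token - Integer.MIN_VALUE) / interval) * interval
            + Integer.MIN_VALUE);
}
// bucketStart(Integer.MIN_VALUE + 250, 100) == Integer.MIN_VALUE + 200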
154,478
@ Override public boolean isOrderDeterministic ( ) { assert ( m_children != null ) ; assert ( m_children . size ( ) == 1 ) ; // This implementation is very close to AbstractPlanNode's implementation of this // method, except that we assert just one child. // Java doesn't allow calls to super-super-class methods via super.super. AbstractPlanNode child = m_children . get ( 0 ) ; if ( ! child . isOrderDeterministic ( ) ) { m_nondeterminismDetail = child . m_nondeterminismDetail ; return false ; } return true ; }
Order determinism for insert nodes depends on the determinism of child nodes . For subqueries producing unordered rows the insert will be considered order - nondeterministic .
134
34
154,479
private void logBatch ( final CatalogContext context , final AdHocPlannedStmtBatch batch , final Object [ ] userParams ) { final int numStmts = batch . getPlannedStatementCount ( ) ; final int numParams = userParams == null ? 0 : userParams . length ; final String readOnly = batch . readOnly ? "yes" : "no" ; final String singlePartition = batch . isSinglePartitionCompatible ( ) ? "yes" : "no" ; final String user = getUsername ( ) ; final String [ ] groupNames = context . authSystem . getGroupNamesForUser ( user ) ; final String groupList = StringUtils . join ( groupNames , ' ' ) ; //String[] stmtArray = batch.stmts.stream().map(s -> new String(s.sql, Charsets.UTF_8)).toArray(String[]::new); adhocLog . debug ( String . format ( "=== statements=%d parameters=%d read-only=%s single-partition=%s user=%s groups=[%s]" , numStmts , numParams , readOnly , singlePartition , user , groupList ) ) ; for ( int i = 0 ; i < batch . getPlannedStatementCount ( ) ; i ++ ) { AdHocPlannedStatement stmt = batch . getPlannedStatement ( i ) ; String sql = stmt . sql == null ? "SQL_UNKNOWN" : new String ( stmt . sql , Charsets . UTF_8 ) ; adhocLog . debug ( String . format ( "Statement #%d: %s" , i + 1 , sql ) ) ; } if ( userParams != null ) { for ( int i = 0 ; i < userParams . length ; ++ i ) { Object value = userParams [ i ] ; final String valueString = ( value != null ? value . toString ( ) : "NULL" ) ; adhocLog . debug ( String . format ( "Parameter #%d: %s" , i + 1 , valueString ) ) ; } } }
Log ad hoc batch info
469
5
154,480
static CompletableFuture < ClientResponse > processExplainDefaultProc ( AdHocPlannedStmtBatch planBatch ) { Database db = VoltDB . instance ( ) . getCatalogContext ( ) . database ; // there better be one statement if this is really SQL // from a default procedure assert ( planBatch . getPlannedStatementCount ( ) == 1 ) ; AdHocPlannedStatement ahps = planBatch . getPlannedStatement ( 0 ) ; String sql = new String ( ahps . sql , StandardCharsets . UTF_8 ) ; String explain = planBatch . explainStatement ( 0 , db , false ) ; VoltTable vt = new VoltTable ( new VoltTable . ColumnInfo ( "STATEMENT_NAME" , VoltType . STRING ) , new VoltTable . ColumnInfo ( "SQL_STATEMENT" , VoltType . STRING ) , new VoltTable . ColumnInfo ( "EXECUTION_PLAN" , VoltType . STRING ) ) ; vt . addRow ( "sql0" , sql , explain ) ; ClientResponseImpl response = new ClientResponseImpl ( ClientResponseImpl . SUCCESS , ClientResponse . UNINITIALIZED_APP_STATUS_CODE , null , new VoltTable [ ] { vt } , null ) ; CompletableFuture < ClientResponse > fut = new CompletableFuture <> ( ) ; fut . complete ( response ) ; return fut ; }
Explain Proc for a default proc is routed through the regular Explain path , using ad hoc planning . Take the result from that async process and format it like the explains for other procedures .
313
37
154,481
private final CompletableFuture < ClientResponse > createAdHocTransaction ( final AdHocPlannedStmtBatch plannedStmtBatch , final boolean isSwapTables ) throws VoltTypeException { ByteBuffer buf = null ; try { buf = plannedStmtBatch . flattenPlanArrayToBuffer ( ) ; } catch ( IOException e ) { VoltDB . crashLocalVoltDB ( e . getMessage ( ) , true , e ) ; } assert ( buf . hasArray ( ) ) ; // create the execution site task String procedureName = null ; Object [ ] params = null ; // pick the sysproc based on the presence of partition info // HSQL (or PostgreSQL) does not specifically implement AdHoc SP // -- instead, use its always-SP implementation of AdHoc boolean isSinglePartition = plannedStmtBatch . isSinglePartitionCompatible ( ) || m_isConfiguredForNonVoltDBBackend ; if ( isSwapTables ) { procedureName = "@SwapTablesCore" ; params = new Object [ ] { buf . array ( ) } ; } else if ( isSinglePartition ) { if ( plannedStmtBatch . isReadOnly ( ) ) { procedureName = "@AdHoc_RO_SP" ; } else { procedureName = "@AdHoc_RW_SP" ; } int type = VoltType . NULL . getValue ( ) ; // replicated table read is single-part without a partitioning param // I copied this from below, but I'm not convinced that the above statement is correct // or that the null behavior here either (a) ever actually happens or (b) has the // desired intent. Object partitionParam = plannedStmtBatch . partitionParam ( ) ; byte [ ] param = null ; if ( partitionParam != null ) { type = VoltType . typeFromClass ( partitionParam . getClass ( ) ) . getValue ( ) ; param = VoltType . valueToBytes ( partitionParam ) ; } // Send the partitioning parameter and its type along so that the site can check if // it's mis-partitioned. Type is needed to re-hashinate for command log re-init. params = new Object [ ] { param , ( byte ) type , buf . array ( ) } ; } else { if ( plannedStmtBatch . isReadOnly ( ) ) { procedureName = "@AdHoc_RO_MP" ; } else { procedureName = "@AdHoc_RW_MP" ; } params = new Object [ ] { buf . array ( ) } ; } return callProcedure ( procedureName , params ) ; }
Take a set of adhoc plans and pass them off to the right transactional adhoc variant .
567
22
154,482
private void collectParameterValueExpressions ( AbstractExpression expr , List < AbstractExpression > pves ) { if ( expr == null ) { return ; } if ( expr instanceof TupleValueExpression || expr instanceof AggregateExpression ) { // Create a matching PVE for this expression to be used on the EE side // to get the original expression value addCorrelationParameterValueExpression ( expr , pves ) ; return ; } collectParameterValueExpressions ( expr . getLeft ( ) , pves ) ; collectParameterValueExpressions ( expr . getRight ( ) , pves ) ; if ( expr . getArgs ( ) != null ) { for ( AbstractExpression arg : expr . getArgs ( ) ) { collectParameterValueExpressions ( arg , pves ) ; } } }
Collect a matching parameter value expression ( PVE ) for each TVE or aggregate inside the Row subquery
169
7
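The traversal above is a pre-order walk that stops descending once it reaches a collectible node; a minimal generic version over a hypothetical Expr type, standing in for AbstractExpression:

import java.util.ArrayList;
import java.util.List;

public class ExprWalkDemo {
    static class Expr {
        boolean collectible;            // plays the role of "TVE or aggregate"
        Expr left, right;
        List<Expr> args = new ArrayList<>();
    }

    static void collect(Expr e, List<Expr> out) {
        if (e == null) {
            return;
        }
        if (e.collectible) {            // match found: record it, do not descend further
            out.add(e);
            return;
        }
        collect(e.left, out);
        collect(e.right, out);
        for (Expr arg : e.args) {
            collect(arg, out);
        }
    }
}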
154,483
public static Result newPSMResult ( int type , String label , Object value ) { Result result = newResult ( ResultConstants . VALUE ) ; result . errorCode = type ; result . mainString = label ; result . valueData = value ; return result ; }
For internal PSM return values
57
6
154,484
public static Result newPreparedExecuteRequest ( Type [ ] types , long statementId ) { Result result = newResult ( ResultConstants . EXECUTE ) ; result . metaData = ResultMetaData . newSimpleResultMetaData ( types ) ; result . statementID = statementId ; result . navigator . add ( ValuePool . emptyObjectArray ) ; return result ; }
For SQLEXECUTE : execution of SQL prepared statements . The parameters are set afterwards , as the Result is reused
80
23
154,485
public static Result newCallResponse ( Type [ ] types , long statementId , Object [ ] values ) { Result result = newResult ( ResultConstants . CALL_RESPONSE ) ; result . metaData = ResultMetaData . newSimpleResultMetaData ( types ) ; result . statementID = statementId ; result . navigator . add ( values ) ; return result ; }
For CALL_RESPONSE : execution of SQL callable statements .
80
15
154,486
public static Result newUpdateResultRequest ( Type [ ] types , long id ) { Result result = newResult ( ResultConstants . UPDATE_RESULT ) ; result . metaData = ResultMetaData . newUpdateResultMetaData ( types ) ; result . id = id ; result . navigator . add ( new Object [ ] { } ) ; return result ; }
For UPDATE_RESULT . The parameters are set afterwards , as the Result is reused
76
15
154,487
public void setPreparedResultUpdateProperties ( Object [ ] parameterValues ) { if ( navigator . getSize ( ) == 1 ) { ( ( RowSetNavigatorClient ) navigator ) . setData ( 0 , parameterValues ) ; } else { navigator . clear ( ) ; navigator . add ( parameterValues ) ; } }
For UPDATE_RESULT results . The parameters are set by this method , as the Result is reused
72
18
154,488
public void setPreparedExecuteProperties ( Object [ ] parameterValues , int maxRows , int fetchSize ) { mode = ResultConstants . EXECUTE ; if ( navigator . getSize ( ) == 1 ) { ( ( RowSetNavigatorClient ) navigator ) . setData ( 0 , parameterValues ) ; } else { navigator . clear ( ) ; navigator . add ( parameterValues ) ; } updateCount = maxRows ; this . fetchSize = fetchSize ; }
For SQLEXECUTE results . The parameters are set by this method , as the Result is reused
106
19
154,489
public static Result newBatchedExecuteResponse ( int [ ] updateCounts , Result generatedResult , Result e ) { Result result = newResult ( ResultConstants . BATCHEXECRESPONSE ) ; result . addChainedResult ( generatedResult ) ; result . addChainedResult ( e ) ; Type [ ] types = new Type [ ] { Type . SQL_INTEGER } ; result . metaData = ResultMetaData . newSimpleResultMetaData ( types ) ; Object [ ] [ ] table = new Object [ updateCounts . length ] [ ] ; for ( int i = 0 ; i < updateCounts . length ; i ++ ) { table [ i ] = new Object [ ] { ValuePool . getInt ( updateCounts [ i ] ) } ; } ( ( RowSetNavigatorClient ) result . navigator ) . setData ( table ) ; return result ; }
For BATCHEXECRESPONSE : the response to a BATCHEXECUTE or BATCHEXECDIRECT
191
23
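Each update count becomes one single-column INTEGER row; the same boxing loop in plain Java, with Integer.valueOf standing in for ValuePool.getInt:

public class BatchRowsDemo {
    public static void main(String[] args) {
        int[] updateCounts = { 1, 0, 3 };
        Object[][] table = new Object[updateCounts.length][];
        for (int i = 0; i < updateCounts.length; i++) {
            table[i] = new Object[] { Integer.valueOf(updateCounts[i]) };
        }
        System.out.println(java.util.Arrays.deepToString(table));  // prints [[1], [0], [3]]
    }
}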
154,490
public void setPrepareOrExecuteProperties ( String sql , int maxRows , int fetchSize , int statementReturnType , int resultSetType , int resultSetConcurrency , int resultSetHoldability , int keyMode , int [ ] generatedIndexes , String [ ] generatedNames ) { mainString = sql ; updateCount = maxRows ; this . fetchSize = fetchSize ; this . statementReturnType = statementReturnType ; rsScrollability = resultSetType ; rsConcurrency = resultSetConcurrency ; rsHoldability = resultSetHoldability ; generateKeys = keyMode ; generatedMetaData = ResultMetaData . newGeneratedColumnsMetaData ( generatedIndexes , generatedNames ) ; }
For both EXECDIRECT and PREPARE
150
10
154,491
private static void reset ( ) { description = null ; argName = null ; longopt = null ; type = String . class ; required = false ; numberOfArgs = Option . UNINITIALIZED ; optionalArg = false ; valuesep = ( char ) 0 ; }
Resets the member variables to their default values .
58
10
154,492
public static OptionBuilder hasOptionalArgs ( ) { OptionBuilder . numberOfArgs = Option . UNLIMITED_VALUES ; OptionBuilder . optionalArg = true ; return INSTANCE ; }
The next Option can have an unlimited number of optional arguments .
40
12
154,493
public static OptionBuilder hasOptionalArgs ( int numArgs ) { OptionBuilder . numberOfArgs = numArgs ; OptionBuilder . optionalArg = true ; return INSTANCE ; }
The next Option can have the specified number of optional arguments .
36
12
154,494
public static Option create ( ) throws IllegalArgumentException { if ( longopt == null ) { OptionBuilder . reset ( ) ; throw new IllegalArgumentException ( "must specify longopt" ) ; } return create ( null ) ; }
Create an Option using the current settings
50
7
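These builder methods belong to Apache Commons CLI's (since-deprecated) static OptionBuilder; typical usage chains the setters and finishes with create. The option name below is made up for illustration:

import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;

public class OptionDemo {
    @SuppressWarnings("static-access")   // OptionBuilder is a static fluent builder
    public static void main(String[] args) {
        // An option taking up to two optional values, e.g. --bounds [min [max]]
        Option bounds = OptionBuilder.withLongOpt("bounds")
                                     .withDescription("optional min and max")
                                     .hasOptionalArgs(2)
                                     .create();   // longopt is set, so create() won't throw
        System.out.println(bounds.getLongOpt());  // prints "bounds"
    }
}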
154,495
private String checkProcedureIdentifier ( final String identifier , final String statement ) throws VoltCompilerException { String retIdent = checkIdentifierStart ( identifier , statement ) ; if ( retIdent . contains ( "." ) ) { String msg = String . format ( "Invalid procedure name containing dots \"%s\" in DDL: \"%s\"" , identifier , statement . substring ( 0 , statement . length ( ) - 1 ) ) ; throw m_compiler . new VoltCompilerException ( msg ) ; } return retIdent ; }
Check whether or not a procedure name is acceptable .
116
11
154,496
void resolveHostname ( boolean synchronous ) { Runnable r = new Runnable ( ) { @ Override public void run ( ) { String remoteHost = ReverseDNSCache . hostnameOrAddress ( m_remoteSocketAddress . getAddress ( ) ) ; if ( ! remoteHost . equals ( m_remoteSocketAddress . getAddress ( ) . getHostAddress ( ) ) ) { m_remoteHostname = remoteHost ; m_remoteHostAndAddressAndPort = remoteHost + m_remoteHostAndAddressAndPort ; m_toString = VoltPort . this . toString ( ) + ":" + m_remoteHostAndAddressAndPort ; } } } ; if ( synchronous ) { r . run ( ) ; } else { /* * Start the reverse DNS lookup in background because it might be * very slow if the hostname is not specified in local /etc/hosts. */ try { ReverseDNSCache . submit ( r ) ; } catch ( RejectedExecutionException e ) { networkLog . debug ( "Reverse DNS lookup for " + m_remoteSocketAddress + " rejected because the queue was full" ) ; } } }
Do a reverse DNS lookup of the remote end . Done in a separate thread unless synchronous is specified . If asynchronous lookup is requested the task may be dropped and resolution may never occur
248
36
154,497
public void setInterests ( int opsToAdd , int opsToRemove ) { // must be done atomically with changes to m_running synchronized ( m_lock ) { int oldInterestOps = m_interestOps ; m_interestOps = ( m_interestOps | opsToAdd ) & ( ~ opsToRemove ) ; if ( oldInterestOps != m_interestOps && ! m_running ) { /* * If this is a write, optimistically assume the write * will succeed and try it without using the selector */ m_network . addToChangeList ( this , ( opsToAdd & SelectionKey . OP_WRITE ) != 0 ) ; } } }
Change the desired interest key set
141
6
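The interest-ops update is plain bit masking; a worked example using NIO's SelectionKey constants:

import java.nio.channels.SelectionKey;

public class InterestOpsDemo {
    public static void main(String[] args) {
        int interestOps = SelectionKey.OP_READ;   // currently read-only
        int opsToAdd = SelectionKey.OP_WRITE;
        int opsToRemove = SelectionKey.OP_READ;
        // Same masking as setInterests: add first, then clear the removals.
        interestOps = (interestOps | opsToAdd) & ~opsToRemove;
        System.out.println(interestOps == SelectionKey.OP_WRITE);  // prints true
    }
}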
154,498
public static String adHocSQLFromInvocationForDebug ( StoredProcedureInvocation invocation ) { assert ( invocation . getProcName ( ) . startsWith ( "@AdHoc" ) ) ; ParameterSet params = invocation . getParams ( ) ; // the final param is the byte array we need byte [ ] serializedBatchData = ( byte [ ] ) params . getParam ( params . size ( ) - 1 ) ; Pair < Object [ ] , AdHocPlannedStatement [ ] > data = decodeSerializedBatchData ( serializedBatchData ) ; Object [ ] userparams = data . getFirst ( ) ; AdHocPlannedStatement [ ] statements = data . getSecond ( ) ; StringBuilder sb = new StringBuilder ( ) ; if ( statements . length == 0 ) { sb . append ( "ADHOC INVOCATION HAS NO SQL" ) ; } else if ( statements . length == 1 ) { sb . append ( adHocSQLStringFromPlannedStatement ( statements [ 0 ] , userparams ) ) ; } else { // > 1 sb . append ( "BEGIN ADHOC_SQL_BATCH {\n" ) ; for ( AdHocPlannedStatement stmt : statements ) { sb . append ( adHocSQLStringFromPlannedStatement ( stmt , userparams ) ) . append ( "\n" ) ; } sb . append ( "} END ADHOC_SQL_BATCH" ) ; } return sb . toString ( ) ; }
Get a string containing the SQL statements and any parameters for a given batch passed to an ad - hoc query . Used for debugging and logging .
333
28
154,499
public static String adHocSQLStringFromPlannedStatement ( AdHocPlannedStatement statement , Object [ ] userparams ) { final int MAX_PARAM_LINE_CHARS = 120 ; StringBuilder sb = new StringBuilder ( ) ; String sql = new String ( statement . sql , Charsets . UTF_8 ) ; sb . append ( sql ) ; Object [ ] params = paramsForStatement ( statement , userparams ) ; // convert params to strings of a certain max length for ( int i = 0 ; i < params . length ; i ++ ) { Object param = params [ i ] ; String paramLineStr = String . format ( " Param %d: %s" , i , param . toString ( ) ) ; // trim param line if it's silly long if ( paramLineStr . length ( ) > MAX_PARAM_LINE_CHARS ) { paramLineStr = paramLineStr . substring ( 0 , MAX_PARAM_LINE_CHARS - 3 ) ; paramLineStr += "..." ; } sb . append ( ' ' ) . append ( paramLineStr ) ; } return sb . toString ( ) ; }
Get a string containing a SQL statement and any parameters for a given AdHocPlannedStatement . Used for debugging and logging .
249
26
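The parameter lines are clipped with a simple substring-and-ellipsis rule; the same truncation in isolation (String.repeat needs Java 11+):

public class ParamTruncateDemo {
    public static void main(String[] args) {
        final int MAX_PARAM_LINE_CHARS = 120;
        String paramLine = " Param 0: " + "x".repeat(200);
        if (paramLine.length() > MAX_PARAM_LINE_CHARS) {
            paramLine = paramLine.substring(0, MAX_PARAM_LINE_CHARS - 3) + "...";
        }
        System.out.println(paramLine.length());  // prints 120; the line ends in "..."
    }
}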