Columns (name: dtype, min to max):
idx: int64, 0 to 165k
question: string, length 73 to 4.15k
target: string, length 5 to 918
len_question: int64, 21 to 890
len_target: int64, 3 to 255
155,400
public void writeAll(java.sql.ResultSet rs, boolean includeColumnNames) throws SQLException, IOException {
    if (includeColumnNames) {
        writeColumnNames(rs);
    }
    while (rs.next()) {
        writeNext(resultService.getColumnValues(rs));
    }
}
Writes the entire ResultSet to a CSV file.
67
11
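A minimal usage sketch for the writeAll method above (assumes an opencsv-style CSVWriter; the connection URL, query, and file name are illustrative):

import java.io.FileWriter;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

try (Connection conn = DriverManager.getConnection("jdbc:hsqldb:mem:demo");
     Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery("SELECT id, name FROM users")) {
    CSVWriter writer = new CSVWriter(new FileWriter("users.csv"));
    writer.writeAll(rs, true);  // writes the column-name header, then one CSV line per row
    writer.close();
}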
155,401
public void addRecord(String key, String value) throws TarMalformatException, IOException {
    if (key == null || value == null || key.length() < 1 || value.length() < 1) {
        throw new TarMalformatException(RB.singleton.getString(RB.ZERO_WRITE));
    }
    int lenWithoutIlen = key.length() + value.length() + 3;
    // "Ilen" means Initial Length field.  +3 = SPACE + = + \n
    int lenW = 0;  // lenW = Length With initial-length-field
    if (lenWithoutIlen < 8) {
        lenW = lenWithoutIlen + 1;  // Takes just 1 char to report total
    } else if (lenWithoutIlen < 97) {
        lenW = lenWithoutIlen + 2;  // Takes 2 chars to report this total
    } else if (lenWithoutIlen < 996) {
        lenW = lenWithoutIlen + 3;  // Takes 3...
    } else if (lenWithoutIlen < 9995) {
        lenW = lenWithoutIlen + 4;  // ditto
    } else if (lenWithoutIlen < 99994) {
        lenW = lenWithoutIlen + 5;
    } else {
        throw new TarMalformatException(RB.singleton.getString(RB.PIF_TOOBIG, 99991));
    }
    writer.write(Integer.toString(lenW));
    writer.write(' ');
    writer.write(key);
    writer.write('=');
    writer.write(value);
    writer.write('\n');
    writer.flush();  // Does this do anything with a BAOS?
}
I guess the initial length field is supposed to be in units of characters, not bytes?
366
17
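A worked example of the self-sizing length field in addRecord above (the key and value are illustrative): for key = "mtime" and value = "1234567890", lenWithoutIlen = 5 + 10 + 3 = 18 (the +3 covers the space, the '=', and the trailing newline). Since 18 < 97, the length digits take 2 characters, so lenW = 20 and the emitted record is:

// "20 mtime=1234567890\n"
// 2 length digits + 1 space + 5 key chars + 1 '=' + 10 value chars + 1 newline = 20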
155,402
synchronized public void shutdown() {
    m_shutdown.set(true);
    if (m_isExecutorServiceLocal) {
        try {
            m_es.shutdown();
            m_es.awaitTermination(365, TimeUnit.DAYS);
        } catch (InterruptedException e) {
            repairLog.warn("Unexpected interrupted exception", e);
        }
    }
}
shutdown silences the babysitter and causes watches to not reset. Note that shutting down will churn ephemeral ZK nodes; shutdown allows the programmer to not set watches on nodes from a terminated session.
84
41
155,403
public static Pair<BabySitter, List<String>> blockingFactory(ZooKeeper zk, String dir, Callback cb)
        throws InterruptedException, ExecutionException {
    ExecutorService es = CoreUtils.getCachedSingleThreadExecutor("Babysitter-" + dir, 15000);
    Pair<BabySitter, List<String>> babySitter = blockingFactory(zk, dir, cb, es);
    babySitter.getFirst().m_isExecutorServiceLocal = true;
    return babySitter;
}
Create a new BabySitter and block on reading the initial children list.
122
15
155,404
public static Pair<BabySitter, List<String>> blockingFactory(ZooKeeper zk, String dir, Callback cb, ExecutorService es)
        throws InterruptedException, ExecutionException {
    BabySitter bs = new BabySitter(zk, dir, cb, es);
    List<String> initialChildren;
    try {
        initialChildren = bs.m_eventHandler.call();
    } catch (Exception e) {
        throw new ExecutionException(e);
    }
    return new Pair<BabySitter, List<String>>(bs, initialChildren);
}
Create a new BabySitter and block on reading the initial children list. Use the provided ExecutorService to queue events to, rather than creating a private ExecutorService. The initial set of children will be retrieved in the current thread and not in the ExecutorService, because it is assumed this is being called from the ExecutorService.
127
65
155,405
public static BabySitter nonblockingFactory(ZooKeeper zk, String dir, Callback cb, ExecutorService es)
        throws InterruptedException, ExecutionException {
    BabySitter bs = new BabySitter(zk, dir, cb, es);
    bs.m_es.submit(bs.m_eventHandler);
    return bs;
}
Create a new BabySitter and make sure it reads the initial children list. Use the provided ExecutorService to queue events to, rather than creating a private ExecutorService.
81
35
155,406
void checkClosed() throws SQLException {
    if (isClosed) {
        throw Util.sqlException(ErrorCode.X_07501);
    }
    if (connection.isClosed) {
        close();
        throw Util.sqlException(ErrorCode.X_08503);
    }
}
An internal check for closed statements.
67
7
155,407
void performPostExecute() throws SQLException {
    resultOut.clearLobResults();
    generatedResult = null;
    if (resultIn == null) {
        return;
    }
    Result current = resultIn;
    while (current.getChainedResult() != null) {
        current = current.getUnlinkChainedResult();
        if (current.getType() == ResultConstants.WARNING) {
            SQLWarning w = Util.sqlWarning(current);
            if (rootWarning == null) {
                rootWarning = w;
            } else {
                rootWarning.setNextWarning(w);
            }
        } else if (current.getType() == ResultConstants.ERROR) {
            errorResult = current;
        } else if (current.getType() == ResultConstants.DATA) {
            generatedResult = current;
        }
    }
    if (resultIn.isData()) {
        currentResultSet = new JDBCResultSet(connection.sessionProxy, this,
            resultIn, resultIn.metaData, connection.connProperties);
    }
}
Processes chained warnings and any generated-columns result set.
222
10
155,408
boolean getMoreResults(int current) throws SQLException {
    checkClosed();
    if (resultIn == null || !resultIn.isData()) {
        return false;
    }
    if (resultSetCounter == 0) {
        resultSetCounter++;
        return true;
    }
    if (currentResultSet != null && current != KEEP_CURRENT_RESULT) {
        currentResultSet.close();
    }
    resultIn = null;
    return false;
}
Not yet correct for multiple ResultSets. Should keep track of the previous ResultSet objects to be able to close them.
98
24
155,409
void closeResultData() throws SQLException {
    if (currentResultSet != null) {
        currentResultSet.close();
    }
    if (generatedResultSet != null) {
        generatedResultSet.close();
    }
    generatedResultSet = null;
    generatedResult = null;
    resultIn = null;
}
See comment for getMoreResults.
65
7
155,410
public boolean compatibleWithTable(VoltTable table) {
    String candidateName = getTableName(table);
    // table can't have the same name as the view
    if (candidateName.equals(viewName)) {
        return false;
    }
    // view is for a different table
    if (candidateName.equals(srcTableName) == false) {
        return false;
    }
    try {
        // ignore ret value here - just looking to not throw
        int groupColIndex = table.getColumnIndex(groupColName);
        VoltType groupColType = table.getColumnType(groupColIndex);
        if (groupColType == VoltType.DECIMAL) {
            // no longer a good type to group
            return false;
        }
        // check the sum col is still valid
        int sumColIndex = table.getColumnIndex(sumColName);
        VoltType sumColType = table.getColumnType(sumColIndex);
        if ((sumColType == VoltType.TINYINT) || (sumColType == VoltType.SMALLINT)
                || (sumColType == VoltType.INTEGER)) {
            return true;
        } else {
            // no longer a good type to sum
            return false;
        }
    } catch (IllegalArgumentException e) {
        // column index is bad
        return false;
    }
}
Check if the view could apply to the provided table unchanged.
269
12
155,411
@Beta
@GwtIncompatible // concurrency
public static ThreadFactory platformThreadFactory() {
    if (!isAppEngine()) {
        return Executors.defaultThreadFactory();
    }
    try {
        return (ThreadFactory) Class.forName("com.google_voltpatches.appengine.api.ThreadManager")
            .getMethod("currentRequestThreadFactory")
            .invoke(null);
    } catch (IllegalAccessException e) {
        throw new RuntimeException("Couldn't invoke ThreadManager.currentRequestThreadFactory", e);
    } catch (ClassNotFoundException e) {
        throw new RuntimeException("Couldn't invoke ThreadManager.currentRequestThreadFactory", e);
    } catch (NoSuchMethodException e) {
        throw new RuntimeException("Couldn't invoke ThreadManager.currentRequestThreadFactory", e);
    } catch (InvocationTargetException e) {
        throw Throwables.propagate(e.getCause());
    }
}
Returns a default thread factory used to create new threads.
210
11
155,412
public static void verifySnapshots(final List<String> directories, final Set<String> snapshotNames) {
    FileFilter filter = new SnapshotFilter();
    if (!snapshotNames.isEmpty()) {
        filter = new SpecificSnapshotFilter(snapshotNames);
    }
    Map<String, Snapshot> snapshots = new HashMap<String, Snapshot>();
    for (String directory : directories) {
        SnapshotUtil.retrieveSnapshotFiles(new File(directory), snapshots, filter, true,
            SnapshotPathType.SNAP_PATH, CONSOLE_LOG);
    }
    if (snapshots.isEmpty()) {
        System.out.println("Snapshot corrupted");
        System.out.println("No files found");
    }
    for (Snapshot s : snapshots.values()) {
        System.out.println(SnapshotUtil.generateSnapshotReport(s.getTxnId(), s).getSecond());
    }
}
Perform snapshot verification.
213
5
155,413
public long lowestEquivalentValue(final long value) {
    final int bucketIndex = getBucketIndex(value);
    final int subBucketIndex = getSubBucketIndex(value, bucketIndex);
    long thisValueBaseLevel = valueFromIndex(bucketIndex, subBucketIndex);
    return thisValueBaseLevel;
}
Get the lowest value that is equivalent to the given value within the histogram's resolution, where "equivalent" means that value samples recorded for any two equivalent values are counted in a common total count.
70
38
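A usage sketch of the equivalent-value queries with the public HdrHistogram API (the recorded value is illustrative):

import org.HdrHistogram.Histogram;

Histogram h = new Histogram(3600000000L, 3);  // up to ~1 hour in microseconds, 3 significant digits
h.recordValue(2500);
long low  = h.lowestEquivalentValue(2500);   // smallest value that falls in 2500's bucket
long high = h.highestEquivalentValue(2500);  // largest value in that same bucket
// all samples in [low, high] share one total count at this resolution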
155,414
public double getMean() {
    if (getTotalCount() == 0) {
        return 0.0;
    }
    recordedValuesIterator.reset();
    double totalValue = 0;
    while (recordedValuesIterator.hasNext()) {
        HistogramIterationValue iterationValue = recordedValuesIterator.next();
        totalValue += medianEquivalentValue(iterationValue.getValueIteratedTo())
            * iterationValue.getCountAtValueIteratedTo();
    }
    return (totalValue * 1.0) / getTotalCount();
}
Get the computed mean value of all recorded values in the histogram
118
13
155,415
public double getStdDeviation() {
    if (getTotalCount() == 0) {
        return 0.0;
    }
    final double mean = getMean();
    double geometric_deviation_total = 0.0;
    recordedValuesIterator.reset();
    while (recordedValuesIterator.hasNext()) {
        HistogramIterationValue iterationValue = recordedValuesIterator.next();
        Double deviation = (medianEquivalentValue(iterationValue.getValueIteratedTo()) * 1.0) - mean;
        geometric_deviation_total += (deviation * deviation) * iterationValue.getCountAddedInThisIterationStep();
    }
    double std_deviation = Math.sqrt(geometric_deviation_total / getTotalCount());
    return std_deviation;
}
Get the computed standard deviation of all recorded values in the histogram
173
13
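Continuing the histogram sketch above: getMean and getStdDeviation both weight each bucket's median-equivalent value by its count, so the results are approximations bounded by the histogram's configured precision:

double mean   = h.getMean();          // sum(medianEquivalent(v) * count(v)) / totalCount
double stdDev = h.getStdDeviation();  // sqrt(sum(count(v) * (medianEquivalent(v) - mean)^2) / totalCount)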
155,416
public void reestablishTotalCount() {
    // On overflow, the totalCount accumulated counter will (always) not match the total of counts
    long totalCounted = 0;
    for (int i = 0; i < countsArrayLength; i++) {
        totalCounted += getCountAtIndex(i);
    }
    setTotalCount(totalCounted);
}
Reestablish the internal notion of totalCount by recalculating it from recorded values.
75
17
155,417
static void setTableColumnsForSubquery(Table table, QueryExpression queryExpression, boolean fullIndex) {
    table.columnList = queryExpression.getColumns();
    table.columnCount = queryExpression.getColumnCount();
    table.createPrimaryKey();
    if (fullIndex) {
        int[] colIndexes = null;
        colIndexes = table.getNewColumnMap();
        ArrayUtil.fillSequence(colIndexes);
        table.fullIndex = table.createIndexForColumns(colIndexes);
    }
}
For table subqueries
124
5
155,418
public void considerCandidatePlan ( CompiledPlan plan , AbstractParsedStmt parsedStmt ) { //System.out.println(String.format("[Raw plan]:%n%s", rawplan.rootPlanGraph.toExplainPlanString())); // run the set of microptimizations, which may return many plans (or not) ScanDeterminizer . apply ( plan , m_detMode ) ; // add in the sql to the plan plan . sql = m_sql ; // compute resource usage using the single stats collector m_stats = new PlanStatistics ( ) ; AbstractPlanNode planGraph = plan . rootPlanGraph ; // compute statistics about a plan planGraph . computeEstimatesRecursively ( m_stats , m_estimates , m_paramHints ) ; // compute the cost based on the resources using the current cost model plan . cost = m_costModel . getPlanCost ( m_stats ) ; // filename for debug output String filename = String . valueOf ( m_planId ++ ) ; //* enable for debug */ System.out.println("DEBUG [new plan]: Cost:" + plan.cost + plan.rootPlanGraph.toExplainPlanString()); // find the minimum cost plan if ( m_bestPlan == null || plan . cost < m_bestPlan . cost ) { // free the PlanColumns held by the previous best plan m_bestPlan = plan ; m_bestFilename = filename ; //* enable for debug */ System.out.println("DEBUG [Best plan] updated ***\n"); } outputPlan ( plan , planGraph , filename ) ; }
Picks the best cost plan for a given raw plan
345
11
155,419
public static PartitionDRGateway getInstance ( int partitionId , ProducerDRGateway producerGateway , StartAction startAction ) { // if this is a primary cluster in a DR-enabled scenario // try to load the real version of this class PartitionDRGateway pdrg = null ; if ( producerGateway != null ) { pdrg = tryToLoadProVersion ( ) ; } if ( pdrg == null ) { pdrg = new PartitionDRGateway ( ) ; } // init the instance and return try { pdrg . init ( partitionId , producerGateway , startAction ) ; } catch ( Exception e ) { VoltDB . crashLocalVoltDB ( e . getMessage ( ) , true , e ) ; } // Regarding apparent lack of thread safety: this is called serially // while looping over the SPIs during database initialization assert ! m_partitionDRGateways . containsKey ( partitionId ) ; ImmutableMap . Builder < Integer , PartitionDRGateway > builder = ImmutableMap . builder ( ) ; builder . putAll ( m_partitionDRGateways ) ; builder . put ( partitionId , pdrg ) ; m_partitionDRGateways = builder . build ( ) ; return pdrg ; }
Load the full subclass if it should; otherwise load the no-op stub.
272
14
155,420
public String[] getUserPermissionList(String userName) {
    if (!m_enabled) {
        return m_perm_list;
    }
    if (userName == null) {
        return new String[] {};
    }
    AuthUser user = getUser(userName);
    if (user == null) {
        return new String[] {};
    }
    return user.m_permissions_list;
}
Get a user's permission list; not good for permission checking.
86
10
155,421
public void callProcedure ( AuthUser user , boolean isAdmin , int timeout , ProcedureCallback cb , String procName , Object [ ] args ) { // since we know the caller, this is safe assert ( cb != null ) ; StoredProcedureInvocation task = new StoredProcedureInvocation ( ) ; task . setProcName ( procName ) ; task . setParams ( args ) ; if ( timeout != BatchTimeoutOverrideType . NO_TIMEOUT ) { task . setBatchTimeout ( timeout ) ; } InternalAdapterTaskAttributes kattrs = new InternalAdapterTaskAttributes ( DEFAULT_INTERNAL_ADAPTER_NAME , isAdmin , connectionId ( ) ) ; assert ( m_dispatcher != null ) ; // JHH: I have no idea why we need to do this, but CL crashes if we don't. Sigh. try { task = MiscUtils . roundTripForCL ( task ) ; } catch ( Exception e ) { String msg = String . format ( "Cannot invoke procedure %s. failed to create task: %s" , procName , e . getMessage ( ) ) ; m_logger . rateLimitedLog ( SUPPRESS_INTERVAL , Level . ERROR , null , msg ) ; ClientResponseImpl cri = new ClientResponseImpl ( ClientResponse . UNEXPECTED_FAILURE , new VoltTable [ 0 ] , msg ) ; try { cb . clientCallback ( cri ) ; } catch ( Exception e1 ) { throw new IllegalStateException ( e1 ) ; } } createTransaction ( kattrs , cb , task , user ) ; }
Used to call a procedure from NTProcedureRunner. Calls createTransaction with the proper params.
354
19
155,422
public static void printSystemOut(String message1, long message2) {
    if (TRACESYSTEMOUT) {
        System.out.print(message1);
        System.out.println(message2);
    }
}
Used to print messages to System.out.
48
8
155,423
public static boolean acceptsPrecision ( int type ) { switch ( type ) { case Types . SQL_BINARY : case Types . SQL_BIT : case Types . SQL_BIT_VARYING : case Types . SQL_BLOB : case Types . SQL_CHAR : case Types . SQL_NCHAR : case Types . SQL_CLOB : case Types . NCLOB : case Types . SQL_VARBINARY : case Types . SQL_VARCHAR : case Types . SQL_NVARCHAR : case Types . VARCHAR_IGNORECASE : case Types . SQL_DECIMAL : case Types . SQL_NUMERIC : case Types . SQL_FLOAT : case Types . SQL_TIME : case Types . SQL_TIMESTAMP : case Types . SQL_INTERVAL_YEAR : case Types . SQL_INTERVAL_YEAR_TO_MONTH : case Types . SQL_INTERVAL_MONTH : case Types . SQL_INTERVAL_DAY : case Types . SQL_INTERVAL_DAY_TO_HOUR : case Types . SQL_INTERVAL_DAY_TO_MINUTE : case Types . SQL_INTERVAL_DAY_TO_SECOND : case Types . SQL_INTERVAL_HOUR : case Types . SQL_INTERVAL_HOUR_TO_MINUTE : case Types . SQL_INTERVAL_HOUR_TO_SECOND : case Types . SQL_INTERVAL_MINUTE : case Types . SQL_INTERVAL_MINUTE_TO_SECOND : case Types . SQL_INTERVAL_SECOND : case Types . VOLT_GEOGRAPHY : return true ; default : return false ; } }
Types that accept precision params in column definition or casts. CHAR, VARCHAR, and VARCHAR_IGNORECASE params are ignored when sql.enforce_strict_types is false.
361
40
155,424
public static <T> Iterable<T> cycle(T... elements) {
    return cycle(Lists.newArrayList(elements));
}
Returns an iterable whose iterators cycle indefinitely over the provided elements.
31
14
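A usage sketch assuming this is Guava's Iterables.cycle (the bound of 7 is illustrative):

import com.google.common.collect.Iterables;

Iterable<String> colors = Iterables.cycle("red", "green", "blue");
for (String c : Iterables.limit(colors, 7)) {
    System.out.print(c + " ");  // red green blue red green blue red
}
// Without limit(...), iteration over the cycled view never terminates.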
155,425
@Override
protected void coreLoadCatalog(long timestamp, final byte[] catalogBytes) throws EEException {
    LOG.trace("Loading Application Catalog...");
    int errorCode = 0;
    errorCode = nativeLoadCatalog(pointer, timestamp, catalogBytes);
    checkErrorCode(errorCode);
    //LOG.info("Loaded Catalog.");
}
Provide a serialized catalog and initialize version 0 of the engine's catalog.
74
16
155,426
@Override
public void coreUpdateCatalog(long timestamp, boolean isStreamUpdate, final String catalogDiffs) throws EEException {
    LOG.trace("Loading Application Catalog...");
    int errorCode = 0;
    errorCode = nativeUpdateCatalog(pointer, timestamp, isStreamUpdate, getStringBytes(catalogDiffs));
    checkErrorCode(errorCode);
}
Provide a catalog diff and a new catalog version and update the engine's catalog.
78
17
155,427
@Override
public int extractPerFragmentStats(int batchSize, long[] executionTimesOut) {
    m_perFragmentStatsBuffer.clear();
    // Discard the first byte since it is the timing on/off switch.
    m_perFragmentStatsBuffer.get();
    int succeededFragmentsCount = m_perFragmentStatsBuffer.getInt();
    if (executionTimesOut != null) {
        assert (executionTimesOut.length >= succeededFragmentsCount);
        for (int i = 0; i < succeededFragmentsCount; i++) {
            executionTimesOut[i] = m_perFragmentStatsBuffer.getLong();
        }
        // This is the time for the failed fragment.
        if (succeededFragmentsCount < executionTimesOut.length) {
            executionTimesOut[succeededFragmentsCount] = m_perFragmentStatsBuffer.getLong();
        }
    }
    return succeededFragmentsCount;
}
Extract the per-fragment stats from the buffer.
200
11
155,428
@Override
public VoltTable[] getStats(final StatsSelector selector, final int locators[], final boolean interval, final Long now) {
    //Clear is destructive, do it before the native call
    m_nextDeserializer.clear();
    final int numResults = nativeGetStats(pointer, selector.ordinal(), locators, interval, now);
    if (numResults == -1) {
        throwExceptionForError(ERRORCODE_ERROR);
    }
    try {
        m_nextDeserializer.readInt(); //Ignore the length of the result tables
        final VoltTable results[] = new VoltTable[numResults];
        for (int ii = 0; ii < numResults; ii++) {
            int len = m_nextDeserializer.readInt();
            byte[] bufCopy = new byte[len];
            m_nextDeserializer.readFully(bufCopy, 0, len);
            // This Table should be readonly (true), but table stats need to be updated
            // Stream stats until Stream stats are deprecated from Table stats
            results[ii] = PrivateVoltTableFactory.createVoltTableFromBuffer(ByteBuffer.wrap(bufCopy), false);
        }
        return results;
    } catch (final IOException ex) {
        LOG.error("Failed to deserialize result table for getStats" + ex);
        throw new EEException(ERRORCODE_WRONG_SERIALIZED_BYTES);
    }
}
Retrieve a set of statistics using the specified selector from the StatisticsSelector enum.
319
17
155,429
public boolean storeLargeTempTableBlock(long siteId, long blockCounter, ByteBuffer block) {
    LargeBlockTask task = LargeBlockTask.getStoreTask(new BlockId(siteId, blockCounter), block);
    return executeLargeBlockTaskSynchronously(task);
}
Store a large temp table block to disk.
61
9
155,430
public boolean loadLargeTempTableBlock(long siteId, long blockCounter, ByteBuffer block) {
    LargeBlockTask task = LargeBlockTask.getLoadTask(new BlockId(siteId, blockCounter), block);
    return executeLargeBlockTaskSynchronously(task);
}
Read a large table block from disk and write it to a ByteBuffer. The block will still be stored on disk when this operation completes.
61
27
155,431
public boolean releaseLargeTempTableBlock(long siteId, long blockCounter) {
    LargeBlockTask task = LargeBlockTask.getReleaseTask(new BlockId(siteId, blockCounter));
    return executeLargeBlockTaskSynchronously(task);
}
Delete the block with the given id from disk.
55
10
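A lifecycle sketch tying the three block operations above together (internal VoltDB API; the manager instance, siteId, and blockCounter are illustrative):

ByteBuffer block = ByteBuffer.allocate(blockSizeInBytes);
mgr.storeLargeTempTableBlock(siteId, blockCounter, block);  // spill the block to disk
mgr.loadLargeTempTableBlock(siteId, blockCounter, block);   // read it back; the file stays on disk
mgr.releaseLargeTempTableBlock(siteId, blockCounter);       // finally, delete the block's file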
155,432
public List<String> getSQLStatements() {
    List<String> sqlStatements = new ArrayList<>(plannedStatements.size());
    for (AdHocPlannedStatement plannedStatement : plannedStatements) {
        sqlStatements.add(new String(plannedStatement.sql, Constants.UTF8ENCODING));
    }
    return sqlStatements;
}
Retrieve all the SQL statement text as a list of strings.
83
13
155,433
public boolean isSinglePartitionCompatible() {
    for (AdHocPlannedStatement plannedStmt : plannedStatements) {
        if (plannedStmt.core.collectorFragment != null) {
            return false;
        }
    }
    return true;
}
Detect if the batch is compatible with single-partition optimizations.
52
9
155,434
public ByteBuffer flattenPlanArrayToBuffer ( ) throws IOException { int size = 0 ; // sizeof batch ParameterSet userParamCache = null ; if ( userParamSet == null ) { userParamCache = ParameterSet . emptyParameterSet ( ) ; } else { Object [ ] typedUserParams = new Object [ userParamSet . length ] ; int ii = 0 ; for ( AdHocPlannedStatement cs : plannedStatements ) { for ( VoltType paramType : cs . core . parameterTypes ) { if ( ii >= typedUserParams . length ) { String errorMsg = "Too few actual arguments were passed for the parameters in the sql statement(s): (" + typedUserParams . length + " vs. " + ii + ")" ; // Volt-TYPE-Exception is slightly cheating, here, should there be a more general VoltArgumentException? throw new VoltTypeException ( errorMsg ) ; } typedUserParams [ ii ] = ParameterConverter . tryToMakeCompatible ( paramType . classFromType ( ) , userParamSet [ ii ] ) ; // System.out.println("DEBUG typed parameter: " + work.userParamSet[ii] + // "using type: " + paramType + "as: " + typedUserParams[ii]); ii ++ ; } } // Each parameter referenced in each statements should be represented // exactly once in userParams. if ( ii < typedUserParams . length ) { // Volt-TYPE-Exception is slightly cheating, here, should there be a more general VoltArgumentException? String errorMsg = "Too many actual arguments were passed for the parameters in the sql statement(s): (" + typedUserParams . length + " vs. " + ii + ")" ; throw new VoltTypeException ( errorMsg ) ; } userParamCache = ParameterSet . fromArrayNoCopy ( typedUserParams ) ; } size += userParamCache . getSerializedSize ( ) ; size += 2 ; // sizeof batch for ( AdHocPlannedStatement cs : plannedStatements ) { size += cs . getSerializedSize ( ) ; } ByteBuffer buf = ByteBuffer . allocate ( size ) ; userParamCache . flattenToBuffer ( buf ) ; buf . putShort ( ( short ) plannedStatements . size ( ) ) ; for ( AdHocPlannedStatement cs : plannedStatements ) { cs . flattenToBuffer ( buf ) ; } return buf ; }
For convenience, serialization is accomplished with this single method, but deserialization is piecemeal via the static methods userParamsFromBuffer and planArrayFromBuffer, with no dummy AdHocPlannedStmtBatch receiver instance required.
525
47
155,435
public String explainStatement ( int i , Database db , boolean getJSONString ) { AdHocPlannedStatement plannedStatement = plannedStatements . get ( i ) ; String aggplan = new String ( plannedStatement . core . aggregatorFragment , Constants . UTF8ENCODING ) ; PlanNodeTree pnt = new PlanNodeTree ( ) ; try { String result = null ; JSONObject jobj = new JSONObject ( aggplan ) ; if ( getJSONString ) { result = jobj . toString ( 4 ) ; } pnt . loadFromJSONPlan ( jobj , db ) ; if ( plannedStatement . core . collectorFragment != null ) { // multi-partition query plan String collplan = new String ( plannedStatement . core . collectorFragment , Constants . UTF8ENCODING ) ; PlanNodeTree collpnt = new PlanNodeTree ( ) ; // reattach plan fragments JSONObject jobMP = new JSONObject ( collplan ) ; collpnt . loadFromJSONPlan ( jobMP , db ) ; assert ( collpnt . getRootPlanNode ( ) instanceof SendPlanNode ) ; pnt . getRootPlanNode ( ) . reattachFragment ( collpnt . getRootPlanNode ( ) ) ; if ( getJSONString ) { result += "\n" + jobMP . toString ( 4 ) ; } } if ( ! getJSONString ) { result = pnt . getRootPlanNode ( ) . toExplainPlanString ( ) ; } return result ; } catch ( JSONException e ) { System . out . println ( e ) ; return "Internal Error (JSONException): " + e . getMessage ( ) ; } }
Return the EXPLAIN string of the batched statement at the index
360
14
155,436
public synchronized static AdHocCompilerCache getCacheForCatalogHash(byte[] catalogHash) {
    String hashString = Encoder.hexEncode(catalogHash);
    AdHocCompilerCache cache = m_catalogHashMatch.getIfPresent(hashString);
    if (cache == null) {
        cache = new AdHocCompilerCache();
        m_catalogHashMatch.put(hashString, cache);
    }
    return cache;
}
Get the global cache for a given hash of the catalog. Note that there can be only one cache per catalogHash at a time.
99
27
155,437
synchronized void printStats ( ) { String line1 = String . format ( "CACHE STATS - Literals: Hits %d/%d (%.1f%%), Inserts %d Evictions %d\n" , m_literalHits , m_literalQueries , ( m_literalHits * 100.0 ) / m_literalQueries , m_literalInsertions , m_literalEvictions ) ; String line2 = String . format ( "CACHE STATS - Plans: Hits %d/%d (%.1f%%), Inserts %d Evictions %d\n" , m_planHits , m_planQueries , ( m_planHits * 100.0 ) / m_planQueries , m_planInsertions , m_planEvictions ) ; System . out . print ( line1 + line2 ) ; System . out . flush ( ) ; // reset these m_literalHits = 0 ; m_literalQueries = 0 ; m_literalInsertions = 0 ; m_literalEvictions = 0 ; m_planHits = 0 ; m_planQueries = 0 ; m_planInsertions = 0 ; m_planEvictions = 0 ; }
Stats printing method used during development. Probably shouldn't live past real stats integration.
275
16
155,438
public synchronized void put ( String sql , String parsedToken , AdHocPlannedStatement planIn , String [ ] extractedLiterals , boolean hasUserQuestionMarkParameters , boolean hasAutoParameterizedException ) { assert ( sql != null ) ; assert ( parsedToken != null ) ; assert ( planIn != null ) ; AdHocPlannedStatement plan = planIn ; assert ( new String ( plan . sql , Constants . UTF8ENCODING ) . equals ( sql ) ) ; // hasUserQuestionMarkParameters and hasAutoParameterizedException can not be true at the same time // it means that a query can not be both user parameterized query and auto parameterized query. assert ( ! hasUserQuestionMarkParameters || ! hasAutoParameterizedException ) ; // uncomment this to get some raw stdout cache performance stats every 5s //startPeriodicStatsPrinting(); // deal with L2 cache if ( ! hasAutoParameterizedException ) { BoundPlan matched = null ; BoundPlan unmatched = new BoundPlan ( planIn . core , planIn . parameterBindings ( extractedLiterals ) ) ; // deal with the parameterized plan cache first List < BoundPlan > boundVariants = m_coreCache . get ( parsedToken ) ; if ( boundVariants == null ) { boundVariants = new ArrayList < BoundPlan > ( ) ; m_coreCache . put ( parsedToken , boundVariants ) ; // Note that there is an edge case in which more than one plan is getting counted as one // "plan insertion". This only happens when two different plans arose from the same parameterized // query (token) because one invocation used the correct constants to trigger an expression index and // another invocation did not. These are not counted separately (which would have to happen below // after each call to boundVariants.add) because they are not evicted separately. // It seems saner to use consistent units when counting insertions vs. evictions. ++ m_planInsertions ; } else { for ( BoundPlan boundPlan : boundVariants ) { if ( boundPlan . equals ( unmatched ) ) { matched = boundPlan ; break ; } } if ( matched != null ) { // if a different core is found, reuse it // this is useful when updating the literal cache if ( unmatched . m_core != matched . m_core ) { plan = new AdHocPlannedStatement ( planIn , matched . m_core ) ; plan . setBoundConstants ( matched . m_constants ) ; } } } if ( matched == null ) { // Don't count insertions (of possibly repeated tokens) here // -- see the comment above where only UNIQUE token insertions are being counted, instead. boundVariants . add ( unmatched ) ; } } // then deal with the L1 cache if ( ! hasUserQuestionMarkParameters ) { AdHocPlannedStatement cachedPlan = m_literalCache . get ( sql ) ; if ( cachedPlan == null ) { //* enable to debug */ System.out.println("DEBUG: Caching literal '" + sql + "'"); m_literalCache . put ( sql , plan ) ; ++ m_literalInsertions ; } else { assert ( cachedPlan . equals ( plan ) ) ; } } }
Called from the PlannerTool directly when it finishes planning. This is the only way to populate the cache.
689
23
155,439
public void startPeriodicStatsPrinting() {
    if (m_statsTimer == null) {
        m_statsTimer = new Timer();
        m_statsTimer.scheduleAtFixedRate(new TimerTask() {
            @Override
            public void run() {
                printStats();
            }
        }, 5000, 5000);
    }
}
Start a timer that prints cache stats to the console every 5s. Used for development until we get better stats integration.
74
24
155,440
@Override
public Date getDate(int parameterIndex, Calendar cal) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
Retrieves the value of the designated JDBC DATE parameter as a java.sql.Date object, using the given Calendar object to construct the date.
39
31
155,441
@Override
public Date getDate(String parameterName, Calendar cal) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
Retrieves the value of a JDBC DATE parameter as a java.sql.Date object, using the given Calendar object to construct the date.
39
30
155,442
@Override
public Object getObject(String parameterName, Map<String, Class<?>> map) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
Returns an object representing the value of OUT parameter parameterName and uses map for the custom mapping of the parameter value.
47
23
155,443
@Override
public Time getTime(int parameterIndex, Calendar cal) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
Retrieves the value of the designated JDBC TIME parameter as a java.sql.Time object, using the given Calendar object to construct the time.
39
30
155,444
@Override
public Time getTime(String parameterName, Calendar cal) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
Retrieves the value of a JDBC TIME parameter as a java.sql.Time object, using the given Calendar object to construct the time.
39
29
155,445
@Override
public Timestamp getTimestamp(int parameterIndex, Calendar cal) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
Retrieves the value of the designated JDBC TIMESTAMP parameter as a java.sql.Timestamp object, using the given Calendar object to construct the Timestamp object.
41
35
155,446
@Override
public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
Retrieves the value of a JDBC TIMESTAMP parameter as a java.sql.Timestamp object, using the given Calendar object to construct the Timestamp object.
41
34
155,447
@Override
public void registerOutParameter(String parameterName, int sqlType, int scale) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
Registers the parameter named parameterName to be of JDBC type sqlType.
44
16
155,448
@Override
public void setNString(String parameterName, String value) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
Sets the designated parameter to the given String object.
40
11
155,449
@Override
public void setURL(String parameterName, URL val) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
Sets the designated parameter to the given java.net.URL object.
39
15
155,450
public static String suffixHSIdsWithMigratePartitionLeaderRequest(Long HSId) {
    return Long.toString(Long.MAX_VALUE) + "/" + Long.toString(HSId) + migrate_partition_leader_suffix;
}
Generate an HSId string with BALANCE_SPI_SUFFIX information. When this string is updated, we can tell the reason why the HSId was changed.
56
34
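Illustrative output of the method above (the suffix is the class's migrate_partition_leader_suffix constant, shown symbolically because its literal value is not given here):

String s = suffixHSIdsWithMigratePartitionLeaderRequest(42L);
// s == "9223372036854775807/42" + migrate_partition_leader_suffix
// The Long.MAX_VALUE prefix marks the entry as a MigratePartitionLeader request.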
155,451
public void startPartitionWatch() throws InterruptedException, ExecutionException {
    Future<?> task = m_es.submit(new PartitionWatchEvent());
    task.get();
}
Initialize and start watching the partition-level cache; this function is blocking.
44
12
155,452
private void processPartitionWatchEvent() throws KeeperException, InterruptedException {
    try {
        m_zk.create(m_rootNode, null, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        m_zk.getData(m_rootNode, m_childWatch, null);
    } catch (KeeperException.NodeExistsException e) {
        m_zk.getData(m_rootNode, m_childWatch, null);
    }
}
Race to create the partition-specific ZK node and put a watch on it.
111
16
155,453
public Object convertToDefaultType ( SessionInterface session , Object a ) { if ( a == null ) { return a ; } Type otherType ; if ( a instanceof Number ) { if ( a instanceof BigInteger ) { a = new BigDecimal ( ( BigInteger ) a ) ; } else if ( a instanceof Float ) { a = new Double ( ( ( Float ) a ) . doubleValue ( ) ) ; } else if ( a instanceof Byte ) { a = ValuePool . getInt ( ( ( Byte ) a ) . intValue ( ) ) ; } else if ( a instanceof Short ) { a = ValuePool . getInt ( ( ( Short ) a ) . intValue ( ) ) ; } if ( a instanceof Integer ) { otherType = Type . SQL_INTEGER ; } else if ( a instanceof Long ) { otherType = Type . SQL_BIGINT ; } else if ( a instanceof Double ) { otherType = Type . SQL_DOUBLE ; } else if ( a instanceof BigDecimal ) { // BEGIN Cherry-picked code change from hsqldb-2.2.8 otherType = Type . SQL_DECIMAL_DEFAULT ; /* if (typeCode == Types.SQL_DECIMAL || typeCode == Types.SQL_NUMERIC) { return convertToTypeLimits(session, a); } BigDecimal val = (BigDecimal) a; otherType = getNumberType(Types.SQL_DECIMAL, JavaSystem.precision(val), scale); */ // END Cherry-picked code change from hsqldb-2.2.8 } else { throw Error . error ( ErrorCode . X_42561 ) ; } // BEGIN Cherry-picked code change from hsqldb-2.2.8 switch ( typeCode ) { case Types . TINYINT : case Types . SQL_SMALLINT : case Types . SQL_INTEGER : return convertToInt ( session , a , Types . INTEGER ) ; case Types . SQL_BIGINT : return convertToLong ( session , a ) ; case Types . SQL_REAL : case Types . SQL_FLOAT : case Types . SQL_DOUBLE : return convertToDouble ( a ) ; case Types . SQL_NUMERIC : case Types . SQL_DECIMAL : { a = convertToDecimal ( a ) ; BigDecimal dec = ( BigDecimal ) a ; if ( scale != dec . scale ( ) ) { dec = dec . setScale ( scale , BigDecimal . ROUND_HALF_DOWN ) ; } return dec ; } default : throw Error . error ( ErrorCode . X_42561 ) ; } // END Cherry-picked code change from hsqldb-2.2.8 } else if ( a instanceof String ) { otherType = Type . SQL_VARCHAR ; } else { throw Error . error ( ErrorCode . X_42561 ) ; } return convertToType ( session , a , otherType ) ; }
Converts a value to this type
658
7
155,454
private static Double convertToDouble(Object a) {
    double value;
    if (a instanceof java.lang.Double) {
        return (Double) a;
    } else if (a instanceof BigDecimal) {
        BigDecimal bd = (BigDecimal) a;
        value = bd.doubleValue();
        int signum = bd.signum();
        BigDecimal bdd = new BigDecimal(value + signum);
        if (bdd.compareTo(bd) != signum) {
            throw Error.error(ErrorCode.X_22003);
        }
    } else {
        value = ((Number) a).doubleValue();
    }
    return ValuePool.getDouble(Double.doubleToLongBits(value));
}
Converter from a numeric object to Double. Input is checked to be within the range represented by Double.
165
20
155,455
public Object mod ( Object a , Object b ) { if ( a == null || b == null ) { return null ; } switch ( typeCode ) { case Types . SQL_REAL : case Types . SQL_FLOAT : case Types . SQL_DOUBLE : { double ad = ( ( Number ) a ) . doubleValue ( ) ; double bd = ( ( Number ) b ) . doubleValue ( ) ; if ( bd == 0 ) { throw Error . error ( ErrorCode . X_22012 ) ; } return ValuePool . getDouble ( Double . doubleToLongBits ( ad % bd ) ) ; } case Types . SQL_DECIMAL : { if ( ( b ) . equals ( 0 ) ) { throw Error . error ( ErrorCode . X_22012 ) ; } return ValuePool . getBigDecimal ( ( ( BigDecimal ) a ) . remainder ( ( BigDecimal ) b ) ) ; } case Types . TINYINT : case Types . SQL_SMALLINT : case Types . SQL_INTEGER : { int ai = ( ( Number ) a ) . intValue ( ) ; int bi = ( ( Number ) b ) . intValue ( ) ; if ( bi == 0 ) { throw Error . error ( ErrorCode . X_22012 ) ; } return ValuePool . getInt ( ai % bi ) ; } case Types . SQL_BIGINT : { long al = ( ( Number ) a ) . longValue ( ) ; long bl = ( ( Number ) b ) . longValue ( ) ; if ( bl == 0 ) { throw Error . error ( ErrorCode . X_22012 ) ; } return ValuePool . getLong ( al % bl ) ; } default : throw Error . runtimeError ( ErrorCode . U_S0500 , "NumberType" ) ; } }
A VoltDB extension
397
4
155,456
public synchronized void write(int c) throws IOException {
    checkClosed();
    int newcount = count + 1;
    if (newcount > buf.length) {
        buf = copyOf(buf, Math.max(buf.length << 1, newcount));
    }
    buf[count] = (char) c;
    count = newcount;
}
Writes the specified single character.
75
7
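A growth sketch for the write method above (buffer sizes are illustrative):

// buf.length == 8 and count == 8, then write('x'):
//   newcount = 9 > 8, so buf = copyOf(buf, Math.max(8 << 1, 9))  -> capacity 16
//   buf[8] = 'x'; count = 9
// Doubling keeps the amortized cost per written character at O(1).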
155,457
private void initiateSPIMigrationIfRequested ( Iv2InitiateTaskMessage msg ) { if ( ! "@MigratePartitionLeader" . equals ( msg . getStoredProcedureName ( ) ) ) { return ; } final Object [ ] params = msg . getParameters ( ) ; int pid = Integer . parseInt ( params [ 1 ] . toString ( ) ) ; if ( pid != m_partitionId ) { tmLog . warn ( String . format ( "@MigratePartitionLeader executed at a wrong partition %d for partition %d." , m_partitionId , pid ) ) ; return ; } RealVoltDB db = ( RealVoltDB ) VoltDB . instance ( ) ; int hostId = Integer . parseInt ( params [ 2 ] . toString ( ) ) ; Long newLeaderHSId = db . getCartographer ( ) . getHSIDForPartitionHost ( hostId , pid ) ; if ( newLeaderHSId == null || newLeaderHSId == m_hsId ) { tmLog . warn ( String . format ( "@MigratePartitionLeader the partition leader is already on the host %d or the host id is invalid." , hostId ) ) ; return ; } SpScheduler scheduler = ( SpScheduler ) m_scheduler ; scheduler . checkPointMigratePartitionLeader ( ) ; scheduler . m_isLeader = false ; m_newLeaderHSID = newLeaderHSId ; m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus . STARTED ; LeaderCache leaderAppointee = new LeaderCache ( m_messenger . getZK ( ) , "initiateSPIMigrationIfRequested-" + m_partitionId , VoltZK . iv2appointees ) ; try { leaderAppointee . start ( true ) ; leaderAppointee . put ( pid , LeaderCache . suffixHSIdsWithMigratePartitionLeaderRequest ( newLeaderHSId ) ) ; } catch ( InterruptedException | ExecutionException | KeeperException e ) { VoltDB . crashLocalVoltDB ( "fail to start MigratePartitionLeader" , true , e ) ; } finally { try { leaderAppointee . shutdown ( ) ; } catch ( InterruptedException e ) { } } tmLog . info ( "MigratePartitionLeader for partition " + pid + " to " + CoreUtils . hsIdToString ( newLeaderHSId ) ) ; //notify the new leader right away if the current leader has drained all transactions. notifyNewLeaderOfTxnDoneIfNeeded ( ) ; }
rerouted from this moment on until the transactions are correctly routed to the new leader.
566
16
155,458
private boolean checkMisroutedIv2IntiateTaskMessage ( Iv2InitiateTaskMessage message ) { if ( message . isForReplica ( ) ) { return false ; } if ( m_scheduler . isLeader ( ) && m_migratePartitionLeaderStatus != MigratePartitionLeaderStatus . TXN_RESTART ) { //At this point, the message is sent to partition leader return false ; } //At this point, the message is misrouted. //(1) If a site has been demoted via @MigratePartitionLeader, the messages which are sent to the leader will be restarted. //(2) If a site becomes new leader via @MigratePartitionLeader. Transactions will be restarted before it gets notification from old // leader that transactions on older leader have been drained. InitiateResponseMessage response = new InitiateResponseMessage ( message ) ; response . setMisrouted ( message . getStoredProcedureInvocation ( ) ) ; response . m_sourceHSId = getHSId ( ) ; deliver ( response ) ; if ( tmLog . isDebugEnabled ( ) ) { tmLog . debug ( "Sending message back on:" + CoreUtils . hsIdToString ( m_hsId ) + " isLeader:" + m_scheduler . isLeader ( ) + " status:" + m_migratePartitionLeaderStatus + "\n" + message ) ; } //notify the new partition leader that the old leader has completed the Txns if needed. notifyNewLeaderOfTxnDoneIfNeeded ( ) ; return true ; }
if these requests are intended for the leader. Client interface will restart these transactions.
348
15
155,459
private boolean checkMisroutedFragmentTaskMessage ( FragmentTaskMessage message ) { if ( m_scheduler . isLeader ( ) || message . isForReplica ( ) ) { return false ; } TransactionState txnState = ( ( ( SpScheduler ) m_scheduler ) . getTransactionState ( message . getTxnId ( ) ) ) ; // If a fragment is part of a transaction which have not been seen on this site, restart it. if ( txnState == null ) { FragmentResponseMessage response = new FragmentResponseMessage ( message , getHSId ( ) ) ; TransactionRestartException restart = new TransactionRestartException ( "Transaction being restarted due to MigratePartitionLeader." , message . getTxnId ( ) ) ; restart . setMisrouted ( true ) ; response . setStatus ( FragmentResponseMessage . UNEXPECTED_ERROR , restart ) ; response . m_sourceHSId = getHSId ( ) ; response . setPartitionId ( m_partitionId ) ; if ( tmLog . isDebugEnabled ( ) ) { tmLog . debug ( "misRoutedFragMsg on site:" + CoreUtils . hsIdToString ( getHSId ( ) ) + "\n" + message ) ; } deliver ( response ) ; return true ; } // A transaction may have multiple batches or fragments. If the first batch or fragment has already been // processed, the follow-up batches or fragments should also be processed on this site. if ( ! m_scheduler . isLeader ( ) && ! message . isForReplica ( ) ) { message . setExecutedOnPreviousLeader ( true ) ; txnState . setLeaderMigrationInvolved ( ) ; if ( tmLog . isDebugEnabled ( ) ) { tmLog . debug ( "Follow-up fragment will be processed on " + CoreUtils . hsIdToString ( getHSId ( ) ) + "\n" + message ) ; } } return false ; }
After MigratePartitionLeader has been requested, the fragments which are sent to the leader site should be restarted.
436
22
155,460
private void handleLogRequest ( VoltMessage message ) { Iv2RepairLogRequestMessage req = ( Iv2RepairLogRequestMessage ) message ; // It is possible for a dead host to queue messages after a repair request is processed // so make sure this can't happen by re-queuing this message after we know the dead host is gone // Since we are not checking validateForeignHostId on the PicoNetwork thread, it is possible for // the PicoNetwork thread to validateForeignHostId and queue a message behind this repair message. // Further, we loose visibility to the ForeignHost as soon as HostMessenger marks the host invalid // even though the PicoNetwork thread could still be alive so we will skeptically int deadHostId = req . getDeadHostId ( ) ; if ( deadHostId != Integer . MAX_VALUE ) { if ( m_messenger . canCompleteRepair ( deadHostId ) ) { // Make sure we are the last in the task queue when we know the ForeignHost is gone req . disableDeadHostCheck ( ) ; deliver ( message ) ; } else { if ( req . getRepairRetryCount ( ) > 100 && req . getRepairRetryCount ( ) % 100 == 0 ) { hostLog . warn ( "Repair Request for dead host " + deadHostId + " has not been processed yet because connection has not closed" ) ; } Runnable retryRepair = new Runnable ( ) { @ Override public void run ( ) { InitiatorMailbox . this . deliver ( message ) ; } } ; VoltDB . instance ( ) . scheduleWork ( retryRepair , 10 , - 1 , TimeUnit . MILLISECONDS ) ; // the repair message will be resubmitted shortly when the ForeignHosts to the dead host have been removed } return ; } List < Iv2RepairLogResponseMessage > logs = m_repairLog . contents ( req . getRequestId ( ) , req . isMPIRequest ( ) ) ; if ( req . isMPIRequest ( ) ) { m_scheduler . cleanupTransactionBacklogOnRepair ( ) ; } for ( Iv2RepairLogResponseMessage log : logs ) { send ( message . m_sourceHSId , log ) ; } }
Produce the repair log. This is idempotent.
484
13
155,461
private void setMigratePartitionLeaderStatus ( MigratePartitionLeaderMessage message ) { //The host with old partition leader is down. if ( message . isStatusReset ( ) ) { m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus . NONE ; return ; } if ( m_migratePartitionLeaderStatus == MigratePartitionLeaderStatus . NONE ) { //txn draining notification from the old leader arrives before this site is promoted m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus . TXN_DRAINED ; } else if ( m_migratePartitionLeaderStatus == MigratePartitionLeaderStatus . TXN_RESTART ) { //if the new leader has been promoted, stop restarting txns. m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus . NONE ; } tmLog . info ( "MigratePartitionLeader new leader " + CoreUtils . hsIdToString ( m_hsId ) + " is notified by previous leader " + CoreUtils . hsIdToString ( message . getPriorLeaderHSID ( ) ) + ". status:" + m_migratePartitionLeaderStatus ) ; }
that previous partition leader has drained its txns
263
9
155,462
public void setMigratePartitionLeaderStatus ( boolean migratePartitionLeader ) { if ( ! migratePartitionLeader ) { m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus . NONE ; m_newLeaderHSID = Long . MIN_VALUE ; return ; } //The previous leader has already drained all txns if ( m_migratePartitionLeaderStatus == MigratePartitionLeaderStatus . TXN_DRAINED ) { m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus . NONE ; tmLog . info ( "MigratePartitionLeader transactions on previous partition leader are drained. New leader:" + CoreUtils . hsIdToString ( m_hsId ) + " status:" + m_migratePartitionLeaderStatus ) ; return ; } //Wait for the notification from old partition leader m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus . TXN_RESTART ; tmLog . info ( "MigratePartitionLeader restart txns on new leader:" + CoreUtils . hsIdToString ( m_hsId ) + " status:" + m_migratePartitionLeaderStatus ) ; }
the site for new partition leader
257
6
155,463
public void notifyNewLeaderOfTxnDoneIfNeeded ( ) { //return quickly to avoid performance hit if ( m_newLeaderHSID == Long . MIN_VALUE ) { return ; } SpScheduler scheduler = ( SpScheduler ) m_scheduler ; if ( ! scheduler . txnDoneBeforeCheckPoint ( ) ) { return ; } MigratePartitionLeaderMessage message = new MigratePartitionLeaderMessage ( m_hsId , m_newLeaderHSID ) ; send ( message . getNewLeaderHSID ( ) , message ) ; //reset status on the old partition leader m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus . NONE ; m_repairLog . setLeaderState ( false ) ; tmLog . info ( "MigratePartitionLeader previous leader " + CoreUtils . hsIdToString ( m_hsId ) + " notifies new leader " + CoreUtils . hsIdToString ( m_newLeaderHSID ) + " transactions are drained." + " status:" + m_migratePartitionLeaderStatus ) ; m_newLeaderHSID = Long . MIN_VALUE ; }
Then the new master can proceed to process transactions.
253
9
155,464
public void resetMigratePartitionLeaderStatus() {
    m_scheduler.m_isLeader = true;
    m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.NONE;
    m_repairLog.setLeaderState(true);
    m_newLeaderHSID = Long.MIN_VALUE;
}
Reinstall the site as leader.
70
7
155,465
private Option resolveOption(String opt) {
    opt = Util.stripLeadingHyphens(opt);
    for (Option option : options) {
        if (opt.equals(option.getOpt())) {
            return option;
        }
        if (opt.equals(option.getLongOpt())) {
            return option;
        }
    }
    return null;
}
Retrieves the option object given the long or short option as a String
75
15
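A brief illustration of the lookup above, assuming Apache Commons CLI-style Options (the option names are illustrative):

Options options = new Options();
options.addOption("v", "verbose", false, "enable verbose output");
// resolveOption("-v"), resolveOption("--verbose"), and resolveOption("verbose")
// each strip leading hyphens first, so all three return the same Option object.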
155,466
public String[] getArgs() {
    String[] answer = new String[args.size()];
    args.toArray(answer);
    return answer;
}
Retrieve any left-over non-recognized options and arguments.
36
12
155,467
public boolean processScanNodeWithReAggNode ( AbstractPlanNode node , AbstractPlanNode reAggNode ) { // MV table scan node can not be in in-lined nodes. for ( int i = 0 ; i < node . getChildCount ( ) ; i ++ ) { AbstractPlanNode child = node . getChild ( i ) ; if ( child instanceof AbstractScanPlanNode ) { AbstractScanPlanNode scanNode = ( AbstractScanPlanNode ) child ; if ( ! scanNode . getTargetTableName ( ) . equals ( getMVTableName ( ) ) ) { continue ; } if ( reAggNode != null ) { // Join query case. node . setAndLinkChild ( i , reAggNode ) ; } // Process scan node. // Set up the scan plan node's scan columns. Add in-line projection node for scan node. scanNode . addInlinePlanNode ( m_scanInlinedProjectionNode ) ; m_scanNode = scanNode ; return true ; } else { boolean replaced = processScanNodeWithReAggNode ( child , reAggNode ) ; if ( replaced ) { return true ; } } } return false ; }
Find the scan node on the MV table and replace it with reAggNode for a join query. This scan node cannot be in-lined, so it should be a child of a join node.
246
38
155,468
private void resolveColumnReferences ( ) { if ( isDistinctSelect || isGrouped ) { acceptsSequences = false ; } for ( int i = 0 ; i < rangeVariables . length ; i ++ ) { Expression e = rangeVariables [ i ] . nonIndexJoinCondition ; if ( e == null ) { continue ; } resolveColumnReferencesAndAllocate ( e , i + 1 , false ) ; } resolveColumnReferencesAndAllocate ( queryCondition , rangeVariables . length , false ) ; for ( int i = 0 ; i < indexLimitVisible ; i ++ ) { resolveColumnReferencesAndAllocate ( exprColumns [ i ] , rangeVariables . length , acceptsSequences ) ; } for ( int i = indexLimitVisible ; i < indexStartOrderBy ; i ++ ) { resolveColumnReferencesAndAllocate ( exprColumns [ i ] , rangeVariables . length , false ) ; } /************************* Volt DB Extensions *************************/ resolveColumnReferencesInGroupBy ( ) ; /**********************************************************************/ resolveColumnReferencesInOrderBy ( sortAndSlice ) ; }
Resolves all column expressions in the GROUP BY clause and beyond. Replaces any alias column expression in the ORDER BY clause with the actual select column expression.
236
33
155,469
private int getMaxRowCount ( Session session , int rowCount ) { int limitStart = getLimitStart ( session ) ; int limitCount = getLimitCount ( session , rowCount ) ; if ( simpleLimit ) { if ( rowCount == 0 ) { rowCount = limitCount ; } // A VoltDB extension to support LIMIT 0 if ( rowCount > Integer . MAX_VALUE - limitStart ) { /* disable 1 line ... if (rowCount == 0 || rowCount > Integer.MAX_VALUE - limitStart) { ... disabled 1 line */ // End of VoltDB extension rowCount = Integer . MAX_VALUE ; } else { rowCount += limitStart ; } } else { rowCount = Integer . MAX_VALUE ; // A VoltDB extension to support LIMIT 0 // limitCount == 0 can be enforced/optimized as rowCount == 0 regardless of offset // even in non-simpleLimit cases (SELECT DISTINCT, GROUP BY, and/or ORDER BY). // This is an optimal handling of a hard-coded LIMIT 0, but it really shouldn't be the ONLY // enforcement for zero LIMITs -- what about "LIMIT ?" with 0 passed later as a parameter? // The HSQL executor ("HSQL back end") also needs runtime enforcement of zero limits. // The VoltDB executor has such enforcement. if ( limitCount == 0 ) { rowCount = 0 ; } // End of VoltDB extension } return rowCount ; }
Translate the rowCount into the total number of rows needed from the query, including any rows skipped at the beginning.
309
20
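A worked example for the simpleLimit path of getMaxRowCount above (the numbers are illustrative):

// SELECT ... LIMIT 10 OFFSET 5  ->  limitStart = 5, limitCount = 10
// rowCount = 10 + 5 = 15 rows must be produced before the first 5 are skipped.
// If limitCount + limitStart would overflow an int, rowCount clamps to Integer.MAX_VALUE.
// Non-simple queries (DISTINCT / GROUP BY / ORDER BY) need all rows, except that the
// VoltDB extension short-circuits a hard-coded LIMIT 0 to rowCount = 0.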
155,470
protected void dumpExprColumns(String header) {
    System.out.println("\n\n*********************************************");
    System.out.println(header);
    try {
        System.out.println(getSQL());
    } catch (Exception e) {
    }
    for (int i = 0; i < exprColumns.length; ++i) {
        if (i == 0) System.out.println("Visible columns:");
        if (i == indexStartOrderBy) System.out.println("start order by:");
        if (i == indexStartAggregates) System.out.println("start aggregates:");
        if (i == indexLimitVisible) System.out.println("After limit of visible columns:");
        System.out.println(i + ": " + exprColumns[i]);
    }
    System.out.println("\n\n");
}
Dumps the exprColumns list for this query specification. Writes to stdout.
201
18
155,471
public void updateEECacheStats(long eeCacheSize, long hits, long misses, int partitionId) {
    m_cache1Level = eeCacheSize;
    m_cache1Hits += hits;
    m_cacheMisses += misses;
    m_invocations += hits + misses;
    m_partitionId = partitionId;
}
Used to update EE cache stats without changing tracked time
72
10
155,472
public void endStatsCollection ( long cache1Size , long cache2Size , CacheUse cacheUse , long partitionId ) { if ( m_currentStartTime != null ) { long delta = System . nanoTime ( ) - m_currentStartTime ; if ( delta < 0 ) { if ( Math . abs ( delta ) > 1000000000 ) { log . info ( "Planner statistics recorded a negative planning time larger than one second: " + delta ) ; } } else { m_totalPlanningTime += delta ; m_minPlanningTime = Math . min ( delta , m_minPlanningTime ) ; m_maxPlanningTime = Math . max ( delta , m_maxPlanningTime ) ; m_lastMinPlanningTime = Math . min ( delta , m_lastMinPlanningTime ) ; m_lastMaxPlanningTime = Math . max ( delta , m_lastMaxPlanningTime ) ; } m_currentStartTime = null ; } m_cache1Level = cache1Size ; m_cache2Level = cache2Size ; switch ( cacheUse ) { case HIT1 : m_cache1Hits ++ ; break ; case HIT2 : m_cache2Hits ++ ; break ; case MISS : m_cacheMisses ++ ; break ; case FAIL : m_failures ++ ; break ; } m_invocations ++ ; m_partitionId = partitionId ; }
Called after planning or failing to plan. Records timer and cache stats.
302
15
155,473
@ Override protected void updateStatsRow ( Object rowKey , Object rowValues [ ] ) { super . updateStatsRow ( rowKey , rowValues ) ; rowValues [ columnNameToIndex . get ( "PARTITION_ID" ) ] = m_partitionId ; long totalTimedExecutionTime = m_totalPlanningTime ; long minExecutionTime = m_minPlanningTime ; long maxExecutionTime = m_maxPlanningTime ; long cache1Level = m_cache1Level ; long cache2Level = m_cache2Level ; long cache1Hits = m_cache1Hits ; long cache2Hits = m_cache2Hits ; long cacheMisses = m_cacheMisses ; long failureCount = m_failures ; if ( m_interval ) { totalTimedExecutionTime = m_totalPlanningTime - m_lastTimedPlanningTime ; m_lastTimedPlanningTime = m_totalPlanningTime ; minExecutionTime = m_lastMinPlanningTime ; maxExecutionTime = m_lastMaxPlanningTime ; m_lastMinPlanningTime = Long . MAX_VALUE ; m_lastMaxPlanningTime = Long . MIN_VALUE ; cache1Level = m_cache1Level - m_lastCache1Level ; m_lastCache1Level = m_cache1Level ; cache2Level = m_cache2Level - m_lastCache2Level ; m_lastCache2Level = m_cache2Level ; cache1Hits = m_cache1Hits - m_lastCache1Hits ; m_lastCache1Hits = m_cache1Hits ; cache2Hits = m_cache2Hits - m_lastCache2Hits ; m_lastCache2Hits = m_cache2Hits ; cacheMisses = m_cacheMisses - m_lastCacheMisses ; m_lastCacheMisses = m_cacheMisses ; failureCount = m_failures - m_lastFailures ; m_lastFailures = m_failures ; m_lastInvocations = m_invocations ; } rowValues [ columnNameToIndex . get ( VoltSystemProcedure . CNAME_SITE_ID ) ] = m_siteId ; rowValues [ columnNameToIndex . get ( "PARTITION_ID" ) ] = m_partitionId ; rowValues [ columnNameToIndex . get ( "CACHE1_LEVEL" ) ] = cache1Level ; rowValues [ columnNameToIndex . get ( "CACHE2_LEVEL" ) ] = cache2Level ; rowValues [ columnNameToIndex . get ( "CACHE1_HITS" ) ] = cache1Hits ; rowValues [ columnNameToIndex . get ( "CACHE2_HITS" ) ] = cache2Hits ; rowValues [ columnNameToIndex . get ( "CACHE_MISSES" ) ] = cacheMisses ; rowValues [ columnNameToIndex . get ( "PLAN_TIME_MIN" ) ] = minExecutionTime ; rowValues [ columnNameToIndex . get ( "PLAN_TIME_MAX" ) ] = maxExecutionTime ; if ( getSampleCount ( ) != 0 ) { rowValues [ columnNameToIndex . get ( "PLAN_TIME_AVG" ) ] = ( totalTimedExecutionTime / getSampleCount ( ) ) ; } else { rowValues [ columnNameToIndex . get ( "PLAN_TIME_AVG" ) ] = 0L ; } rowValues [ columnNameToIndex . get ( "FAILURES" ) ] = failureCount ; }
Update the rowValues array with the latest statistical information . This method overrides the super class version , which must also be called so that it can update its columns .
805
33
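The interval branch above implements a snapshot-and-subtract scheme: every cumulative counter has an m_last* twin, and interval mode reports the delta since the previous poll before refreshing the snapshot. A reduced sketch of the scheme, with invented field names:

    private long m_hits;       // cumulative counter, only ever grows
    private long m_lastHits;   // snapshot taken at the previous interval poll

    // returns the lifetime total, or the delta since the last poll in interval mode
    long hitsForRow(boolean interval) {
        if (!interval) {
            return m_hits;
        }
        long delta = m_hits - m_lastHits;
        m_lastHits = m_hits;   // re-snapshot for the next interval
        return delta;
    }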
155,474
static void tag ( StringBuilder sb , String color , String text ) { sb . append ( "<span class='label label" ) ; if ( color != null ) { sb . append ( "-" ) . append ( color ) ; } String classText = text . replace ( ' ' , ' ' ) ; sb . append ( " l-" ) . append ( classText ) . append ( "'>" ) . append ( text ) . append ( "</span>" ) ; }
Make an html bootstrap tag with our custom css class .
103
13
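For reference, a call like the following would produce a Bootstrap label span; the color and text are arbitrary examples:

    StringBuilder sb = new StringBuilder();
    tag(sb, "important", "compiled");
    // sb now holds: <span class='label label-important l-compiled'>compiled</span>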
155,475
public static String report ( Catalog catalog , long minHeap , boolean isPro , int hostCount , int sitesPerHost , int kfactor , ArrayList < Feedback > warnings , String autoGenDDL ) throws IOException { // asynchronously get platform properties new Thread ( ) { @ Override public void run ( ) { PlatformProperties . getPlatformProperties ( ) ; } } . start ( ) ; URL url = Resources . getResource ( ReportMaker . class , "template.html" ) ; String contents = Resources . toString ( url , Charsets . UTF_8 ) ; Cluster cluster = catalog . getClusters ( ) . get ( "cluster" ) ; assert ( cluster != null ) ; Database db = cluster . getDatabases ( ) . get ( "database" ) ; assert ( db != null ) ; String statsData = getStatsHTML ( db , minHeap , warnings ) ; contents = contents . replace ( "##STATS##" , statsData ) ; // generateProceduresTable needs to happen before generateSchemaTable // because some metadata used in the later is generated in the former String procData = generateProceduresTable ( db . getTables ( ) , db . getProcedures ( ) ) ; contents = contents . replace ( "##PROCS##" , procData ) ; String schemaData = generateSchemaTable ( db ) ; contents = contents . replace ( "##SCHEMA##" , schemaData ) ; DatabaseSizes sizes = CatalogSizing . getCatalogSizes ( db , DrRoleType . XDCR . value ( ) . equals ( cluster . getDrrole ( ) ) ) ; String sizeData = generateSizeTable ( sizes ) ; contents = contents . replace ( "##SIZES##" , sizeData ) ; String clusterConfig = generateClusterConfiguration ( isPro , hostCount , sitesPerHost , kfactor ) ; contents = contents . replace ( "##CLUSTERCONFIG##" , clusterConfig ) ; String sizeSummary = generateSizeSummary ( sizes ) ; contents = contents . replace ( "##SIZESUMMARY##" , sizeSummary ) ; String heapSummary = generateRecommendedServerSettings ( sizes ) ; contents = contents . replace ( "##RECOMMENDEDSERVERSETTINGS##" , heapSummary ) ; String platformData = PlatformProperties . getPlatformProperties ( ) . toHTML ( ) ; contents = contents . replace ( "##PLATFORM##" , platformData ) ; contents = contents . replace ( "##VERSION##" , VoltDB . instance ( ) . getVersionString ( ) ) ; contents = contents . replace ( "##DDL##" , escapeHtml4 ( autoGenDDL ) ) ; DateFormat df = new SimpleDateFormat ( "d MMM yyyy HH:mm:ss z" ) ; contents = contents . replace ( "##TIMESTAMP##" , df . format ( m_timestamp ) ) ; String msg = Encoder . hexEncode ( VoltDB . instance ( ) . getVersionString ( ) + "," + System . currentTimeMillis ( ) ) ; contents = contents . replace ( "get.py?a=KEY&" , String . format ( "get.py?a=%s&" , msg ) ) ; return contents ; }
Generate the HTML catalog report from a newly compiled VoltDB catalog
709
13
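The report body is assembled by plain String.replace over ##TOKEN## placeholders in template.html. A minimal sketch of the technique with an invented two-token template:

    String statsHtml = "<p>42 tables</p>";      // stand-in for getStatsHTML() output
    String version = "x.y.z";                   // stand-in for the server version string
    String template = "<html><body>##STATS##<hr/>v##VERSION##</body></html>";
    String contents = template
            .replace("##STATS##", statsHtml)
            .replace("##VERSION##", version);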
155,476
public static String liveReport ( ) { byte [ ] reportbytes = VoltDB . instance ( ) . getCatalogContext ( ) . getFileInJar ( VoltCompiler . CATLOG_REPORT ) ; String report = new String ( reportbytes , Charsets . UTF_8 ) ; // remove commented out code report = report . replace ( "<!--##RESOURCES" , "" ) ; report = report . replace ( "##RESOURCES-->" , "" ) ; // inject the cluster overview //String clusterStr = "<h4>System Overview</h4>\n<p>" + getLiveSystemOverview() + "</p><br/>\n"; //report = report.replace("<!--##CLUSTER##-->", clusterStr); // inject the running system platform properties PlatformProperties pp = PlatformProperties . getPlatformProperties ( ) ; String ppStr = "<h4>Cluster Platform</h4>\n<p>" + pp . toHTML ( ) + "</p><br/>\n" ; report = report . replace ( "<!--##PLATFORM2##-->" , ppStr ) ; // change the live/static var to live if ( VoltDB . instance ( ) . getConfig ( ) . m_isEnterprise ) { report = report . replace ( "&b=r&" , "&b=e&" ) ; } else { report = report . replace ( "&b=r&" , "&b=c&" ) ; } return report ; }
Find the pre - compiled catalog report in the jarfile and modify it for use in the built - in web portal .
328
26
155,477
private static boolean turnOffClientInterface ( ) { // we don't expect this to ever fail, but if it does, skip to dying immediately VoltDBInterface vdbInstance = instance ( ) ; if ( vdbInstance != null ) { ClientInterface ci = vdbInstance . getClientInterface ( ) ; if ( ci != null ) { if ( ! ci . ceaseAllPublicFacingTrafficImmediately ( ) ) { return false ; } } } return true ; }
turn off client interface as fast as possible
101
8
155,478
private static void sendCrashSNMPTrap ( String msg ) { if ( msg == null || msg . trim ( ) . isEmpty ( ) ) { return ; } VoltDBInterface vdbInstance = instance ( ) ; if ( vdbInstance == null ) { return ; } SnmpTrapSender snmp = vdbInstance . getSnmpTrapSender ( ) ; if ( snmp == null ) { return ; } try { snmp . crash ( msg ) ; } catch ( Throwable t ) { VoltLogger log = new VoltLogger ( "HOST" ) ; log . warn ( "failed to issue a crash SNMP trap" , t ) ; } }
send an SNMP trap crash notification
146
7
155,479
public static void crashGlobalVoltDB ( String errMsg , boolean stackTrace , Throwable t ) { // for test code wasCrashCalled = true ; crashMessage = errMsg ; if ( ignoreCrash ) { throw new AssertionError ( "Faux crash of VoltDB successful." ) ; } // end test code // send a snmp trap crash notification sendCrashSNMPTrap ( errMsg ) ; try { // turn off client interface as fast as possible // we don't expect this to ever fail, but if it does, skip to dying immediately if ( ! turnOffClientInterface ( ) ) { return ; // this will jump to the finally block and die faster } // instruct the rest of the cluster to die instance ( ) . getHostMessenger ( ) . sendPoisonPill ( errMsg ) ; // give the pill a chance to make it through the network buffer Thread . sleep ( 500 ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; // sleep even on exception in case the pill got sent before the exception try { Thread . sleep ( 500 ) ; } catch ( InterruptedException e2 ) { } } // finally block does its best to ensure death, no matter what context this // is called in finally { crashLocalVoltDB ( errMsg , stackTrace , t ) ; } }
Exit the process with an error message , optionally with a stack trace . Also notify all connected peers that the node is going down .
283
25
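The control flow here is a best-effort cluster notification wrapped around a guaranteed local exit: whatever happens while sending the poison pill, the finally block still terminates this node. The skeleton, with hypothetical helper names:

    try {
        notifyPeersOfShutdown(errMsg);   // hypothetical: broadcast may throw or stall
        Thread.sleep(500);               // give the message time to leave the network buffer
    } catch (Exception e) {
        // sleep anyway, in case the pill went out before the exception
        try { Thread.sleep(500); } catch (InterruptedException ignored) { }
    } finally {
        dieLocally(errMsg);              // hypothetical: unconditionally ends the process
    }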
155,480
public static void main ( String [ ] args ) { //Thread.setDefaultUncaughtExceptionHandler(new VoltUncaughtExceptionHandler()); Configuration config = new Configuration ( args ) ; try { if ( ! config . validate ( ) ) { System . exit ( - 1 ) ; } else { if ( config . m_startAction == StartAction . GET ) { cli ( config ) ; } else { initialize ( config ) ; instance ( ) . run ( ) ; } } } catch ( OutOfMemoryError e ) { String errmsg = "VoltDB Main thread: ran out of Java memory. This node will shut down." ; VoltDB . crashLocalVoltDB ( errmsg , false , e ) ; } }
Entry point for the VoltDB server process .
154
9
155,481
public static String getDefaultReplicationInterface ( ) { if ( m_config . m_drInterface == null || m_config . m_drInterface . isEmpty ( ) ) { if ( m_config . m_externalInterface == null ) { return "" ; } else { return m_config . m_externalInterface ; } } else { return m_config . m_drInterface ; } }
Selects a specified m_drInterface over a specified m_externalInterface from m_config
85
20
155,482
public void removeAllZeros ( ) { Iterator < Map . Entry < K , AtomicLong > > entryIterator = map . entrySet ( ) . iterator ( ) ; while ( entryIterator . hasNext ( ) ) { Map . Entry < K , AtomicLong > entry = entryIterator . next ( ) ; AtomicLong atomic = entry . getValue ( ) ; if ( atomic != null && atomic . get ( ) == 0L ) { entryIterator . remove ( ) ; } } }
Removes all mappings from this map whose values are zero .
102
13
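Removal goes through the entry iterator rather than calling map.remove(key) inside the loop; on a plain HashMap the latter would throw ConcurrentModificationException mid-iteration. A self-contained sketch of the same idiom:

    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.Map;
    import java.util.concurrent.atomic.AtomicLong;

    public class ZeroPruneDemo {
        public static void main(String[] args) {
            Map<String, AtomicLong> map = new HashMap<>();
            map.put("a", new AtomicLong(0));
            map.put("b", new AtomicLong(3));

            Iterator<Map.Entry<String, AtomicLong>> it = map.entrySet().iterator();
            while (it.hasNext()) {
                if (it.next().getValue().get() == 0L) {
                    it.remove();   // structural removal that keeps the iterator valid
                }
            }
            System.out.println(map);   // prints {b=3}
        }
    }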
155,483
public CompletableFuture < ClientResponseWithPartitionKey [ ] > callAllPartitionProcedure ( String procedureName , Object ... params ) { return m_runner . callAllPartitionProcedure ( procedureName , params ) ; }
A version of the similar API from VoltDB clients but for non - transactional procedures . Runs a single - partition procedure on every partition that exists at the time it is called .
52
37
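A hypothetical call site from inside the owning class, assuming a single-partition procedure named CountPerPartition exists; the future completes once every partition has responded, and on my reading of the client API each array element pairs a partition key with that partition's response:

    CompletableFuture<ClientResponseWithPartitionKey[]> fut =
            callAllPartitionProcedure("CountPerPartition");
    fut.thenAccept(responses -> {
        for (ClientResponseWithPartitionKey r : responses) {
            // one entry per partition that existed when the call was made
            System.out.println(r.partitionKey + " -> " + r.response.getStatus());
        }
    });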
155,484
public static ByteBuffer getTableDataReference ( VoltTable vt ) { ByteBuffer buf = vt . m_buffer . duplicate ( ) ; buf . rewind ( ) ; return buf ; }
End users should not call this method . Obtain a reference to the table s underlying buffer . The returned reference s position and mark are independent of the table s buffer position and mark . The returned buffer has no mark and is at position 0 .
41
49
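duplicate() shares the backing bytes but gives the caller an independent position, limit, and mark, which is why rewinding the returned buffer cannot disturb the table's own cursor. Illustrated with a plain ByteBuffer:

    import java.nio.ByteBuffer;

    ByteBuffer original = ByteBuffer.allocate(16);
    original.position(8);                  // stand-in for the table's internal cursor

    ByteBuffer view = original.duplicate();
    view.rewind();                         // resets only the view, as the method does

    // view.position() == 0 while original.position() == 8: the cursors are
    // independent, but both still read the same backing bytes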
155,485
private long reserveNextTicket ( double requiredPermits , long nowMicros ) { resync ( nowMicros ) ; long microsToNextFreeTicket = Math . max ( 0 , nextFreeTicketMicros - nowMicros ) ; double storedPermitsToSpend = Math . min ( requiredPermits , this . storedPermits ) ; double freshPermits = requiredPermits - storedPermitsToSpend ; long waitMicros = storedPermitsToWaitTime ( this . storedPermits , storedPermitsToSpend ) + ( long ) ( freshPermits * stableIntervalMicros ) ; this . nextFreeTicketMicros = nextFreeTicketMicros + waitMicros ; this . storedPermits -= storedPermitsToSpend ; return microsToNextFreeTicket ; }
Reserves next ticket and returns the wait time that the caller must wait for .
176
16
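The subtle property is that a caller pays forward: the returned wait is only the time until the previously scheduled nextFreeTicketMicros, while the cost of the permits just granted is pushed onto the next caller. A worked example under assumed numbers, taking storedPermitsToWaitTime as 0 (bursty behavior):

    // assumed state: 10 permits/sec, an idle limiter whose stored bucket is empty
    long nowMicros = 0;
    long nextFreeTicketMicros = 0;
    double storedPermits = 0.0;
    double stableIntervalMicros = 100_000.0;   // one permit per 100 ms

    double required = 5.0;
    long wait = Math.max(0, nextFreeTicketMicros - nowMicros);  // 0: this caller runs now
    double fromStore = Math.min(required, storedPermits);       // bucket is empty, so 0
    double fresh = required - fromStore;                        // 5 fresh permits needed
    nextFreeTicketMicros += (long) (fresh * stableIntervalMicros);  // advances to 500_000
    // a second caller arriving at t=0 would now be told to wait 500_000 us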
155,486
public Options addOptionGroup ( OptionGroup group ) { if ( group . isRequired ( ) ) { requiredOpts . add ( group ) ; } for ( Option option : group . getOptions ( ) ) { // an Option cannot be required if it is in an // OptionGroup, either the group is required or // nothing is required option . setRequired ( false ) ; addOption ( option ) ; optionGroups . put ( option . getKey ( ) , group ) ; } return this ; }
Add the specified option group .
104
6
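Note that member options are force-set to non-required: requiredness lives on the group, which the parser enforces as "exactly one of these must appear". A typical mutually exclusive pair, as commons-cli is normally used:

    OptionGroup mode = new OptionGroup();
    mode.addOption(new Option("start", "bring the service up"));
    mode.addOption(new Option("stop", "take the service down"));
    mode.setRequired(true);            // exactly one of -start / -stop must be given

    Options options = new Options();
    options.addOptionGroup(mode);      // members end up individually non-required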
155,487
public Options addOption ( String opt , String description ) { addOption ( opt , null , false , description ) ; return this ; }
Add an option that only contains a short name . The option does not take an argument .
28
18
155,488
public Options addOption ( String opt , boolean hasArg , String description ) { addOption ( opt , null , hasArg , description ) ; return this ; }
Add an option that only contains a short - name . It may be specified as requiring an argument .
33
20
155,489
public Options addOption ( String opt , String longOpt , boolean hasArg , String description ) { addOption ( new Option ( opt , longOpt , hasArg , description ) ) ; return this ; }
Add an option that contains a short - name and a long - name . It may be specified as requiring an argument .
42
24
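The three overloads above are thin conveniences over the Option constructor. Typical usage, then parsing; DefaultParser is the stock commons-cli parser, though an older copy of the library may expose GnuParser instead:

    Options options = new Options();
    options.addOption("v", "print the version");                  // short name, no argument
    options.addOption("p", true, "listen on the given port");     // short name, takes a value
    options.addOption("c", "config", true, "path to config file"); // short + long name, takes a value

    // parse() throws ParseException on bad input; args is the main() argument array
    CommandLine cmd = new DefaultParser().parse(options, args);
    if (cmd.hasOption("p")) {
        int port = Integer.parseInt(cmd.getOptionValue("p"));
        System.out.println("listening on " + port);
    }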
155,490
public static VoltXMLElement mergeTwoElementsUsingOperator ( String opName , String opElementId , VoltXMLElement first , VoltXMLElement second ) { if ( first == null || second == null ) { return first == null ? second : first ; } if ( opName == null || opElementId == null ) { return null ; } VoltXMLElement retval = new VoltXMLElement ( "operation" ) ; retval . attributes . put ( "id" , opElementId ) ; retval . attributes . put ( "optype" , opName ) ; retval . children . add ( first ) ; retval . children . add ( second ) ; return retval ; }
If one of the elements is null return the other one directly .
150
15
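Given two operand elements the helper wraps them in an operation node, and with either operand null the other is returned unchanged. A hypothetical equality comparison built with it; the optype and id values are arbitrary examples:

    VoltXMLElement col = new VoltXMLElement("columnref");   // left operand, attributes elided
    VoltXMLElement val = new VoltXMLElement("value");       // right operand
    VoltXMLElement eq = mergeTwoElementsUsingOperator("equal", "3", col, val);
    // eq is an <operation id="3" optype="equal"> node with col and val as its two children;
    // mergeTwoElementsUsingOperator("equal", "3", col, null) would return col itself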
155,491
public static List < VoltXMLElement > buildLimitElements ( int limit , String limitValueElementId ) { if ( limitValueElementId == null ) { return null ; } List < VoltXMLElement > retval = new ArrayList < VoltXMLElement > ( ) ; retval . add ( new VoltXMLElement ( "offset" ) ) ; VoltXMLElement limitElement = new VoltXMLElement ( "limit" ) ; String strLimit = String . valueOf ( limit ) ; limitElement . attributes . put ( "limit" , strLimit ) ; limitElement . children . add ( buildValueElement ( limitValueElementId , false , strLimit , "BIGINT" ) ) ; retval . add ( limitElement ) ; return retval ; }
Build VoltXMLElement for expression like LIMIT 1 .
165
12
155,492
public static VoltXMLElement buildColumnParamJoincondElement ( String opName , VoltXMLElement leftElement , String valueParamElementId , String opElementId ) { VoltXMLElement valueParamElement = buildValueElement ( valueParamElementId ) ; return mergeTwoElementsUsingOperator ( opName , opElementId , leftElement , valueParamElement ) ; }
Build VoltXMLElement for expression like column = ? .
80
12
155,493
public static VoltXMLElement buildParamElement ( String elementId , String index , String valueType ) { VoltXMLElement retval = new VoltXMLElement ( "parameter" ) ; retval . attributes . put ( "id" , elementId ) ; retval . attributes . put ( "index" , index ) ; retval . attributes . put ( "valuetype" , valueType ) ; return retval ; }
Build an element to be inserted under the parameters tree .
93
11
155,494
@ Override public void loadFromJSONObject ( JSONObject jobj , Database db ) throws JSONException { super . loadFromJSONObject ( jobj , db ) ; m_lookupType = IndexLookupType . get ( jobj . getString ( Members . LOOKUP_TYPE . name ( ) ) ) ; m_sortDirection = SortDirectionType . get ( jobj . getString ( Members . SORT_DIRECTION . name ( ) ) ) ; if ( jobj . has ( Members . HAS_OFFSET_RANK . name ( ) ) ) { m_hasOffsetRankOptimization = jobj . getBoolean ( Members . HAS_OFFSET_RANK . name ( ) ) ; } m_purpose = jobj . has ( Members . PURPOSE . name ( ) ) ? jobj . getInt ( Members . PURPOSE . name ( ) ) : FOR_SCANNING_PERFORMANCE_OR_ORDERING ; m_targetIndexName = jobj . getString ( Members . TARGET_INDEX_NAME . name ( ) ) ; m_catalogIndex = db . getTables ( ) . get ( super . m_targetTableName ) . getIndexes ( ) . get ( m_targetIndexName ) ; // load end_expression m_endExpression = AbstractExpression . fromJSONChild ( jobj , Members . END_EXPRESSION . name ( ) , m_tableScan ) ; // load initial_expression m_initialExpression = AbstractExpression . fromJSONChild ( jobj , Members . INITIAL_EXPRESSION . name ( ) , m_tableScan ) ; // load searchkey_expressions AbstractExpression . loadFromJSONArrayChild ( m_searchkeyExpressions , jobj , Members . SEARCHKEY_EXPRESSIONS . name ( ) , m_tableScan ) ; // load COMPARE_NOTDISTINCT flag vector loadBooleanArrayFromJSONObject ( jobj , Members . COMPARE_NOTDISTINCT . name ( ) , m_compareNotDistinct ) ; // load skip_null_predicate m_skip_null_predicate = AbstractExpression . fromJSONChild ( jobj , Members . SKIP_NULL_PREDICATE . name ( ) , m_tableScan ) ; }
all members loaded
502
3
155,495
public boolean isPredicatesOptimizableForAggregate ( ) { // for reverse scan, need to examine "added" predicates List < AbstractExpression > predicates = ExpressionUtil . uncombinePredicate ( m_predicate ) ; // if the size of predicates doesn't equal 1, can't be our added artifact predicates if ( predicates . size ( ) != 1 ) { return false ; } // examin the possible "added" predicates: NOT NULL expr. AbstractExpression expr = predicates . get ( 0 ) ; if ( expr . getExpressionType ( ) != ExpressionType . OPERATOR_NOT ) { return false ; } if ( expr . getLeft ( ) . getExpressionType ( ) != ExpressionType . OPERATOR_IS_NULL ) { return false ; } // Not reverse scan. if ( m_lookupType != IndexLookupType . LT && m_lookupType != IndexLookupType . LTE ) { return false ; } return true ; }
added for reverse scan purposes only
213
6
155,496
private void respondWithDummy ( ) { final FragmentResponseMessage response = new FragmentResponseMessage ( m_fragmentMsg , m_initiator . getHSId ( ) ) ; response . m_sourceHSId = m_initiator . getHSId ( ) ; response . setRecovering ( true ) ; response . setStatus ( FragmentResponseMessage . SUCCESS , null ) ; // Set the dependencies even if this is a dummy response. This site could be the master // on elastic join, so the fragment response message is actually going to the MPI. for ( int frag = 0 ; frag < m_fragmentMsg . getFragmentCount ( ) ; frag ++ ) { final int outputDepId = m_fragmentMsg . getOutputDepId ( frag ) ; response . addDependency ( new DependencyPair . BufferDependencyPair ( outputDepId , m_rawDummyResponse , 0 , m_rawDummyResponse . length ) ) ; } response . setRespBufferable ( m_respBufferable ) ; m_initiator . deliver ( response ) ; }
Respond with a dummy fragment response .
244
8
155,497
public FragmentResponseMessage processFragmentTask ( SiteProcedureConnection siteConnection ) { final FragmentResponseMessage currentFragResponse = new FragmentResponseMessage ( m_fragmentMsg , m_initiator . getHSId ( ) ) ; currentFragResponse . setStatus ( FragmentResponseMessage . SUCCESS , null ) ; for ( int frag = 0 ; frag < m_fragmentMsg . getFragmentCount ( ) ; frag ++ ) { final long fragmentId = VoltSystemProcedure . hashToFragId ( m_fragmentMsg . getPlanHash ( frag ) ) ; // equivalent to dep.depId: // final int outputDepId = m_fragmentMsg.getOutputDepId(frag); final VoltTrace . TraceEventBatch traceLog = VoltTrace . log ( VoltTrace . Category . SPSITE ) ; if ( traceLog != null ) { traceLog . add ( ( ) -> VoltTrace . beginDuration ( "runfragmenttask" , "txnId" , TxnEgo . txnIdToString ( getTxnId ( ) ) , "partition" , Integer . toString ( siteConnection . getCorrespondingPartitionId ( ) ) , "fragmentId" , String . valueOf ( fragmentId ) ) ) ; } ParameterSet params = m_fragmentMsg . getParameterSetForFragment ( frag ) ; try { // run the overloaded sysproc planfragment. pass an empty dependency // set since remote (non-aggregator) fragments don't receive dependencies. final DependencyPair dep = siteConnection . executeSysProcPlanFragment ( m_txnState , m_inputDeps , fragmentId , params ) ; // @Shutdown returns null, handle it here if ( dep != null ) { currentFragResponse . addDependency ( dep ) ; } } catch ( final EEException | SQLException | ReplicatedTableException e ) { hostLog . l7dlog ( Level . TRACE , LogKeys . host_ExecutionSite_ExceptionExecutingPF . name ( ) , new Object [ ] { Encoder . hexEncode ( m_fragmentMsg . getFragmentPlan ( frag ) ) } , e ) ; currentFragResponse . setStatus ( FragmentResponseMessage . UNEXPECTED_ERROR , e ) ; addDependencyToFragment ( currentFragResponse ) ; break ; } catch ( final SerializableException e ) { // Note that with SerializableException, the error code here might get changed before // the client/user sees it. It really just needs to indicate failure. // // Key point here vs the next catch block for VAE is to not wrap the subclass of // SerializableException here to preserve it during the serialization. // currentFragResponse . setStatus ( FragmentResponseMessage . USER_ERROR , e ) ; addDependencyToFragment ( currentFragResponse ) ; break ; } catch ( final VoltAbortException e ) { currentFragResponse . setStatus ( FragmentResponseMessage . USER_ERROR , new SerializableException ( CoreUtils . throwableToString ( e ) ) ) ; addDependencyToFragment ( currentFragResponse ) ; break ; } if ( traceLog != null ) { traceLog . add ( VoltTrace :: endDuration ) ; } } // we should never rollback DR buffer for MP sysprocs because we don't report the DR buffer size and therefore don't know if it is empty or not. currentFragResponse . setDrBufferSize ( 1 ) ; return currentFragResponse ; }
modified to work in the new world
780
7
155,498
@ Override public void run ( ) { try { VoltTable partitionKeys = null ; partitionKeys = m_client . callProcedure ( "@GetPartitionKeys" , "INTEGER" ) . getResults ( ) [ 0 ] ; while ( partitionKeys . advanceRow ( ) ) { m_client . callProcedure ( new NullCallback ( ) , "DeleteOldAdRequests" , partitionKeys . getLong ( "PARTITION_KEY" ) , m_expiredAgeInSeconds ) ; } m_client . callProcedure ( new NullCallback ( ) , "DeleteExpiredBids" ) ; } catch ( IOException | ProcCallException ex ) { ex . printStackTrace ( ) ; } }
Remove aged - out data from the ad_requests table . This table is partitioned and may be large so use the run - everywhere pattern to minimize impact to throughput .
158
35
155,499
public void updateLobUsage ( boolean commit ) { if ( ! hasLobOps ) { return ; } hasLobOps = false ; if ( commit ) { for ( int i = 0 ; i < createdLobs . size ( ) ; i ++ ) { long lobID = createdLobs . get ( i ) ; int delta = lobUsageCount . get ( lobID , 0 ) ; if ( delta == 1 ) { lobUsageCount . remove ( lobID ) ; createdLobs . remove ( i ) ; i -- ; } else if ( ! session . isBatch ) { database . lobManager . adjustUsageCount ( lobID , delta - 1 ) ; lobUsageCount . remove ( lobID ) ; createdLobs . remove ( i ) ; i -- ; } } if ( ! lobUsageCount . isEmpty ( ) ) { Iterator it = lobUsageCount . keySet ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { long lobID = it . nextLong ( ) ; int delta = lobUsageCount . get ( lobID ) ; database . lobManager . adjustUsageCount ( lobID , delta - 1 ) ; } lobUsageCount . clear ( ) ; } return ; } else { for ( int i = 0 ; i < createdLobs . size ( ) ; i ++ ) { long lobID = createdLobs . get ( i ) ; database . lobManager . deleteLob ( lobID ) ; } createdLobs . clear ( ) ; lobUsageCount . clear ( ) ; return ; } }
update LobManager usage counts ; delete lobs that have no usage
330
12