idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
155,000
/**
 * If the scan has no predicate of its own but its index is a partial index,
 * re-apply the partial index predicate (when the index was picked purely for
 * sort order) and mark the scan as a partial-index-only access.
 */
private static void updatePartialIndex(IndexScanPlanNode scan) {
    if (scan.getPredicate() != null || scan.getPartialIndexPredicate() == null) {
        return;
    }
    if (scan.isForSortOrderOnly()) {
        // The filtering implied by the partial index must not be lost.
        scan.setPredicate(Collections.singletonList(scan.getPartialIndexPredicate()));
    }
    scan.setForPartialIndexOnly();
}
Check if the index for the scan node is a partial index and, if so, make sure that the scan contains the index predicate and update the index-usage reason as needed.
93
30
155,001
/**
 * Fill in the {@code IndexGroupByInfo} for an existing index scan: which
 * GROUP BY columns the scan's index covers, and whether it covers all of them
 * (enabling fully serialized aggregation).
 */
private void calculateIndexGroupByInfo(IndexScanPlanNode root, IndexGroupByInfo gbInfo) {
    String scanAlias = root.getTargetTableAlias();
    assert(scanAlias != null);
    Index catalogIndex = root.getCatalogIndex();
    if (!IndexType.isScannable(catalogIndex.getType())) {
        // Non-scannable (e.g. hash) indexes cannot serve GROUP BY ordering.
        return;
    }
    ArrayList<AbstractExpression> bindings = new ArrayList<>();
    gbInfo.m_coveredGroupByColumns = calculateGroupbyColumnsCovered(catalogIndex, scanAlias, bindings);
    gbInfo.m_canBeFullySerialized =
            gbInfo.m_coveredGroupByColumns.size() == m_parsedSelect.groupByColumns().size();
}
Sets IndexGroupByInfo for an IndexScan
176
10
155,002
private AbstractPlanNode indexAccessForGroupByExprs ( SeqScanPlanNode root , IndexGroupByInfo gbInfo ) { if ( ! root . isPersistentTableScan ( ) ) { // subquery and common tables are not handled return root ; } String fromTableAlias = root . getTargetTableAlias ( ) ; assert ( fromTableAlias != null ) ; List < ParsedColInfo > groupBys = m_parsedSelect . groupByColumns ( ) ; Table targetTable = m_catalogDb . getTables ( ) . get ( root . getTargetTableName ( ) ) ; assert ( targetTable != null ) ; CatalogMap < Index > allIndexes = targetTable . getIndexes ( ) ; List < Integer > maxCoveredGroupByColumns = new ArrayList <> ( ) ; ArrayList < AbstractExpression > maxCoveredBindings = null ; Index pickedUpIndex = null ; boolean foundAllGroupByCoveredIndex = false ; for ( Index index : allIndexes ) { if ( ! IndexType . isScannable ( index . getType ( ) ) ) { continue ; } if ( ! index . getPredicatejson ( ) . isEmpty ( ) ) { // do not try to look at Partial/Sparse index continue ; } ArrayList < AbstractExpression > bindings = new ArrayList <> ( ) ; List < Integer > coveredGroupByColumns = calculateGroupbyColumnsCovered ( index , fromTableAlias , bindings ) ; if ( coveredGroupByColumns . size ( ) > maxCoveredGroupByColumns . size ( ) ) { maxCoveredGroupByColumns = coveredGroupByColumns ; pickedUpIndex = index ; maxCoveredBindings = bindings ; if ( maxCoveredGroupByColumns . size ( ) == groupBys . size ( ) ) { foundAllGroupByCoveredIndex = true ; break ; } } } if ( pickedUpIndex == null ) { return root ; } IndexScanPlanNode indexScanNode = new IndexScanPlanNode ( root , null , pickedUpIndex , SortDirectionType . INVALID ) ; indexScanNode . setForGroupingOnly ( ) ; indexScanNode . setBindings ( maxCoveredBindings ) ; gbInfo . m_coveredGroupByColumns = maxCoveredGroupByColumns ; gbInfo . m_canBeFullySerialized = foundAllGroupByCoveredIndex ; return indexScanNode ; }
Turn sequential scan to index scan for group by if possible
531
11
155,003
private static void fixDistributedApproxCountDistinct ( AggregatePlanNode distNode , AggregatePlanNode coordNode ) { assert ( distNode != null ) ; assert ( coordNode != null ) ; // Patch up any APPROX_COUNT_DISTINCT on the distributed node. List < ExpressionType > distAggTypes = distNode . getAggregateTypes ( ) ; boolean hasApproxCountDistinct = false ; for ( int i = 0 ; i < distAggTypes . size ( ) ; ++ i ) { ExpressionType et = distAggTypes . get ( i ) ; if ( et == ExpressionType . AGGREGATE_APPROX_COUNT_DISTINCT ) { hasApproxCountDistinct = true ; distNode . updateAggregate ( i , ExpressionType . AGGREGATE_VALS_TO_HYPERLOGLOG ) ; } } if ( hasApproxCountDistinct ) { // Now, patch up any APPROX_COUNT_DISTINCT on the coordinating node. List < ExpressionType > coordAggTypes = coordNode . getAggregateTypes ( ) ; for ( int i = 0 ; i < coordAggTypes . size ( ) ; ++ i ) { ExpressionType et = coordAggTypes . get ( i ) ; if ( et == ExpressionType . AGGREGATE_APPROX_COUNT_DISTINCT ) { coordNode . updateAggregate ( i , ExpressionType . AGGREGATE_HYPERLOGLOGS_TO_CARD ) ; } } } }
This function is called once it has been determined that we can push down an aggregation plan node.
333
19
155,004
protected AbstractPlanNode checkLimitPushDownViability ( AbstractPlanNode root ) { AbstractPlanNode receiveNode = root ; List < ParsedColInfo > orderBys = m_parsedSelect . orderByColumns ( ) ; boolean orderByCoversAllGroupBy = m_parsedSelect . groupByIsAnOrderByPermutation ( ) ; while ( ! ( receiveNode instanceof ReceivePlanNode ) ) { // Limitation: can only push past some nodes (see above comment) // Delete the aggregate node case to handle ENG-6485, // or say we don't push down meeting aggregate node // TODO: We might want to optimize/push down "limit" for some cases if ( ! ( receiveNode instanceof OrderByPlanNode ) && ! ( receiveNode instanceof ProjectionPlanNode ) && ! isValidAggregateNodeForLimitPushdown ( receiveNode , orderBys , orderByCoversAllGroupBy ) ) { return null ; } if ( receiveNode instanceof OrderByPlanNode ) { // if grouping by the partition key, // limit can still push down if ordered by aggregate values. if ( ! m_parsedSelect . hasPartitionColumnInGroupby ( ) && isOrderByAggregationValue ( m_parsedSelect . orderByColumns ( ) ) ) { return null ; } } // Traverse... if ( receiveNode . getChildCount ( ) == 0 ) { return null ; } // nothing that allows pushing past has multiple inputs assert ( receiveNode . getChildCount ( ) == 1 ) ; receiveNode = receiveNode . getChild ( 0 ) ; } return receiveNode . getChild ( 0 ) ; }
Check if we can push the limit node down .
357
10
155,005
/**
 * Collect the unique names of all columns that participate in any index
 * defined on the given table.
 */
private static Set<String> getIndexedColumnSetForTable(Table table) {
    Set<String> indexedColumnNames = new HashSet<>();
    for (Index index : table.getIndexes()) {
        for (ColumnRef ref : index.getColumns()) {
            indexedColumnNames.add(ref.getColumn().getTypeName());
        }
    }
    return indexedColumnNames;
}
Get the unique set of names of all columns that are part of an index on the given table .
88
20
155,006
private static boolean isNullRejecting ( Collection < String > tableAliases , List < AbstractExpression > exprs ) { for ( AbstractExpression expr : exprs ) { for ( String tableAlias : tableAliases ) { if ( ExpressionUtil . isNullRejectingExpression ( expr , tableAlias ) ) { // We are done at this level return true ; } } } return false ; }
Verify if an expression from the input list is NULL - rejecting for any of the tables from the list
87
21
155,007
private static List < AbstractExpression > findBindingsForOneIndexedExpression ( ExpressionOrColumn nextStatementEOC , ExpressionOrColumn indexEntry ) { assert ( nextStatementEOC . m_expr != null ) ; AbstractExpression nextStatementExpr = nextStatementEOC . m_expr ; if ( indexEntry . m_colRef != null ) { ColumnRef indexColRef = indexEntry . m_colRef ; // This is a column. So try to match it // with the expression in nextStatementEOC. if ( matchExpressionAndColumnRef ( nextStatementExpr , indexColRef , indexEntry . m_tableScan ) ) { return s_reusableImmutableEmptyBinding ; } return null ; } // So, this index entry is an expression. List < AbstractExpression > moreBindings = nextStatementEOC . m_expr . bindingToIndexedExpression ( indexEntry . m_expr ) ; if ( moreBindings != null ) { return moreBindings ; } return null ; }
Match the indexEntry which is from an index with a statement expression or column . The nextStatementEOC must be an expression not a column reference .
219
30
155,008
private static boolean removeExactMatchCoveredExpressions ( AbstractExpression coveringExpr , List < AbstractExpression > exprsToCover ) { boolean hasMatch = false ; Iterator < AbstractExpression > iter = exprsToCover . iterator ( ) ; while ( iter . hasNext ( ) ) { AbstractExpression exprToCover = iter . next ( ) ; if ( coveringExpr . bindingToIndexedExpression ( exprToCover ) != null ) { iter . remove ( ) ; hasMatch = true ; // need to keep going to remove all matches } } return hasMatch ; }
Loop over the expressions to cover to find ones that exactly match the covering expression and remove them from the original list . Returns true if there is at least one match . False otherwise .
126
36
155,009
private static List < AbstractExpression > removeNotNullCoveredExpressions ( StmtTableScan tableScan , List < AbstractExpression > coveringExprs , List < AbstractExpression > exprsToCover ) { // Collect all TVEs from NULL-rejecting covering expressions Set < TupleValueExpression > coveringTves = new HashSet <> ( ) ; for ( AbstractExpression coveringExpr : coveringExprs ) { if ( ExpressionUtil . isNullRejectingExpression ( coveringExpr , tableScan . getTableAlias ( ) ) ) { coveringTves . addAll ( ExpressionUtil . getTupleValueExpressions ( coveringExpr ) ) ; } } // For each NOT NULL expression to cover extract the TVE expressions. If all of them are also part // of the covering NULL-rejecting collection then this NOT NULL expression is covered Iterator < AbstractExpression > iter = exprsToCover . iterator ( ) ; while ( iter . hasNext ( ) ) { AbstractExpression filter = iter . next ( ) ; if ( ExpressionType . OPERATOR_NOT == filter . getExpressionType ( ) ) { assert ( filter . getLeft ( ) != null ) ; if ( ExpressionType . OPERATOR_IS_NULL == filter . getLeft ( ) . getExpressionType ( ) ) { assert ( filter . getLeft ( ) . getLeft ( ) != null ) ; List < TupleValueExpression > tves = ExpressionUtil . getTupleValueExpressions ( filter . getLeft ( ) . getLeft ( ) ) ; if ( coveringTves . containsAll ( tves ) ) { iter . remove ( ) ; } } } } return exprsToCover ; }
Remove NOT NULL expressions that are covered by the NULL - rejecting expressions . For example COL IS NOT NULL is covered by the COL > 0 NULL - rejecting comparison expression .
368
33
155,010
/**
 * Insert a send/receive pair above the supplied scan node and return the
 * new receive node at the top.
 */
protected static AbstractPlanNode addSendReceivePair(AbstractPlanNode scanNode) {
    SendPlanNode send = new SendPlanNode();
    send.addAndLinkChild(scanNode);
    ReceivePlanNode receive = new ReceivePlanNode();
    receive.addAndLinkChild(send);
    return receive;
}
Insert a send receive pair above the supplied scanNode .
79
11
155,011
protected static AbstractPlanNode getAccessPlanForTable ( JoinNode tableNode ) { StmtTableScan tableScan = tableNode . getTableScan ( ) ; // Access path to access the data in the table (index/scan/etc). AccessPath path = tableNode . m_currentAccessPath ; assert ( path != null ) ; // if no index, it is a sequential scan if ( path . index == null ) { return getScanAccessPlanForTable ( tableScan , path ) ; } return getIndexAccessPlanForTable ( tableScan , path ) ; }
Given an access path, build the single-site or distributed plan that will access the data from the table according to the path.
119
25
155,012
private static AbstractPlanNode injectIndexedJoinWithMaterializedScan ( AbstractExpression listElements , IndexScanPlanNode scanNode ) { MaterializedScanPlanNode matScan = new MaterializedScanPlanNode ( ) ; assert ( listElements instanceof VectorValueExpression || listElements instanceof ParameterValueExpression ) ; matScan . setRowData ( listElements ) ; matScan . setSortDirection ( scanNode . getSortDirection ( ) ) ; NestLoopIndexPlanNode nlijNode = new NestLoopIndexPlanNode ( ) ; nlijNode . setJoinType ( JoinType . INNER ) ; nlijNode . addInlinePlanNode ( scanNode ) ; nlijNode . addAndLinkChild ( matScan ) ; // resolve the sort direction nlijNode . resolveSortDirection ( ) ; return nlijNode ; }
Generate a plan for an IN - LIST - driven index scan
189
13
155,013
/**
 * Replace the first end-expression whose right-hand side is the given
 * IN-LIST rhs with an equality comparison against {@code equalityRhs}.
 * At most one filter is replaced; the list is unchanged if none matches.
 */
private static void replaceInListFilterWithEqualityFilter(
        List<AbstractExpression> endExprs, AbstractExpression inListRhs,
        AbstractExpression equalityRhs) {
    // Index-based iteration so we can mutate the list safely.
    for (int i = 0; i < endExprs.size(); ++i) {
        AbstractExpression comparator = endExprs.get(i);
        if (comparator.getRight() == inListRhs) {
            endExprs.remove(i);
            endExprs.add(new ComparisonExpression(
                    ExpressionType.COMPARE_EQUAL, comparator.getLeft(), equalityRhs));
            return;
        }
    }
}
Replace the IN-LIST filter that references the first given rhs with an equality filter referencing the second given rhs.
139
11
155,014
CallEvent [ ] makeRandomEvent ( ) { long callId = ++ lastCallIdUsed ; // get agentid Integer agentId = agentsAvailable . poll ( ) ; if ( agentId == null ) { return null ; } // get phone number Long phoneNo = phoneNumbersAvailable . poll ( ) ; assert ( phoneNo != null ) ; // voltdb timestamp type uses micros from epoch Date startTS = new Date ( currentSystemMilliTimestamp ) ; long durationms = - 1 ; long meancalldurationms = config . meancalldurationseconds * 1000 ; long maxcalldurationms = config . maxcalldurationseconds * 1000 ; double stddev = meancalldurationms / 2.0 ; // repeat until in the range (0..maxcalldurationms] while ( ( durationms <= 0 ) || ( durationms > maxcalldurationms ) ) { durationms = ( long ) ( rand . nextGaussian ( ) * stddev ) + meancalldurationms ; } Date endTS = new Date ( startTS . getTime ( ) + durationms ) ; CallEvent [ ] event = new CallEvent [ 2 ] ; event [ 0 ] = new CallEvent ( callId , agentId , phoneNo , startTS , null ) ; event [ 1 ] = new CallEvent ( callId , agentId , phoneNo , null , endTS ) ; // some debugging code //System.out.println("Creating event with range:"); //System.out.println(new Date(startTS.getTime() / 1000)); //System.out.println(new Date(endTS.getTime() / 1000)); return event ; }
Generate a random call event with a duration .
359
10
155,015
@ Override public CallEvent next ( long systemCurrentTimeMillis ) { // check for time passing if ( systemCurrentTimeMillis > currentSystemMilliTimestamp ) { // build a target for this 1ms window long eventBacklog = targetEventsThisMillisecond - eventsSoFarThisMillisecond ; targetEventsThisMillisecond = ( long ) Math . floor ( targetEventsPerMillisecond ) ; double targetFraction = targetEventsPerMillisecond - targetEventsThisMillisecond ; targetEventsThisMillisecond += ( rand . nextDouble ( ) <= targetFraction ) ? 1 : 0 ; targetEventsThisMillisecond += eventBacklog ; // reset counter for this 1ms window eventsSoFarThisMillisecond = 0 ; currentSystemMilliTimestamp = systemCurrentTimeMillis ; } // drain scheduled events first CallEvent callEvent = delayedEvents . nextReady ( systemCurrentTimeMillis ) ; if ( callEvent != null ) { // double check this is an end event assert ( callEvent . startTS == null ) ; assert ( callEvent . endTS != null ) ; // return the agent/phone for this event to the available lists agentsAvailable . add ( callEvent . agentId ) ; phoneNumbersAvailable . add ( callEvent . phoneNo ) ; validate ( ) ; return callEvent ; } // check if we made all the target events for this 1ms window if ( targetEventsThisMillisecond == eventsSoFarThisMillisecond ) { validate ( ) ; return null ; } // generate rando event (begin/end pair) CallEvent [ ] event = makeRandomEvent ( ) ; // this means all agents are busy if ( event == null ) { validate ( ) ; return null ; } // schedule the end event long endTimeKey = event [ 1 ] . endTS . getTime ( ) ; assert ( ( endTimeKey - systemCurrentTimeMillis ) < ( config . maxcalldurationseconds * 1000 ) ) ; delayedEvents . add ( endTimeKey , event [ 1 ] ) ; eventsSoFarThisMillisecond ++ ; validate ( ) ; return event [ 0 ] ; }
Return the next call event that is safe for delivery or null if there are no safe objects to deliver .
452
21
155,016
/**
 * Smoke-check the internal bookkeeping: the number of busy agents, busy
 * phone numbers, and pending call-end events must all agree.
 */
private void validate() {
    long pendingEnds = delayedEvents.size();
    long busyAgents = config.agents - agentsAvailable.size();
    long busyPhones = config.numbers - phoneNumbersAvailable.size();
    if (busyAgents != busyPhones) {
        throw new RuntimeException(String.format(
                "outstandingAgents (%d) != outstandingPhones (%d)", busyAgents, busyPhones));
    }
    if (busyAgents != pendingEnds) {
        throw new RuntimeException(String.format(
                "outstandingAgents (%d) != delayedEventCount (%d)", busyAgents, pendingEnds));
    }
}
Smoke check on the validity of the data structures. This was useful while getting the code right for this class, but it doesn't do much now unless the code needs changes.
142
33
155,017
/**
 * Debug helper: print the free-agent, free-phone, and pending-event counts
 * so users can verify no events were lost or stuck.
 */
void printSummary() {
    long freeAgents = agentsAvailable.size();
    long freePhones = phoneNumbersAvailable.size();
    long pendingEvents = delayedEvents.size();
    System.out.printf(
            "There are %d agents outstanding and %d phones. %d entries waiting to go.\n",
            freeAgents, freePhones, pendingEvents);
}
Debug statement to help users verify there are no lost or delayed events .
59
14
155,018
/**
 * Tells whether the given token is a negative number: a leading '-'
 * followed by text that parses as a double (e.g. "-2", "-1.5").
 *
 * Bug fix: the previous version returned true for ANY parseable number,
 * positive included, contradicting the method's name. Callers today only
 * reach this check for '-'-prefixed tokens, so requiring the sign here is
 * backward-compatible at the call sites and makes the method safe to call
 * directly. -- NOTE(review): confirm no caller relies on positive tokens.
 *
 * @param token the command-line token to test (non-null)
 * @return true if the token is a parseable negative number
 */
private boolean isNegativeNumber(String token) {
    if (!token.startsWith("-")) {
        return false;
    }
    try {
        Double.parseDouble(token);
        return true;
    } catch (NumberFormatException e) {
        return false;
    }
}
Check if the token is a negative number .
38
9
155,019
private boolean isLongOption ( String token ) { if ( ! token . startsWith ( "-" ) || token . length ( ) == 1 ) { return false ; } int pos = token . indexOf ( "=" ) ; String t = pos == - 1 ? token : token . substring ( 0 , pos ) ; if ( ! options . getMatchingOptions ( t ) . isEmpty ( ) ) { // long or partial long options (--L, -L, --L=V, -L=V, --l, --l=V) return true ; } else if ( getLongPrefix ( token ) != null && ! token . startsWith ( "--" ) ) { // -LV return true ; } return false ; }
Tells if the token looks like a long option .
157
11
155,020
/**
 * Handle an unrecognized token: throw when it looks like an option (unless
 * stopAtNonOption is set); otherwise record it as a plain argument. When
 * stopAtNonOption is set, parsing stops and the remaining tokens are taken
 * as-is.
 *
 * @throws ParseException if the token is an unrecognized option
 */
private void handleUnknownToken(String token) throws ParseException {
    boolean looksLikeOption = token.startsWith("-") && token.length() > 1;
    if (looksLikeOption && !stopAtNonOption) {
        throw new UnrecognizedOptionException("Unrecognized option: " + token, token);
    }
    cmd.addArg(token);
    if (stopAtNonOption) {
        skipParsing = true;
    }
}
Handles an unknown token . If the token starts with a dash an UnrecognizedOptionException is thrown . Otherwise the token is added to the arguments of the command line . If the stopAtNonOption flag is set this stops the parsing and the remaining tokens are added as - is in the arguments of the command line .
87
65
155,021
/**
 * Get the text of the represented SQL statement, decoding the UTF-8 bytes
 * lazily and caching the result.
 */
public String getText() {
    String text = sqlTextStr;
    if (text == null) {
        text = new String(sqlText, Constants.UTF8ENCODING);
        sqlTextStr = text;
    }
    return text;
}
Get the text of the SQL statement represented .
43
9
155,022
public static String canonicalizeStmt ( String stmtStr ) { // Cleanup whitespace newlines and adding semicolon for catalog compatibility stmtStr = stmtStr . replaceAll ( "\n" , " " ) ; stmtStr = stmtStr . trim ( ) ; if ( ! stmtStr . endsWith ( ";" ) ) { stmtStr += ";" ; } return stmtStr ; }
Canonicalize the statement text so that the same statement is always used to compute the CRC.
89
9
155,023
/**
 * Build a lookup table with true entries at the character values present in
 * {@code safeChars}. The table is sized to the highest character present
 * (an empty input yields a zero-length table).
 */
private static boolean[] createSafeOctets(String safeChars) {
    int maxChar = -1;
    for (int i = 0; i < safeChars.length(); ++i) {
        maxChar = Math.max(safeChars.charAt(i), maxChar);
    }
    boolean[] safe = new boolean[maxChar + 1];
    for (int i = 0; i < safeChars.length(); ++i) {
        safe[safeChars.charAt(i)] = true;
    }
    return safe;
}
Creates a boolean array with entries corresponding to the character values specified in safeChars set to true . The array is as small as is required to hold the given character information .
103
36
155,024
/**
 * Returns the table name for a column expression as a string; empty string
 * for any op type other than MULTICOLUMN or COLUMN.
 */
String getTableName() {
    if (opType == OpTypes.MULTICOLUMN) {
        return tableName;
    }
    if (opType != OpTypes.COLUMN) {
        return "";
    }
    // Prefer the resolved range variable's table name when available.
    return rangeVariable == null
            ? tableName
            : rangeVariable.getTable().getName().name;
}
Returns the table name for a column expression as a string
75
11
155,025
/**
 * VoltDB-added method: annotate the given XML element with the column's
 * table, column, alias, tablealias, and index attributes for a
 * non-catalog-dependent representation of this HSQLDB object.
 */
VoltXMLElement voltAnnotateColumnXML(VoltXMLElement exp) {
    if (tableName != null) {
        // System subqueries are identified by their alias, not their name.
        boolean systemSubquery = rangeVariable != null
                && rangeVariable.rangeTable != null
                && rangeVariable.tableAlias != null
                && rangeVariable.rangeTable.tableType == TableBase.SYSTEM_SUBQUERY;
        String tableAttr = systemSubquery
                ? rangeVariable.tableAlias.name.toUpperCase()
                : tableName.toUpperCase();
        exp.attributes.put("table", tableAttr);
    }
    exp.attributes.put("column", columnName.toUpperCase());
    if ((alias == null) || (getAlias().length() == 0)) {
        // No explicit alias: fall back to the column name.
        exp.attributes.put("alias", columnName.toUpperCase());
    }
    if (rangeVariable != null && rangeVariable.tableAlias != null) {
        exp.attributes.put("tablealias", rangeVariable.tableAlias.name.toUpperCase());
    }
    exp.attributes.put("index", Integer.toString(columnIndex));
    return exp;
}
VoltDB added method to provide detail for a non - catalog - dependent representation of this HSQLDB object .
256
23
155,026
void parseTablesAndParams ( VoltXMLElement root ) { // Parse parameters first to satisfy a dependency of expression parsing // which happens during table scan parsing. parseParameters ( root ) ; parseCommonTableExpressions ( root ) ; for ( VoltXMLElement node : root . children ) { if ( node . name . equalsIgnoreCase ( "tablescan" ) ) { parseTable ( node ) ; } else if ( node . name . equalsIgnoreCase ( "tablescans" ) ) { parseTables ( node ) ; } } }
Parse tables and parameters .
120
6
155,027
/**
 * Translate a VoltXMLElement expression node into an AbstractExpression by
 * dispatching through the m_exprParsers lookup table.
 *
 * @throws PlanningErrorException for an element with no registered parser
 */
private AbstractExpression parseExpressionNode(VoltXMLElement exprNode) {
    String elementName = exprNode.name.toLowerCase();
    XMLElementExpressionParser handler = m_exprParsers.get(elementName);
    if (handler == null) {
        throw new PlanningErrorException("Unsupported expression node '" + elementName + "'");
    }
    AbstractExpression parsed = handler.parse(this, exprNode);
    // Only "asterisk" nodes may legitimately translate to null.
    assert("asterisk".equals(elementName) || parsed != null);
    return parsed;
}
Given a VoltXMLElement expression node translate it into an AbstractExpression . This is mostly a lookup in the table m_exprParsers .
123
31
155,028
private AbstractExpression parseVectorExpression ( VoltXMLElement exprNode ) { ArrayList < AbstractExpression > args = new ArrayList <> ( ) ; for ( VoltXMLElement argNode : exprNode . children ) { assert ( argNode != null ) ; // recursively parse each argument subtree (could be any kind of expression). AbstractExpression argExpr = parseExpressionNode ( argNode ) ; assert ( argExpr != null ) ; args . add ( argExpr ) ; } VectorValueExpression vve = new VectorValueExpression ( ) ; vve . setValueType ( VoltType . VOLTTABLE ) ; vve . setArgs ( args ) ; return vve ; }
Parse a Vector value for SQL - IN
153
9
155,029
private SelectSubqueryExpression parseSubqueryExpression ( VoltXMLElement exprNode ) { assert ( exprNode . children . size ( ) == 1 ) ; VoltXMLElement subqueryElmt = exprNode . children . get ( 0 ) ; AbstractParsedStmt subqueryStmt = parseSubquery ( subqueryElmt ) ; // add table to the query cache String withoutAlias = null ; StmtSubqueryScan stmtSubqueryScan = addSubqueryToStmtCache ( subqueryStmt , withoutAlias ) ; // Set to the default SELECT_SUBQUERY. May be overridden depending on the context return new SelectSubqueryExpression ( ExpressionType . SELECT_SUBQUERY , stmtSubqueryScan ) ; }
Parse an expression subquery
161
6
155,030
private StmtTableScan resolveStmtTableScanByAlias ( String tableAlias ) { StmtTableScan tableScan = getStmtTableScanByAlias ( tableAlias ) ; if ( tableScan != null ) { return tableScan ; } if ( m_parentStmt != null ) { // This may be a correlated subquery return m_parentStmt . resolveStmtTableScanByAlias ( tableAlias ) ; } return null ; }
Return StmtTableScan by table alias . In case of correlated queries may need to walk up the statement tree .
93
23
155,031
protected StmtTableScan addTableToStmtCache ( Table table , String tableAlias ) { // Create an index into the query Catalog cache StmtTableScan tableScan = m_tableAliasMap . get ( tableAlias ) ; if ( tableScan == null ) { tableScan = new StmtTargetTableScan ( table , tableAlias , m_stmtId ) ; m_tableAliasMap . put ( tableAlias , tableScan ) ; } return tableScan ; }
Add a table to the statement cache .
99
8
155,032
protected StmtSubqueryScan addSubqueryToStmtCache ( AbstractParsedStmt subquery , String tableAlias ) { assert ( subquery != null ) ; // If there is no usable alias because the subquery is inside an expression, // generate a unique one for internal use. if ( tableAlias == null ) { tableAlias = AbstractParsedStmt . TEMP_TABLE_NAME + "_" + subquery . m_stmtId ; } StmtSubqueryScan subqueryScan = new StmtSubqueryScan ( subquery , tableAlias , m_stmtId ) ; StmtTableScan prior = m_tableAliasMap . put ( tableAlias , subqueryScan ) ; assert ( prior == null ) ; return subqueryScan ; }
Add a sub - query to the statement cache .
163
10
155,033
private StmtTargetTableScan addSimplifiedSubqueryToStmtCache ( StmtSubqueryScan subqueryScan , StmtTargetTableScan tableScan ) { String tableAlias = subqueryScan . getTableAlias ( ) ; assert ( tableAlias != null ) ; // It is guaranteed by the canSimplifySubquery that there is // one and only one TABLE in the subquery's FROM clause. Table promotedTable = tableScan . getTargetTable ( ) ; StmtTargetTableScan promotedScan = new StmtTargetTableScan ( promotedTable , tableAlias , m_stmtId ) ; // Keep the original subquery scan to be able to tie the parent // statement column/table names and aliases to the table's. promotedScan . setOriginalSubqueryScan ( subqueryScan ) ; // Replace the subquery scan with the table scan StmtTableScan prior = m_tableAliasMap . put ( tableAlias , promotedScan ) ; assert ( prior == subqueryScan ) ; m_tableList . add ( promotedTable ) ; return promotedScan ; }
Replace an existing subquery scan with its underlying table scan . The subquery has already passed all the checks from the canSimplifySubquery method . Subquery ORDER BY clause is ignored if such exists .
224
43
155,034
protected AbstractExpression replaceExpressionsWithPve ( AbstractExpression expr ) { assert ( expr != null ) ; if ( expr instanceof TupleValueExpression ) { int paramIdx = ParameterizationInfo . getNextParamIndex ( ) ; ParameterValueExpression pve = new ParameterValueExpression ( paramIdx , expr ) ; m_parameterTveMap . put ( paramIdx , expr ) ; return pve ; } if ( expr instanceof AggregateExpression ) { int paramIdx = ParameterizationInfo . getNextParamIndex ( ) ; ParameterValueExpression pve = new ParameterValueExpression ( paramIdx , expr ) ; // Disallow aggregation of parent columns in a subquery. // except the case HAVING AGG(T1.C1) IN (SELECT T2.C2 ...) List < TupleValueExpression > tves = ExpressionUtil . getTupleValueExpressions ( expr ) ; assert ( m_parentStmt != null ) ; for ( TupleValueExpression tve : tves ) { int origId = tve . getOrigStmtId ( ) ; if ( m_stmtId != origId && m_parentStmt . m_stmtId != origId ) { throw new PlanningErrorException ( "Subqueries do not support aggregation of parent statement columns" ) ; } } m_parameterTveMap . put ( paramIdx , expr ) ; return pve ; } if ( expr . getLeft ( ) != null ) { expr . setLeft ( replaceExpressionsWithPve ( expr . getLeft ( ) ) ) ; } if ( expr . getRight ( ) != null ) { expr . setRight ( replaceExpressionsWithPve ( expr . getRight ( ) ) ) ; } if ( expr . getArgs ( ) != null ) { List < AbstractExpression > newArgs = new ArrayList <> ( ) ; for ( AbstractExpression argument : expr . getArgs ( ) ) { newArgs . add ( replaceExpressionsWithPve ( argument ) ) ; } expr . setArgs ( newArgs ) ; } return expr ; }
Helper method to replace all TVEs and aggregated expressions with the corresponding PVEs . The original expressions are placed into the map to be propagated to the EE . The key to the map is the parameter index .
465
44
155,035
/**
 * Look up a common table by name, walking up through parent scopes. Unlike
 * resolveStmtTableScanByAlias this matches common tables by NAME only.
 * Returns a new scan over the shared definition, or null if not found.
 */
private StmtCommonTableScan resolveCommonTableByName(String tableName, String tableAlias) {
    StmtCommonTableScanShared shared = null;
    AbstractParsedStmt scope = this;
    while (scope != null && shared == null) {
        shared = scope.getCommonTableByName(tableName);
        scope = scope.getParentStmt();
    }
    return shared == null
            ? null
            : new StmtCommonTableScan(tableName, tableAlias, shared);
}
Look for a common table by name possibly in parent scopes . This is different from resolveStmtTableByAlias in that it looks for common tables and only by name not by alias . Of course a name and an alias are both strings so this is kind of a stylized distinction .
118
58
155,036
protected void parseParameters ( VoltXMLElement root ) { VoltXMLElement paramsNode = null ; for ( VoltXMLElement node : root . children ) { if ( node . name . equalsIgnoreCase ( "parameters" ) ) { paramsNode = node ; break ; } } if ( paramsNode == null ) { return ; } for ( VoltXMLElement node : paramsNode . children ) { if ( node . name . equalsIgnoreCase ( "parameter" ) ) { long id = Long . parseLong ( node . attributes . get ( "id" ) ) ; String typeName = node . attributes . get ( "valuetype" ) ; String isVectorParam = node . attributes . get ( "isvector" ) ; // Get the index for this parameter in the EE's parameter vector String indexAttr = node . attributes . get ( "index" ) ; assert ( indexAttr != null ) ; int index = Integer . parseInt ( indexAttr ) ; VoltType type = VoltType . typeFromString ( typeName ) ; ParameterValueExpression pve = new ParameterValueExpression ( ) ; pve . setParameterIndex ( index ) ; pve . setValueType ( type ) ; if ( isVectorParam != null && isVectorParam . equalsIgnoreCase ( "true" ) ) { pve . setParamIsVector ( ) ; } m_paramsById . put ( id , pve ) ; getParamsByIndex ( ) . put ( index , pve ) ; } } }
Populate the statement s paramList from the parameters element . Each parameter has an id and an index both of which are numeric . It also has a type and an indication of whether it s a vector parameter . For each parameter we create a ParameterValueExpression named pve which holds the type and vector parameter indication . We add the pve to two maps m_paramsById and m_paramsByIndex .
328
84
155,037
/**
 * Merge a UNION child statement's parameter bookkeeping (parameter TVE map
 * and by-index parameter map) into this statement's maps so the parameters
 * are correctly mapped in the EE.
 */
protected void promoteUnionParametersFromChild(AbstractParsedStmt childStmt) {
    m_parameterTveMap.putAll(childStmt.m_parameterTveMap);
    getParamsByIndex().putAll(childStmt.getParamsByIndex());
}
Promote the parameters of a UNION's child statement into this statement so they are correctly mapped in the EE.
68
4
155,038
HashMap < AbstractExpression , Set < AbstractExpression > > analyzeValueEquivalence ( ) { // collect individual where/join expressions m_joinTree . analyzeJoinExpressions ( m_noTableSelectionList ) ; return m_joinTree . getAllEquivalenceFilters ( ) ; }
Collect value equivalence expressions across the entire SQL statement
65
10
155,039
/**
 * Look up a table by exact name in this statement's catalog database;
 * returns null when the name is unknown (e.g. a subquery alias).
 */
protected Table getTableFromDB(String tableName) {
    return m_db.getTables().getExact(tableName);
}
Look up a table by name . This table may be stored in the local catalog or else the global catalog .
37
22
155,040
private AbstractExpression parseTableCondition ( VoltXMLElement tableScan , String joinOrWhere ) { AbstractExpression condExpr = null ; for ( VoltXMLElement childNode : tableScan . children ) { if ( ! childNode . name . equalsIgnoreCase ( joinOrWhere ) ) { continue ; } assert ( childNode . children . size ( ) == 1 ) ; assert ( condExpr == null ) ; condExpr = parseConditionTree ( childNode . children . get ( 0 ) ) ; assert ( condExpr != null ) ; ExpressionUtil . finalizeValueTypes ( condExpr ) ; condExpr = ExpressionUtil . evaluateExpression ( condExpr ) ; // If the condition is a trivial CVE(TRUE) (after the evaluation) simply drop it if ( ConstantValueExpression . isBooleanTrue ( condExpr ) ) { condExpr = null ; } } return condExpr ; }
Parse a where or join clause . This behavior is common to all kinds of statements .
201
18
155,041
LimitPlanNode limitPlanNodeFromXml ( VoltXMLElement limitXml , VoltXMLElement offsetXml ) { if ( limitXml == null && offsetXml == null ) { return null ; } String node ; long limitParameterId = - 1 ; long offsetParameterId = - 1 ; long limit = - 1 ; long offset = 0 ; if ( limitXml != null ) { // Parse limit if ( ( node = limitXml . attributes . get ( "limit_paramid" ) ) != null ) { limitParameterId = Long . parseLong ( node ) ; } else { assert ( limitXml . children . size ( ) == 1 ) ; VoltXMLElement valueNode = limitXml . children . get ( 0 ) ; String isParam = valueNode . attributes . get ( "isparam" ) ; if ( ( isParam != null ) && ( isParam . equalsIgnoreCase ( "true" ) ) ) { limitParameterId = Long . parseLong ( valueNode . attributes . get ( "id" ) ) ; } else { node = limitXml . attributes . get ( "limit" ) ; assert ( node != null ) ; limit = Long . parseLong ( node ) ; } } } if ( offsetXml != null ) { // Parse offset if ( ( node = offsetXml . attributes . get ( "offset_paramid" ) ) != null ) { offsetParameterId = Long . parseLong ( node ) ; } else { if ( offsetXml . children . size ( ) == 1 ) { VoltXMLElement valueNode = offsetXml . children . get ( 0 ) ; String isParam = valueNode . attributes . get ( "isparam" ) ; if ( ( isParam != null ) && ( isParam . equalsIgnoreCase ( "true" ) ) ) { offsetParameterId = Long . parseLong ( valueNode . attributes . get ( "id" ) ) ; } else { node = offsetXml . attributes . get ( "offset" ) ; assert ( node != null ) ; offset = Long . parseLong ( node ) ; } } } } // limit and offset can't have both value and parameter if ( limit != - 1 ) assert limitParameterId == - 1 : "Parsed value and param. limit." ; if ( offset != 0 ) assert offsetParameterId == - 1 : "Parsed value and param. offset." ; LimitPlanNode limitPlanNode = new LimitPlanNode ( ) ; limitPlanNode . setLimit ( ( int ) limit ) ; limitPlanNode . setOffset ( ( int ) offset ) ; limitPlanNode . 
setLimitParameterIndex ( parameterCountIndexById ( limitParameterId ) ) ; limitPlanNode . setOffsetParameterIndex ( parameterCountIndexById ( offsetParameterId ) ) ; return limitPlanNode ; }
Produce a LimitPlanNode from the given XML
601
10
155,042
protected boolean producesOneRowOutput ( ) { if ( m_tableAliasMap . size ( ) != 1 ) { return false ; } // Get the table. There's only one. StmtTableScan scan = m_tableAliasMap . values ( ) . iterator ( ) . next ( ) ; Table table = getTableFromDB ( scan . getTableName ( ) ) ; // May be sub-query? If can't find the table there's no use to continue. if ( table == null ) { return false ; } // Get all the indexes defined on the table CatalogMap < Index > indexes = table . getIndexes ( ) ; if ( indexes == null || indexes . size ( ) == 0 ) { // no indexes defined on the table return false ; } // Collect value equivalence expression for the SQL statement HashMap < AbstractExpression , Set < AbstractExpression > > valueEquivalence = analyzeValueEquivalence ( ) ; // If no value equivalence filter defined in SQL statement, there's no use to continue if ( valueEquivalence . isEmpty ( ) ) { return false ; } // Collect all tve expressions from value equivalence set which have equivalence // defined to parameterized or constant value expression. // Eg: T.A = ? or T.A = 1 Set < AbstractExpression > parameterizedConstantKeys = new HashSet <> ( ) ; Set < AbstractExpression > valueEquivalenceKeys = valueEquivalence . keySet ( ) ; // get all the keys for ( AbstractExpression key : valueEquivalenceKeys ) { if ( key instanceof TupleValueExpression ) { Set < AbstractExpression > values = valueEquivalence . get ( key ) ; for ( AbstractExpression value : values ) { if ( ( value instanceof ParameterValueExpression ) || ( value instanceof ConstantValueExpression ) ) { TupleValueExpression tve = ( TupleValueExpression ) key ; parameterizedConstantKeys . add ( tve ) ; } } } } // Iterate over the unique indexes defined on the table to check if the unique // index defined on table appears in tve equivalence expression gathered above. for ( Index index : indexes ) { // Perform lookup only on pure column indices which are unique if ( ! index . getUnique ( ) || ! index . getExpressionsjson ( ) . 
isEmpty ( ) ) { continue ; } Set < AbstractExpression > indexExpressions = new HashSet <> ( ) ; CatalogMap < ColumnRef > indexColRefs = index . getColumns ( ) ; for ( ColumnRef indexColRef : indexColRefs ) { Column col = indexColRef . getColumn ( ) ; TupleValueExpression tve = new TupleValueExpression ( scan . getTableName ( ) , scan . getTableAlias ( ) , col . getName ( ) , col . getName ( ) , col . getIndex ( ) ) ; indexExpressions . add ( tve ) ; } if ( parameterizedConstantKeys . containsAll ( indexExpressions ) ) { return true ; } } return false ; }
row else false
661
3
155,043
public Collection < String > calculateUDFDependees ( ) { List < String > answer = new ArrayList <> ( ) ; Collection < AbstractExpression > fCalls = findAllSubexpressionsOfClass ( FunctionExpression . class ) ; for ( AbstractExpression fCall : fCalls ) { FunctionExpression fexpr = ( FunctionExpression ) fCall ; if ( fexpr . isUserDefined ( ) ) { answer . add ( fexpr . getFunctionName ( ) ) ; } } return answer ; }
Calculate the UDF dependees . These are the UDFs called in an expression in this procedure .
116
23
155,044
public void statementGuaranteesDeterminism ( boolean hasLimitOrOffset , boolean order , String contentDeterminismDetail ) { m_statementHasLimitOrOffset = hasLimitOrOffset ; m_statementIsOrderDeterministic = order ; if ( contentDeterminismDetail != null ) { m_contentDeterminismDetail = contentDeterminismDetail ; } }
Mark the level of result determinism imposed by the statement which can save us from a difficult determination based on the plan graph .
82
25
155,045
private static void setParamIndexes ( BitSet ints , List < AbstractExpression > params ) { for ( AbstractExpression ae : params ) { assert ( ae instanceof ParameterValueExpression ) ; ParameterValueExpression pve = ( ParameterValueExpression ) ae ; int param = pve . getParameterIndex ( ) ; ints . set ( param ) ; } }
sources of bindings IndexScans and IndexCounts .
86
12
155,046
private static int [ ] bitSetToIntVector ( BitSet ints ) { int intCount = ints . cardinality ( ) ; if ( intCount == 0 ) { return null ; } int [ ] result = new int [ intCount ] ; int nextBit = ints . nextSetBit ( 0 ) ; for ( int ii = 0 ; ii < intCount ; ii ++ ) { assert ( nextBit != - 1 ) ; result [ ii ] = nextBit ; nextBit = ints . nextSetBit ( nextBit + 1 ) ; } assert ( nextBit == - 1 ) ; return result ; }
to convert the set bits to their integer indexes .
130
10
155,047
public VoltType [ ] parameterTypes ( ) { if ( m_parameterTypes == null ) { m_parameterTypes = new VoltType [ getParameters ( ) . length ] ; int ii = 0 ; for ( ParameterValueExpression param : getParameters ( ) ) { m_parameterTypes [ ii ++ ] = param . getValueType ( ) ; } } return m_parameterTypes ; }
This is assumed to be called only after parameters has been fully initialized .
87
14
155,048
public static Session newSession ( int dbID , String user , String password , int timeZoneSeconds ) { Database db = ( Database ) databaseIDMap . get ( dbID ) ; if ( db == null ) { return null ; } Session session = db . connect ( user , password , timeZoneSeconds ) ; session . isNetwork = true ; return session ; }
Used by server to open a new session
78
8
155,049
public static Session newSession ( String type , String path , String user , String password , HsqlProperties props , int timeZoneSeconds ) { Database db = getDatabase ( type , path , props ) ; if ( db == null ) { return null ; } return db . connect ( user , password , timeZoneSeconds ) ; }
Used by in - process connections and by Servlet
71
10
155,050
public static synchronized Database lookupDatabaseObject ( String type , String path ) { // A VoltDB extension to work around ENG-6044 /* disabled 14 lines ... Object key = path; HashMap databaseMap; if (type == DatabaseURL.S_FILE) { databaseMap = fileDatabaseMap; key = filePathToKey(path); } else if (type == DatabaseURL.S_RES) { databaseMap = resDatabaseMap; } else if (type == DatabaseURL.S_MEM) { databaseMap = memDatabaseMap; } else { throw (Error.runtimeError( ErrorCode.U_S0500, "DatabaseManager.lookupDatabaseObject()")); } ... disabled 14 lines */ assert ( type == DatabaseURL . S_MEM ) ; java . util . HashMap < String , Database > databaseMap = memDatabaseMap ; String key = path ; // End of VoltDB extension return ( Database ) databaseMap . get ( key ) ; }
Looks up database of a given type and path in the registry . Returns null if there is none .
204
20
155,051
private static synchronized void addDatabaseObject ( String type , String path , Database db ) { // A VoltDB extension to work around ENG-6044 /* disable 15 lines ... Object key = path; HashMap databaseMap; if (type == DatabaseURL.S_FILE) { databaseMap = fileDatabaseMap; key = filePathToKey(path); } else if (type == DatabaseURL.S_RES) { databaseMap = resDatabaseMap; } else if (type == DatabaseURL.S_MEM) { databaseMap = memDatabaseMap; } else { throw Error.runtimeError(ErrorCode.U_S0500, "DatabaseManager.addDatabaseObject()"); } ... disabled 15 lines */ assert ( type == DatabaseURL . S_MEM ) ; java . util . HashMap < String , Database > databaseMap = memDatabaseMap ; String key = path ; // End of VoltDB extension databaseIDMap . put ( db . databaseID , db ) ; databaseMap . put ( key , db ) ; }
Adds a database to the registry . Returns null if there is none .
216
14
155,052
static void removeDatabase ( Database database ) { int dbID = database . databaseID ; String type = database . getType ( ) ; String path = database . getPath ( ) ; // A VoltDB extension to work around ENG-6044 /* disable 2 lines ... Object key = path; HashMap databaseMap; ... disabled 2 lines */ // End of VoltDB extension notifyServers ( database ) ; // A VoltDB extension to work around ENG-6044 /* disable 11 lines ... if (type == DatabaseURL.S_FILE) { databaseMap = fileDatabaseMap; key = filePathToKey(path); } else if (type == DatabaseURL.S_RES) { databaseMap = resDatabaseMap; } else if (type == DatabaseURL.S_MEM) { databaseMap = memDatabaseMap; } else { throw (Error.runtimeError( ErrorCode.U_S0500, "DatabaseManager.lookupDatabaseObject()")); } ... disabled 11 lines */ assert ( type == DatabaseURL . S_MEM ) ; java . util . HashMap < String , Database > databaseMap = memDatabaseMap ; String key = path ; // End of VoltDB extension databaseIDMap . remove ( dbID ) ; databaseMap . remove ( key ) ; if ( databaseIDMap . isEmpty ( ) ) { ValuePool . resetPool ( ) ; } }
Removes the database from registry .
289
7
155,053
private static void deRegisterServer ( Server server , Database db ) { Iterator it = serverMap . values ( ) . iterator ( ) ; for ( ; it . hasNext ( ) ; ) { HashSet databases = ( HashSet ) it . next ( ) ; databases . remove ( db ) ; if ( databases . isEmpty ( ) ) { it . remove ( ) ; } } }
Deregisters a server as serving a given database . Not yet used .
81
16
155,054
private static void registerServer ( Server server , Database db ) { if ( ! serverMap . containsKey ( server ) ) { serverMap . put ( server , new HashSet ( ) ) ; } HashSet databases = ( HashSet ) serverMap . get ( server ) ; databases . add ( db ) ; }
Registers a server as serving a given database .
65
10
155,055
private static void notifyServers ( Database db ) { Iterator it = serverMap . keySet ( ) . iterator ( ) ; for ( ; it . hasNext ( ) ; ) { Server server = ( Server ) it . next ( ) ; HashSet databases = ( HashSet ) serverMap . get ( server ) ; if ( databases . contains ( db ) ) { // A VoltDB extension to disable a package dependency /* disable 2 lines ... server.notify(ServerConstants.SC_DATABASE_SHUTDOWN, db.databaseID); ... disabled 2 lines */ // End of VoltDB extension } } }
Notifies all servers that serve the database that the database has been shutdown .
131
15
155,056
private static String filePathToKey ( String path ) { try { return FileUtil . getDefaultInstance ( ) . canonicalPath ( path ) ; } catch ( Exception e ) { return path ; } }
thrown exception to an HsqlException in the process
43
11
155,057
public static boolean isComment ( String sql ) { Matcher commentMatcher = PAT_SINGLE_LINE_COMMENT . matcher ( sql ) ; return commentMatcher . matches ( ) ; }
Check if a SQL string is a comment .
43
9
155,058
public static String extractDDLToken ( String sql ) { String ddlToken = null ; Matcher ddlMatcher = PAT_ANY_DDL_FIRST_TOKEN . matcher ( sql ) ; if ( ddlMatcher . find ( ) ) { ddlToken = ddlMatcher . group ( 1 ) . toLowerCase ( ) ; } return ddlToken ; }
Get the DDL token if any at the start of this statement .
84
14
155,059
public static String extractDDLTableName ( String sql ) { Matcher matcher = PAT_TABLE_DDL_PREAMBLE . matcher ( sql ) ; if ( matcher . find ( ) ) { return matcher . group ( 2 ) . toLowerCase ( ) ; } return null ; }
Get the table name for a CREATE or DROP DDL statement .
65
15
155,060
public static String checkPermitted ( String sql ) { /* * IMPORTANT: Black-lists are checked first because they know more about * what they don't like about a statement and can provide a better message. * It requires that black-lists patterns be very selective and that they * don't mind seeing statements that wouldn't pass the white-lists. */ //=== Check against blacklists, must not be rejected by any. for ( CheckedPattern cp : BLACKLISTS ) { CheckedPattern . Result result = cp . check ( sql ) ; if ( result . matcher != null ) { return String . format ( "%s, in statement: %s" , result . explanation , sql ) ; } } //=== Check against whitelists, must be accepted by at least one. boolean hadWLMatch = false ; for ( CheckedPattern cp : WHITELISTS ) { if ( cp . matches ( sql ) ) { hadWLMatch = true ; break ; } } if ( ! hadWLMatch ) { return String . format ( "AdHoc DDL contains an unsupported statement: %s" , sql ) ; } // The statement is permitted. return null ; }
Naive filtering for stuff we haven't implemented yet . Hopefully this gets whittled away and eventually disappears .
251
22
155,061
static private boolean matchesStringAtIndex ( char [ ] buf , int index , String str ) { int strLength = str . length ( ) ; if ( index + strLength > buf . length ) { return false ; } for ( int i = 0 ; i < strLength ; ++ i ) { if ( buf [ index + i ] != str . charAt ( i ) ) { return false ; } } return true ; }
Determine if a character buffer contains the specified string a the specified index . Avoids an array index exception if the buffer is too short .
89
29
155,062
private static String removeCStyleComments ( String ddl ) { // Avoid Apache commons StringUtils.join() to minimize client dependencies. StringBuilder sb = new StringBuilder ( ) ; for ( String part : PAT_STRIP_CSTYLE_COMMENTS . split ( ddl ) ) { sb . append ( part ) ; } return sb . toString ( ) ; }
Remove c - style comments from a string aggressively
84
9
155,063
private static ObjectToken findObjectToken ( String objectTypeName ) { if ( objectTypeName != null ) { for ( ObjectToken ot : OBJECT_TOKENS ) { if ( ot . token . equalsIgnoreCase ( objectTypeName ) ) { return ot ; } } } return null ; }
Find information about an object type token if it's a known object type .
64
15
155,064
synchronized void pushPair ( Session session , Object [ ] row1 , Object [ ] row2 ) { if ( maxRowsQueued == 0 ) { trigger . fire ( triggerType , name . name , table . getName ( ) . name , row1 , row2 ) ; return ; } if ( rowsQueued >= maxRowsQueued ) { if ( nowait ) { pendingQueue . removeLast ( ) ; // overwrite last } else { try { wait ( ) ; } catch ( InterruptedException e ) { /* ignore and resume */ } rowsQueued ++ ; } } else { rowsQueued ++ ; } pendingQueue . add ( new TriggerData ( session , row1 , row2 ) ) ; notify ( ) ; // notify pop's wait }
The main thread tells the trigger thread to fire by this call . If this Trigger is not threaded then the fire method is called immediately and executed by the main thread . Otherwise the row data objects are added to the queue to be used by the Trigger thread .
163
52
155,065
public synchronized CompiledPlan planSqlCore ( String sql , StatementPartitioning partitioning ) { TrivialCostModel costModel = new TrivialCostModel ( ) ; DatabaseEstimates estimates = new DatabaseEstimates ( ) ; CompiledPlan plan = null ; // This try-with-resources block acquires a global lock on all planning // This is required until we figure out how to do parallel planning. try ( QueryPlanner planner = new QueryPlanner ( sql , "PlannerTool" , "PlannerToolProc" , m_database , partitioning , m_hsql , estimates , ! VoltCompiler . DEBUG_MODE , costModel , null , null , DeterminismMode . FASTER , false ) ) { // do the expensive full planning. planner . parse ( ) ; plan = planner . plan ( ) ; assert ( plan != null ) ; } catch ( Exception e ) { /* * Don't log PlanningErrorExceptions or HSQLParseExceptions, as they * are at least somewhat expected. */ String loggedMsg = "" ; if ( ! ( e instanceof PlanningErrorException || e instanceof HSQLParseException ) ) { logException ( e , "Error compiling query" ) ; loggedMsg = " (Stack trace has been written to the log.)" ; } if ( e . getMessage ( ) != null ) { throw new RuntimeException ( "SQL error while compiling query: " + e . getMessage ( ) + loggedMsg , e ) ; } throw new RuntimeException ( "SQL error while compiling query: " + e . toString ( ) + loggedMsg , e ) ; } if ( plan == null ) { throw new RuntimeException ( "Null plan received in PlannerTool.planSql" ) ; } return plan ; }
Stripped down compile that is ONLY used to plan default procedures .
377
14
155,066
private boolean isReadOnlyProcedure ( String pname ) { final Boolean b = m_procedureInfo . get ( ) . get ( pname ) ; if ( b == null ) { return false ; } return b ; }
Check if procedure is readonly?
50
7
155,067
private VoltTable [ ] aggregateProcedureProfileStats ( VoltTable [ ] baseStats ) { if ( baseStats == null || baseStats . length != 1 ) { return baseStats ; } StatsProcProfTable timeTable = new StatsProcProfTable ( ) ; baseStats [ 0 ] . resetRowPosition ( ) ; while ( baseStats [ 0 ] . advanceRow ( ) ) { // Skip non-transactional procedures for some of these rollups until // we figure out how to make them less confusing. // NB: They still show up in the raw PROCEDURE stata. boolean transactional = baseStats [ 0 ] . getLong ( "TRANSACTIONAL" ) == 1 ; if ( ! transactional ) { continue ; } if ( ! baseStats [ 0 ] . getString ( "STATEMENT" ) . equalsIgnoreCase ( "<ALL>" ) ) { continue ; } String pname = baseStats [ 0 ] . getString ( "PROCEDURE" ) ; timeTable . updateTable ( ! isReadOnlyProcedure ( pname ) , baseStats [ 0 ] . getLong ( "TIMESTAMP" ) , pname , baseStats [ 0 ] . getLong ( "PARTITION_ID" ) , baseStats [ 0 ] . getLong ( "INVOCATIONS" ) , baseStats [ 0 ] . getLong ( "MIN_EXECUTION_TIME" ) , baseStats [ 0 ] . getLong ( "MAX_EXECUTION_TIME" ) , baseStats [ 0 ] . getLong ( "AVG_EXECUTION_TIME" ) , baseStats [ 0 ] . getLong ( "FAILURES" ) , baseStats [ 0 ] . getLong ( "ABORTS" ) ) ; } return new VoltTable [ ] { timeTable . sortByAverage ( "EXECUTION_TIME" ) } ; }
Produce PROCEDUREPROFILE aggregation of PROCEDURE subselector
402
15
155,068
private VoltTable [ ] aggregateProcedureInputStats ( VoltTable [ ] baseStats ) { if ( baseStats == null || baseStats . length != 1 ) { return baseStats ; } StatsProcInputTable timeTable = new StatsProcInputTable ( ) ; baseStats [ 0 ] . resetRowPosition ( ) ; while ( baseStats [ 0 ] . advanceRow ( ) ) { // Skip non-transactional procedures for some of these rollups until // we figure out how to make them less confusing. // NB: They still show up in the raw PROCEDURE stata. boolean transactional = baseStats [ 0 ] . getLong ( "TRANSACTIONAL" ) == 1 ; if ( ! transactional ) { continue ; } if ( ! baseStats [ 0 ] . getString ( "STATEMENT" ) . equalsIgnoreCase ( "<ALL>" ) ) { continue ; } String pname = baseStats [ 0 ] . getString ( "PROCEDURE" ) ; timeTable . updateTable ( ! isReadOnlyProcedure ( pname ) , pname , baseStats [ 0 ] . getLong ( "PARTITION_ID" ) , baseStats [ 0 ] . getLong ( "TIMESTAMP" ) , baseStats [ 0 ] . getLong ( "INVOCATIONS" ) , baseStats [ 0 ] . getLong ( "MIN_PARAMETER_SET_SIZE" ) , baseStats [ 0 ] . getLong ( "MAX_PARAMETER_SET_SIZE" ) , baseStats [ 0 ] . getLong ( "AVG_PARAMETER_SET_SIZE" ) ) ; } return new VoltTable [ ] { timeTable . sortByInput ( "PROCEDURE_INPUT" ) } ; }
Produce PROCEDUREINPUT aggregation of PROCEDURE subselector
382
15
155,069
private VoltTable [ ] aggregateProcedureOutputStats ( VoltTable [ ] baseStats ) { if ( baseStats == null || baseStats . length != 1 ) { return baseStats ; } StatsProcOutputTable timeTable = new StatsProcOutputTable ( ) ; baseStats [ 0 ] . resetRowPosition ( ) ; while ( baseStats [ 0 ] . advanceRow ( ) ) { // Skip non-transactional procedures for some of these rollups until // we figure out how to make them less confusing. // NB: They still show up in the raw PROCEDURE stata. boolean transactional = baseStats [ 0 ] . getLong ( "TRANSACTIONAL" ) == 1 ; if ( ! transactional ) { continue ; } if ( ! baseStats [ 0 ] . getString ( "STATEMENT" ) . equalsIgnoreCase ( "<ALL>" ) ) { continue ; } String pname = baseStats [ 0 ] . getString ( "PROCEDURE" ) ; timeTable . updateTable ( ! isReadOnlyProcedure ( pname ) , pname , baseStats [ 0 ] . getLong ( "PARTITION_ID" ) , baseStats [ 0 ] . getLong ( "TIMESTAMP" ) , baseStats [ 0 ] . getLong ( "INVOCATIONS" ) , baseStats [ 0 ] . getLong ( "MIN_RESULT_SIZE" ) , baseStats [ 0 ] . getLong ( "MAX_RESULT_SIZE" ) , baseStats [ 0 ] . getLong ( "AVG_RESULT_SIZE" ) ) ; } return new VoltTable [ ] { timeTable . sortByOutput ( "PROCEDURE_OUTPUT" ) } ; }
Produce PROCEDUREOUTPUT aggregation of PROCEDURE subselector
371
16
155,070
public void notifyOfCatalogUpdate ( ) { m_procedureInfo = getProcedureInformationfoSupplier ( ) ; m_registeredStatsSources . put ( StatsSelector . PROCEDURE , new NonBlockingHashMap < Long , NonBlockingHashSet < StatsSource > > ( ) ) ; }
Please note that this function will be called from Site thread where most other functions in the class are from StatsAgent thread .
67
25
155,071
public void addState ( long agreementHSId ) { SiteState ss = m_stateBySite . get ( agreementHSId ) ; if ( ss != null ) return ; ss = new SiteState ( ) ; ss . hsId = agreementHSId ; ss . newestConfirmedTxnId = m_newestConfirmedTxnId ; m_stateBySite . put ( agreementHSId , ss ) ; }
Once a failed node is rejoined put its sites back into all of the data structures here .
89
20
155,072
public Pair < CompleteTransactionTask , Boolean > pollFirstCompletionTask ( CompletionCounter nextTaskCounter ) { // remove from the head Pair < CompleteTransactionTask , Boolean > pair = m_compTasks . pollFirstEntry ( ) . getValue ( ) ; if ( m_compTasks . isEmpty ( ) ) { return pair ; } // check next task for completion to ensure that the heads on all the site // have the same transaction and timestamp Pair < CompleteTransactionTask , Boolean > next = peekFirst ( ) ; if ( nextTaskCounter . txnId == 0L ) { nextTaskCounter . txnId = next . getFirst ( ) . getMsgTxnId ( ) ; nextTaskCounter . completionCount ++ ; nextTaskCounter . timestamp = next . getFirst ( ) . getTimestamp ( ) ; } else if ( nextTaskCounter . txnId == next . getFirst ( ) . getMsgTxnId ( ) && nextTaskCounter . timestamp == next . getFirst ( ) . getTimestamp ( ) ) { nextTaskCounter . completionCount ++ ; } return pair ; }
Remove the CompleteTransactionTask from the head and count the next CompleteTransactionTask
234
15
155,073
private static boolean isComparable ( CompleteTransactionTask c1 , CompleteTransactionTask c2 ) { return c1 . getMsgTxnId ( ) == c2 . getMsgTxnId ( ) && MpRestartSequenceGenerator . isForRestart ( c1 . getTimestamp ( ) ) == MpRestartSequenceGenerator . isForRestart ( c2 . getTimestamp ( ) ) ; }
4 ) restart completion and repair completion can't overwrite each other
92
12
155,074
public boolean matchCompleteTransactionTask ( long txnId , long timestamp ) { if ( ! m_compTasks . isEmpty ( ) ) { long lowestTxnId = m_compTasks . firstKey ( ) ; if ( txnId == lowestTxnId ) { Pair < CompleteTransactionTask , Boolean > pair = m_compTasks . get ( lowestTxnId ) ; return timestamp == pair . getFirst ( ) . getTimestamp ( ) ; } } return false ; }
Only match CompleteTransactionTask at head of the queue
106
10
155,075
public static VoltTable aggregateStats ( VoltTable stats ) throws IllegalArgumentException { stats . resetRowPosition ( ) ; if ( stats . getRowCount ( ) == 0 ) { return stats ; } String role = null ; Map < Byte , State > states = new TreeMap <> ( ) ; while ( stats . advanceRow ( ) ) { final byte clusterId = ( byte ) stats . getLong ( CN_REMOTE_CLUSTER_ID ) ; final String curRole = stats . getString ( CN_ROLE ) ; if ( role == null ) { role = curRole ; } else if ( ! role . equals ( curRole ) ) { throw new IllegalArgumentException ( "Inconsistent DR role across cluster nodes: " + stats . toFormattedString ( false ) ) ; } final State state = State . valueOf ( stats . getString ( CN_STATE ) ) ; states . put ( clusterId , state . and ( states . get ( clusterId ) ) ) ; } // Remove the -1 placeholder if there are real cluster states if ( states . size ( ) > 1 ) { states . remove ( ( byte ) - 1 ) ; } assert role != null ; stats . clearRowData ( ) ; for ( Map . Entry < Byte , State > e : states . entrySet ( ) ) { stats . addRow ( role , e . getValue ( ) . name ( ) , e . getKey ( ) ) ; } return stats ; }
Aggregates DRROLE statistics reported by multiple nodes into a single cluster - wide row . The role column should be the same across all nodes . The state column may differ slightly and it uses the same logical AND - ish operation to combine the states .
312
52
155,076
public static String getString ( String key ) { if ( RESOURCE_BUNDLE == null ) throw new RuntimeException ( "Localized messages from resource bundle '" + BUNDLE_NAME + "' not loaded during initialization of driver." ) ; try { if ( key == null ) throw new IllegalArgumentException ( "Message key can not be null" ) ; String message = RESOURCE_BUNDLE . getString ( key ) ; if ( message == null ) message = "Missing error message for key '" + key + "'" ; return message ; } catch ( MissingResourceException e ) { return ' ' + key + ' ' ; } }
Returns the localized message for the given message key
141
9
155,077
public void createAllDevNullTargets ( Exception lastWriteException ) { Map < Integer , SnapshotDataTarget > targets = Maps . newHashMap ( ) ; final AtomicInteger numTargets = new AtomicInteger ( ) ; for ( Deque < SnapshotTableTask > tasksForSite : m_taskListsForHSIds . values ( ) ) { for ( SnapshotTableTask task : tasksForSite ) { // Close any created targets and replace them with DevNull, go web-scale if ( task . getTarget ( true ) != null ) { try { task . getTarget ( ) . close ( ) ; } catch ( Exception e ) { SNAP_LOG . error ( "Failed closing data target after error" , e ) ; } } SnapshotDataTarget target = targets . get ( task . m_table . getRelativeIndex ( ) ) ; if ( target == null ) { target = new DevNullSnapshotTarget ( lastWriteException ) ; final Runnable onClose = new TargetStatsClosure ( target , task . m_table . getTypeName ( ) , numTargets , m_snapshotRecord ) ; target . setOnCloseHandler ( onClose ) ; targets . put ( task . m_table . getRelativeIndex ( ) , target ) ; m_targets . add ( target ) ; numTargets . incrementAndGet ( ) ; } task . setTarget ( target ) ; } } }
In case the deferred setup phase fails some data targets may have not been created yet . This method will close all existing data targets and replace all with DevNullDataTargets so that snapshot can be drained .
309
42
155,078
private Map < String , Boolean > collapseSets ( List < List < String > > allTableSets ) { Map < String , Boolean > answer = new TreeMap <> ( ) ; for ( List < String > tables : allTableSets ) { for ( String table : tables ) { answer . put ( table , false ) ; } } return answer ; }
Take a list of list of table names and collapse into a map which maps all table names to false . We will set the correct values later on . We just want to get the structure right now . Note that tables may be named multiple times in the lists of lists of tables . Everything gets mapped to false so we don t care .
77
67
155,079
private List < List < String > > decodeTables ( String [ ] tablesThatMustBeEmpty ) { List < List < String >> answer = new ArrayList <> ( ) ; for ( String tableSet : tablesThatMustBeEmpty ) { String tableNames [ ] = tableSet . split ( "\\+" ) ; answer . add ( Arrays . asList ( tableNames ) ) ; } return answer ; }
Decode sets of names encoded by concatenation with plus signs into lists of lists of strings . Preserve the order since we need it to match to error messages later on .
88
37
155,080
public VoltTable [ ] run ( SystemProcedureExecutionContext ctx ) { createAndExecuteSysProcPlan ( SysProcFragmentId . PF_shutdownSync , SysProcFragmentId . PF_shutdownSyncDone ) ; SynthesizedPlanFragment pfs [ ] = new SynthesizedPlanFragment [ ] { new SynthesizedPlanFragment ( SysProcFragmentId . PF_shutdownCommand , true ) } ; executeSysProcPlanFragments ( pfs , SysProcFragmentId . PF_procedureDone ) ; return new VoltTable [ 0 ] ; }
Begin an un - graceful shutdown .
140
7
155,081
public void drain ( ) throws InterruptedException , IOException { ClientImpl currentClient = this . getClient ( ) ; if ( currentClient == null ) { throw new IOException ( "Client is unavailable for drain()." ) ; } currentClient . drain ( ) ; }
Block the current thread until all queued stored procedure invocations have received responses or there are no more connections to the cluster
57
24
155,082
public void backpressureBarrier ( ) throws InterruptedException , IOException { ClientImpl currentClient = this . getClient ( ) ; if ( currentClient == null ) { throw new IOException ( "Client is unavailable for backpressureBarrier()." ) ; } currentClient . backpressureBarrier ( ) ; }
Blocks the current thread until there is no more backpressure or there are no more connections to the database
66
20
155,083
public boolean absolute ( int position ) { if ( position < 0 ) { position += size ; } if ( position < 0 ) { beforeFirst ( ) ; return false ; } if ( position > size ) { afterLast ( ) ; return false ; } if ( size == 0 ) { return false ; } if ( position < currentPos ) { beforeFirst ( ) ; } // go to the tagget row; while ( position > currentPos ) { next ( ) ; } return true ; }
Uses similar semantics to java . sql . ResultSet except this is 0 based . When position is 0 or positive it is from the start ; when negative it is from end
102
35
155,084
private void printErrorAndQuit ( String message ) { System . out . println ( message + "\n-------------------------------------------------------------------------------------\n" ) ; printUsage ( ) ; System . exit ( - 1 ) ; }
Prints out an error message and the application usage print - out then terminates the application .
44
19
155,085
public AppHelper printActualUsage ( ) { System . out . println ( "-------------------------------------------------------------------------------------" ) ; int maxLength = 24 ; for ( Argument a : Arguments ) if ( maxLength < a . Name . length ( ) ) maxLength = a . Name . length ( ) ; for ( Argument a : Arguments ) { String template = "%1$" + String . valueOf ( maxLength - 1 ) + "s : " ; System . out . printf ( template , a . Name ) ; System . out . println ( a . Value ) ; } System . out . println ( "-------------------------------------------------------------------------------------" ) ; return this ; }
Prints a full list of actual arguments that will be used by the application after interpretation of defaults and actual argument values as passed by the user on the command line .
134
33
155,086
public byte byteValue ( String name ) { try { return Byte . valueOf ( this . getArgumentByName ( name ) . Value ) ; } catch ( NullPointerException npe ) { printErrorAndQuit ( String . format ( "Argument '%s' was not provided." , name ) ) ; } catch ( Exception x ) { printErrorAndQuit ( String . format ( "Argument '%s' could not be cast to type: 'byte'." , name ) ) ; } return - 1 ; // We will never get here: printErrorAndQuit will have terminated the application! }
Retrieves the value of an argument as a byte .
131
12
155,087
public short shortValue ( String name ) { try { return Short . valueOf ( this . getArgumentByName ( name ) . Value ) ; } catch ( NullPointerException npe ) { printErrorAndQuit ( String . format ( "Argument '%s' was not provided." , name ) ) ; } catch ( Exception x ) { printErrorAndQuit ( String . format ( "Argument '%s' could not be cast to type: 'short'." , name ) ) ; } return - 1 ; // We will never get here: printErrorAndQuit will have terminated the application! }
Retrieves the value of an argument as a short .
131
12
155,088
public int intValue ( String name ) { try { return Integer . valueOf ( this . getArgumentByName ( name ) . Value ) ; } catch ( NullPointerException npe ) { printErrorAndQuit ( String . format ( "Argument '%s' was not provided." , name ) ) ; } catch ( Exception x ) { printErrorAndQuit ( String . format ( "Argument '%s' could not be cast to type: 'int'." , name ) ) ; } return - 1 ; // We will never get here: printErrorAndQuit will have terminated the application! }
Retrieves the value of an argument as a int .
131
12
155,089
public long longValue ( String name ) { try { return Long . valueOf ( this . getArgumentByName ( name ) . Value ) ; } catch ( NullPointerException npe ) { printErrorAndQuit ( String . format ( "Argument '%s' was not provided." , name ) ) ; } catch ( Exception x ) { printErrorAndQuit ( String . format ( "Argument '%s' could not be cast to type: 'long'." , name ) ) ; } return - 1 ; // We will never get here: printErrorAndQuit will have terminated the application! }
Retrieves the value of an argument as a long .
131
12
155,090
public double doubleValue ( String name ) { try { return Double . valueOf ( this . getArgumentByName ( name ) . Value ) ; } catch ( NullPointerException npe ) { printErrorAndQuit ( String . format ( "Argument '%s' was not provided." , name ) ) ; } catch ( Exception x ) { printErrorAndQuit ( String . format ( "Argument '%s' could not be cast to type: 'double'." , name ) ) ; } return - 1 ; // We will never get here: printErrorAndQuit will have terminated the application! }
Retrieves the value of an argument as a double .
131
12
155,091
public String stringValue ( String name ) { try { return this . getArgumentByName ( name ) . Value ; } catch ( Exception npe ) { printErrorAndQuit ( String . format ( "Argument '%s' was not provided." , name ) ) ; } return null ; // We will never get here: printErrorAndQuit will have terminated the application! }
Retrieves the value of an argument as a string .
82
12
155,092
public boolean booleanValue ( String name ) { try { return Boolean . valueOf ( this . getArgumentByName ( name ) . Value ) ; } catch ( NullPointerException npe ) { printErrorAndQuit ( String . format ( "Argument '%s' was not provided." , name ) ) ; } catch ( Exception x ) { printErrorAndQuit ( String . format ( "Argument '%s' could not be cast to type: 'boolean'." , name ) ) ; } return false ; // We will never get here: printErrorAndQuit will have terminated the application! }
Retrieves the value of an argument as a boolean .
131
12
155,093
/**
 * Resizes the component, re-laying out the horizontal and vertical scroll
 * bars along the bottom and right edges and invalidating the cached
 * off-screen image so the next paint re-renders at the new size.
 *
 * @param x new x position
 * @param y new y position
 * @param w new overall width (content width plus vertical scroll bar)
 * @param h new overall height (content height plus horizontal scroll bar)
 */
public void setBounds(int x, int y, int w, int h) {
    super.setBounds(x, y, w, h);

    // Space consumed by the two scroll bars.
    final int barH = sbHoriz.getPreferredSize().height;
    final int barW = sbVert.getPreferredSize().width;
    iSbHeight = barH;
    iSbWidth = barW;

    // Remaining area available for the content itself.
    iHeight = h - barH;
    iWidth = w - barW;

    // Horizontal bar hugs the bottom edge; vertical bar hugs the right edge.
    sbHoriz.setBounds(0, iHeight, iWidth, barH);
    sbVert.setBounds(iWidth, 0, barW, iHeight);
    adjustScroll();

    // Drop the back buffer and schedule a repaint at the new geometry.
    iImage = null;
    repaint();
}
with additional replacement of deprecated methods
152
6
155,094
/**
 * Queues a request on the writer thread to dump all buffered trace events to
 * a new gzipped JSON file under {@code logDir}, then waits for that dump to
 * finish.
 *
 * @param logDir directory in which to create the trace file
 * @return the absolute path of the written file, or {@code null} if a dump
 *         was already in progress and this request was ignored
 * @throws IOException if the target file already exists or its directory is
 *         not writable
 * @throws ExecutionException if the handoff future fails
 * @throws InterruptedException if interrupted while waiting
 */
private String write(String logDir) throws IOException, ExecutionException, InterruptedException {
    // File name is timestamped, so collisions should be rare but are checked anyway.
    final File file = new File(logDir, "trace_" + System.currentTimeMillis() + ".json.gz");
    if (file.exists()) {
        throw new IOException("Trace file " + file.getAbsolutePath() + " already exists");
    }
    if (!file.getParentFile().canWrite() || !file.getParentFile().canExecute()) {
        throw new IOException("Trace file " + file.getAbsolutePath() + " is not writable");
    }
    // Two-stage handoff: the work queue task runs on the writer thread and
    // publishes the (possibly null) dump future; this caller then waits on it.
    SettableFuture<Future<?>> f = SettableFuture.create();
    m_work.offer(() -> f.set(dumpEvents(file)));
    final Future<?> writeFuture = f.get();
    if (writeFuture != null) {
        writeFuture.get(); // Wait for the write to finish without blocking new events
        return file.getAbsolutePath();
    } else {
        // A write is already in progress, ignore this request
        return null;
    }
}
Write the events in the queue to file .
256
9
155,095
/**
 * Creates a trace event batch for the given category. Events added to the
 * batch should all originate from the same thread.
 *
 * @param cat the trace category for the batch
 * @return a queued batch, or {@code null} if tracing is disabled or the
 *         category is not enabled
 */
public static TraceEventBatch log(Category cat) {
    // Snapshot the tracer reference once; another thread may shut it down.
    final VoltTrace tracer = s_tracer;
    if (tracer == null || !tracer.isCategoryEnabled(cat)) {
        return null;
    }
    final TraceEventBatch batch = new TraceEventBatch(cat);
    tracer.queueEvent(batch);
    return batch;
}
Create a trace event batch for the given category . The events that go into this batch should all originate from the same thread .
78
25
155,096
/**
 * Dumps any pending trace events (when {@code logDir} is non-null), then
 * stops the tracer and its writer thread.
 *
 * @param logDir directory to dump trace events into before shutting down,
 *        or {@code null} to skip the dump
 * @param timeOutMillis how long to wait for the writer thread to terminate;
 *        a negative value skips the wait entirely
 * @return the path of the dumped trace file, or {@code null} if no dump
 *         happened
 * @throws IOException if the dump fails
 */
public static String closeAllAndShutdown(String logDir, long timeOutMillis) throws IOException {
    String path = null;
    final VoltTrace tracer = s_tracer;
    if (tracer != null) {
        if (logDir != null) {
            path = dump(logDir);
        }
        // Clear the global reference first so no new events are queued while
        // we tear down the writer.
        s_tracer = null;
        if (timeOutMillis >= 0) {
            try {
                tracer.m_writerThread.shutdownNow();
                tracer.m_writerThread.awaitTermination(timeOutMillis, TimeUnit.MILLISECONDS);
            } catch (InterruptedException e) {
                // Fix: restore the interrupt status instead of silently
                // swallowing it, so callers can observe the interruption.
                Thread.currentThread().interrupt();
            }
        }
        tracer.shutdown();
    }
    return path;
}
Close all open files and wait for shutdown .
147
9
155,097
/**
 * Creates and starts a new tracer on a daemon thread. No-op if a tracer is
 * already running. Synchronized so concurrent enables cannot race to create
 * two tracers.
 *
 * @throws IOException declared for compatibility with callers
 */
private static synchronized void start() throws IOException {
    if (s_tracer != null) {
        return; // Already running.
    }
    final VoltTrace tracer = new VoltTrace();
    final Thread writer = new Thread(tracer);
    writer.setDaemon(true);
    writer.start();
    s_tracer = tracer;
}
Creates and starts a new tracer . If one already exists this is a no - op . Synchronized to prevent multiple threads enabling it at the same time .
71
34
155,098
/**
 * Writes all queued trace events to a file under {@code logDir}, creating
 * the directory if necessary. No-op when tracing is not running.
 *
 * @param logDir the trace log directory (created if absent)
 * @return the path of the written trace file, or {@code null} if tracing is
 *         disabled or the write failed (failures are logged, not thrown)
 * @throws IOException if the parent directory is not writable or the log
 *         directory cannot be created
 */
public static String dump(String logDir) throws IOException {
    String path = null;
    final VoltTrace tracer = s_tracer;
    if (tracer != null) {
        final File dir = new File(logDir);
        // Fix: getParentFile() returns null for a single-component relative
        // path, which previously caused a NullPointerException on the
        // canWrite()/canExecute() checks. Only check when a parent exists.
        final File parent = dir.getParentFile();
        if (parent != null && (!parent.canWrite() || !parent.canExecute())) {
            throw new IOException("Trace log parent directory " + parent.getAbsolutePath() + " is not writable");
        }
        if (!dir.exists()) {
            if (!dir.mkdir()) {
                throw new IOException("Failed to create trace log directory " + dir.getAbsolutePath());
            }
        }
        try {
            path = tracer.write(logDir);
        } catch (Exception e) {
            // Best effort: a failed dump is logged but does not propagate.
            s_logger.info("Unable to write trace file: " + e.getMessage(), e);
        }
    }
    return path;
}
Write all trace events in the queue to file .
219
10
155,099
/**
 * Enables the given trace categories, starting the tracer first if it is not
 * already running. Existing enabled categories are preserved.
 *
 * @param categories the categories to add to the enabled set
 * @throws IOException if starting the tracer fails
 */
public static void enableCategories(Category... categories) throws IOException {
    if (s_tracer == null) {
        start();
    }
    final VoltTrace tracer = s_tracer;
    assert tracer != null;
    // The enabled-category set is immutable: build the union of the current
    // set and the new categories, then swap the reference in one step.
    tracer.m_enabledCategories = ImmutableSet.<Category>builder()
            .addAll(tracer.m_enabledCategories)
            .addAll(Arrays.asList(categories))
            .build();
}
Enable the given categories . If the tracer is not running at the moment create a new one .
113
20