idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
154,300
public void toLeftJoin ( ) { assert ( ( m_leftNode != null && m_rightNode != null ) || ( m_leftNode == null && m_rightNode == null ) ) ; if ( m_leftNode == null && m_rightNode == null ) { // End of recursion return ; } // recursive calls if ( m_leftNode instanceof BranchNode ) { ( ( BranchNode ) m_leftNode ) . toLeftJoin ( ) ; } if ( m_rightNode instanceof BranchNode ) { ( ( BranchNode ) m_rightNode ) . toLeftJoin ( ) ; } // Swap own children if ( m_joinType == JoinType . RIGHT ) { JoinNode node = m_rightNode ; m_rightNode = m_leftNode ; m_leftNode = node ; m_joinType = JoinType . LEFT ; } }
Transform all RIGHT joins from the tree into the LEFT ones by swapping the nodes and their join types
187
20
154,301
/**
 * Starting from this root, walks down to children that share this node's join
 * type and terminates each subtree at the first child whose join type differs.
 * Each terminated child is added to leafNodes (it becomes the root of the next
 * sub-tree to process) and is replaced in this tree by a placeholder
 * TableLeafNode whose id is the negation of the child's id.
 *
 * @param leafNodes output list receiving the roots of the severed sub-trees
 */
@Override
protected void extractSubTree(List<JoinNode> leafNodes) {
    JoinNode[] children = { m_leftNode, m_rightNode };
    for (JoinNode child : children) {
        // Leaf nodes don't have a significant join type,
        // test for them first and never attempt to start a new tree at a leaf.
        if (!(child instanceof BranchNode)) {
            continue;
        }
        if (((BranchNode) child).m_joinType == m_joinType) {
            // The join type for this node is the same as the root's one
            // Keep walking down the tree
            child.extractSubTree(leafNodes);
        } else {
            // The join type for this join differs from the root's one
            // Terminate the sub-tree
            leafNodes.add(child);
            // Replace the join node with the temporary node having the id negated
            JoinNode tempNode = new TableLeafNode(
                    -child.m_id, child.m_joinExpr, child.m_whereExpr, null);
            if (child == m_leftNode) {
                m_leftNode = tempNode;
            } else {
                m_rightNode = tempNode;
            }
        }
    }
}
Starting from the root, recurse to the children, stopping at the first join node of a different type, and terminate the sub-tree at that point by replacing the join node with a temporary node whose id is the negation of the join node's id. That join node becomes the root of the next sub-tree.
258
57
154,302
/** Returns true if this node or any descendant performs an outer join. */
@Override
public boolean hasOuterJoin() {
    assert (m_leftNode != null && m_rightNode != null);
    if (m_joinType != JoinType.INNER) {
        return true;
    }
    // Both children must be inner-only for this subtree to be inner-only.
    return m_leftNode.hasOuterJoin() || m_rightNode.hasOuterJoin();
}
Returns true if one of the tree nodes has outer join
66
11
154,303
/** Recursively collects ephemeral table scans from both child subtrees. */
@Override
public void extractEphemeralTableQueries(List<StmtEphemeralTableScan> scans) {
    for (JoinNode child : new JoinNode[] { m_leftNode, m_rightNode }) {
        if (child != null) {
            child.extractEphemeralTableQueries(scans);
        }
    }
}
Recursively collects the ephemeral table sub-queries that are part of this query into the given list.
81
15
154,304
/** Returns true when every join operation in this subtree is an inner join. */
@Override
public boolean allInnerJoins() {
    if (m_joinType != JoinType.INNER) {
        return false;
    }
    if (m_leftNode != null && !m_leftNode.allInnerJoins()) {
        return false;
    }
    // An absent child is vacuously all-inner.
    return m_rightNode == null || m_rightNode.allInnerJoins();
}
Returns if all the join operations within this join tree are inner joins .
70
14
154,305
/**
 * Applies order-determinism enforcement to the plan graph. Only runs when
 * stronger determinism is requested (not FASTER mode) and the plan is not
 * already deterministic.
 *
 * @param plan    the compiled plan whose root graph may be rewritten
 * @param detMode requested determinism mode
 */
public static void apply(CompiledPlan plan, DeterminismMode detMode) {
    if (detMode == DeterminismMode.FASTER) {
        return;
    }
    if (plan.hasDeterministicStatement()) {
        return;
    }
    // Fetch the root once; the original read plan.rootPlanGraph twice
    // into two separate locals for no reason.
    AbstractPlanNode root = plan.rootPlanGraph;
    if (root.isOrderDeterministic()) {
        return;
    }
    plan.rootPlanGraph = recursivelyApply(root);
}
Only applies when stronger determinism is needed .
111
9
154,306
/**
 * Update last-seen uniqueIds in the replay sequencer; used on MPI repair.
 * Only messages that are part of replay -- command-log replay transactions
 * and multi-partition sentinels -- are fed to the sequencer.
 */
public void updateLastSeenUniqueIds(VoltMessage message) {
    long sequenceWithUniqueId = Long.MIN_VALUE;
    // Command-log replay transaction?
    boolean commandLog = (message instanceof TransactionInfoBaseMessage
            && (((TransactionInfoBaseMessage) message).isForReplay()));
    // Sentinel marking a multi-partition transaction for this partition.
    boolean sentinel = message instanceof MultiPartitionParticipantMessage;
    // if replay
    if (commandLog || sentinel) {
        sequenceWithUniqueId = ((TransactionInfoBaseMessage) message).getUniqueId();
        // Update last seen and last polled txnId for replicas
        m_replaySequencer.updateLastSeenUniqueId(sequenceWithUniqueId,
                (TransactionInfoBaseMessage) message);
        m_replaySequencer.updateLastPolledUniqueId(sequenceWithUniqueId,
                (TransactionInfoBaseMessage) message);
    }
}
Update last seen uniqueIds in the replay sequencer . This is used on MPI repair .
178
20
154,307
/**
 * Parses one row of a restore result table and adds it to this set, keyed by
 * (host id, partition id, table name). A row for an existing key has its
 * success flag and error message merged into the entry; otherwise a new
 * entry is created from the row.
 */
public void parseRestoreResultRow(VoltTable vt) {
    RestoreResultKey key = new RestoreResultKey((int) vt.getLong("HOST_ID"),
            (int) vt.getLong("PARTITION_ID"),
            vt.getString("TABLE"));
    if (containsKey(key)) {
        // Fold this row's outcome into the existing entry.
        get(key).mergeData(vt.getString("RESULT").equals("SUCCESS"),
                vt.getString("ERR_MSG"));
    } else {
        put(key, new RestoreResultValue((int) vt.getLong("SITE_ID"),
                vt.getString("RESULT").equals("SUCCESS"),
                vt.getString("HOSTNAME"),
                vt.getString("ERR_MSG")));
    }
}
Parse a restore result table row and add to the set .
206
13
154,308
/**
 * Searches the specified naturally ordered list for the specified object
 * using the binary search algorithm, delegating to the comparator-based
 * overload with the natural ordering.
 * NOTE(review): the raw {@code Comparable} bound is pre-generics style;
 * tightening it to {@code Comparable<? super E>} could break existing
 * callers, so it is left as-is.
 */
public static <E extends Comparable> int binarySearch(List<? extends E> list, E e,
        KeyPresentBehavior presentBehavior, KeyAbsentBehavior absentBehavior) {
    checkNotNull(e);
    return binarySearch(list, e, Ordering.natural(), presentBehavior, absentBehavior);
}
Searches the specified naturally ordered list for the specified object using the binary search algorithm .
73
18
154,309
/**
 * Wraps each task in the given collection via {@code wrapTask} and returns
 * the wrapped tasks as an immutable list in iteration order.
 *
 * @param tasks the tasks to wrap
 * @return an immutable list of the wrapped tasks
 */
private <T> ImmutableList<Callable<T>> wrapTasks(Collection<? extends Callable<T>> tasks) {
    // 'final' on a private method is redundant (it can never be overridden)
    // and was dropped.
    ImmutableList.Builder<Callable<T>> builder = ImmutableList.builder();
    for (Callable<T> task : tasks) {
        builder.add(wrapTask(task));
    }
    return builder.build();
}
Wraps a collection of tasks .
85
7
154,310
/**
 * Loads system, default, and user procedures from the catalog context.
 *
 * @param isInitOrReplay true on initialization or replay (user procedures
 *        are loaded fresh from the catalog jar); false on a catalog update
 *        (pre-prepared runners are taken from the context instead)
 */
public void loadProcedures(CatalogContext catalogContext, boolean isInitOrReplay) {
    m_defaultProcManager = catalogContext.m_defaultProcs;
    // default proc caches clear on catalog update
    m_defaultProcCache.clear();
    m_plannerTool = catalogContext.m_ptool;
    // reload all system procedures from beginning
    m_sysProcs = loadSystemProcedures(catalogContext, m_site);
    try {
        if (isInitOrReplay) {
            // reload user procedures
            m_userProcs = loadUserProcedureRunners(catalogContext.database.getProcedures(),
                    catalogContext.getCatalogJar().getLoader(),
                    null, m_site);
        } else {
            // When catalog updates, only user procedures needs to be reloaded.
            m_userProcs = catalogContext.getPreparedUserProcedureRunners(m_site);
        }
    } catch (Exception e) {
        // Failure to load procedures is unrecoverable for this node.
        VoltDB.crashLocalVoltDB("Error trying to load user procedures: " + e.getMessage());
    }
}
Load procedures .
236
3
154,311
/**
 * Builds a pattern segment accepting a single ALLOW or PARTITION clause
 * that modifies a CREATE PROCEDURE statement.
 *
 * @param captureTokens true to capture clause operands as match groups
 */
private static SQLPatternPart makeInnerProcedureModifierClausePattern(boolean captureTokens) {
    return SPF.oneOf(
            // ALLOW <comma-separated user names>
            SPF.clause(SPF.token("allow"),
                    SPF.group(captureTokens, SPF.commaList(SPF.userName()))),
            // PARTITION ON TABLE <t> COLUMN <c> [PARAMETER <n>] [AND ...]
            SPF.clause(SPF.token("partition"), SPF.token("on"), SPF.token("table"),
                    SPF.group(captureTokens, SPF.databaseObjectName()),
                    SPF.token("column"),
                    SPF.group(captureTokens, SPF.databaseObjectName()),
                    SPF.optional(SPF.clause(SPF.token("parameter"),
                            SPF.group(captureTokens, SPF.integer()))),
                    // parse a two-partition transaction clause
                    SPF.optional(SPF.clause(SPF.token("and"), SPF.token("on"), SPF.token("table"),
                            SPF.group(captureTokens, SPF.databaseObjectName()),
                            SPF.token("column"),
                            SPF.group(captureTokens, SPF.databaseObjectName()),
                            SPF.optional(SPF.clause(SPF.token("parameter"),
                                    SPF.group(captureTokens, SPF.integer())))))));
}
Build a pattern segment to accept a single optional ALLOW or PARTITION clause to modify CREATE PROCEDURE statements .
329
24
154,312
static SQLPatternPart unparsedProcedureModifierClauses ( ) { // Force the leading space to go inside the repeat block. return SPF . capture ( SPF . repeat ( makeInnerProcedureModifierClausePattern ( false ) ) ) . withFlags ( SQLPatternFactory . ADD_LEADING_SPACE_TO_CHILD ) ; }
Build a pattern segment to recognize all the ALLOW or PARTITION modifier clauses of a CREATE PROCEDURE statement .
80
24
154,313
/**
 * Builds a pattern segment accepting a single EXPORT or PARTITION clause
 * that modifies a CREATE STREAM statement.
 *
 * @param captureTokens true to capture clause operands as match groups
 */
private static SQLPatternPart makeInnerStreamModifierClausePattern(boolean captureTokens) {
    return SPF.oneOf(
            // EXPORT TO TARGET <target>
            SPF.clause(SPF.token("export"), SPF.token("to"), SPF.token("target"),
                    SPF.group(captureTokens, SPF.databaseObjectName())),
            // PARTITION ON COLUMN <column>
            SPF.clause(SPF.token("partition"), SPF.token("on"), SPF.token("column"),
                    SPF.group(captureTokens, SPF.databaseObjectName())));
}
Build a pattern segment to accept a single optional EXPORT or PARTITION clause to modify CREATE STREAM statements .
138
23
154,314
private static SQLPatternPart unparsedStreamModifierClauses ( ) { // Force the leading space to go inside the repeat block. return SPF . capture ( SPF . repeat ( makeInnerStreamModifierClausePattern ( false ) ) ) . withFlags ( SQLPatternFactory . ADD_LEADING_SPACE_TO_CHILD ) ; }
Build a pattern segment to recognize all the EXPORT or PARTITION modifier clauses of a CREATE STREAM statement .
77
23
154,315
/**
 * Splits the parameter text of an EXEC command into individual parameter
 * strings. Quoted strings containing whitespace or commas are temporarily
 * masked with a placeholder so the separator split cannot break them, then
 * substituted back in order. Comments, to the extent they are supported,
 * have already been stripped out by the caller.
 *
 * @param paramText raw text following the procedure name
 * @return the list of parameter strings in order
 */
private static List<String> parseExecParameters(String paramText) {
    final String SafeParamStringValuePattern = "#(SQL_PARSER_SAFE_PARAMSTRING)";
    // Find all quoted strings.
    // Mask out strings that contain whitespace or commas
    // that must not be confused with parameter separators.
    // "Safe" strings that don't contain these characters don't need to be masked
    // but they DO need to be found and explicitly skipped so that their closing
    // quotes don't trigger a false positive for the START of an unsafe string.
    // Skipping is accomplished by resetting paramText to an offset substring
    // after copying the skipped (or substituted) text to a string builder.
    ArrayList<String> originalString = new ArrayList<>();
    Matcher stringMatcher = SingleQuotedString.matcher(paramText);
    StringBuilder safeText = new StringBuilder();
    while (stringMatcher.find()) {
        // Save anything before the found string.
        safeText.append(paramText.substring(0, stringMatcher.start()));
        String asMatched = stringMatcher.group();
        if (SingleQuotedStringContainingParameterSeparators.matcher(asMatched).matches()) {
            // The matched string is unsafe, provide cover for it in safeText.
            originalString.add(asMatched);
            safeText.append(SafeParamStringValuePattern);
        } else {
            // The matched string is safe. Add it to safeText.
            safeText.append(asMatched);
        }
        paramText = paramText.substring(stringMatcher.end());
        stringMatcher = SingleQuotedString.matcher(paramText);
    }
    // Save anything after the last found string.
    safeText.append(paramText);

    ArrayList<String> params = new ArrayList<>();
    int subCount = 0;
    int neededSubs = originalString.size();
    // Split the params at the separators
    String[] split = safeText.toString().split("[\\s,]+");
    for (String fragment : split) {
        if (fragment.isEmpty()) {
            continue; // ignore effects of leading or trailing separators
        }
        // Replace each substitution in order exactly once.
        if (subCount < neededSubs) {
            // Substituted strings will normally take up an entire parameter,
            // but some cases like parameters containing escaped single quotes
            // may require multiple serial substitutions.
            while (fragment.indexOf(SafeParamStringValuePattern) > -1) {
                fragment = fragment.replace(SafeParamStringValuePattern,
                        originalString.get(subCount));
                ++subCount;
            }
        }
        params.add(fragment);
    }
    // Every masked string must have been substituted back exactly once.
    assert (subCount == neededSubs);
    return params;
}
To the extent that comments are supported, they have already been stripped out.
600
14
154,316
/**
 * Parses a RECALL statement for sqlcmd.
 *
 * @param statement the raw input line
 * @param lineMax   the maximum allowed 0-based line index
 * @return results holding a 0-based line number on success or an error
 *         message on a malformed command; null if not a RECALL command
 */
public static ParseRecallResults parseRecallStatement(String statement, int lineMax) {
    Matcher matcher = RecallToken.matcher(statement);
    if (matcher.matches()) {
        String commandWordTerminator = matcher.group(1);
        String lineNumberText = matcher.group(2);
        String error;
        if (OneWhitespace.matcher(commandWordTerminator).matches()) {
            String trailings = matcher.group(3) + ";" + matcher.group(4);
            // In a valid command, both "trailings" groups should be empty.
            if (trailings.equals(";")) {
                try {
                    // Convert the 1-based user input to a 0-based index.
                    int line = Integer.parseInt(lineNumberText) - 1;
                    if (line < 0 || line > lineMax) {
                        throw new NumberFormatException();
                    }
                    // Return the recall line number.
                    return new ParseRecallResults(line);
                } catch (NumberFormatException e) {
                    error = "Invalid RECALL line number argument: '" + lineNumberText + "'";
                }
            }
            // For an invalid form of the command,
            // return an approximation of the garbage input.
            else {
                error = "Invalid RECALL line number argument: '"
                        + lineNumberText + " " + trailings + "'";
            }
        } else if (commandWordTerminator.equals("") || commandWordTerminator.equals(";")) {
            error = "Incomplete RECALL command. RECALL expects a line number argument.";
        } else {
            error = "Invalid RECALL command: a space and line number are required after 'recall'";
        }
        return new ParseRecallResults(error);
    }
    return null;
}
Parse RECALL statement for sqlcmd .
365
9
154,317
/**
 * Parses a FILE statement for sqlcmd, supporting plain, -batch, and
 * -inlinebatch forms.
 *
 * @param parentContext the FileInfo of the enclosing file, if any
 * @param statement     the raw input line
 * @return the parsed file descriptors, or null if not a FILE command
 * @throws SQLParser.Exception on a missing inline-batch delimiter or when
 *         no valid file name is found
 */
public static List<FileInfo> parseFileStatement(FileInfo parentContext, String statement) {
    Matcher fileMatcher = FileToken.matcher(statement);
    if (!fileMatcher.lookingAt()) {
        // This input does not start with FILE,
        // so it's not a file command, it's something else.
        // Return to caller a null and no errors.
        return null;
    }
    String remainder = statement.substring(fileMatcher.end(), statement.length());

    List<FileInfo> filesInfo = new ArrayList<>();

    Matcher inlineBatchMatcher = DashInlineBatchToken.matcher(remainder);
    if (inlineBatchMatcher.lookingAt()) {
        remainder = remainder.substring(inlineBatchMatcher.end(), remainder.length());
        Matcher delimiterMatcher = DelimiterToken.matcher(remainder);
        // use matches here (not lookingAt) because we want to match
        // all of the remainder, not just beginning
        if (delimiterMatcher.matches()) {
            String delimiter = delimiterMatcher.group(1);
            filesInfo.add(new FileInfo(parentContext, FileOption.INLINEBATCH, delimiter));
            return filesInfo;
        }
        throw new SQLParser.Exception(
                "Did not find valid delimiter for \"file -inlinebatch\" command.");
    }

    // It is either a plain or a -batch file command.
    FileOption option = FileOption.PLAIN;
    Matcher batchMatcher = DashBatchToken.matcher(remainder);
    if (batchMatcher.lookingAt()) {
        option = FileOption.BATCH;
        remainder = remainder.substring(batchMatcher.end(), remainder.length());
    }

    // remove spaces before and after filenames
    remainder = remainder.trim();

    // split filenames assuming they are separated by space ignoring spaces within quotes
    // tests for parsing in TestSqlCmdInterface.java
    List<String> filenames = new ArrayList<>();
    Pattern regex = Pattern.compile("[^\\s\']+|'[^']*'");
    Matcher regexMatcher = regex.matcher(remainder);
    while (regexMatcher.find()) {
        filenames.add(regexMatcher.group());
    }

    for (String filename : filenames) {
        Matcher filenameMatcher = FilenameToken.matcher(filename);
        // Use matches to match all input, not just beginning
        if (filenameMatcher.matches()) {
            filename = filenameMatcher.group(1);
            // Trim whitespace from beginning and end of the file name.
            // User may have wanted quoted whitespace at the beginning or end
            // of the file name, but that seems very unlikely.
            filename = filename.trim();
            // Expand a leading '~' to the user's home directory.
            if (filename.startsWith("~")) {
                filename = filename.replaceFirst("~", System.getProperty("user.home"));
            }
            filesInfo.add(new FileInfo(parentContext, option, filename));
        }
    }

    // If no filename, or a filename of only spaces, then throw an error.
    if (filesInfo.size() == 0) {
        String msg = String.format("Did not find valid file name in \"file%s\" command.",
                option == FileOption.BATCH ? " -batch" : "");
        throw new SQLParser.Exception(msg);
    }
    return filesInfo;
}
Parse FILE statement for sqlcmd .
761
8
154,318
public static String parseShowStatementSubcommand ( String statement ) { Matcher matcher = ShowToken . matcher ( statement ) ; if ( matcher . matches ( ) ) { String commandWordTerminator = matcher . group ( 1 ) ; if ( OneWhitespace . matcher ( commandWordTerminator ) . matches ( ) ) { String trailings = matcher . group ( 3 ) + ";" + matcher . group ( 4 ) ; // In a valid command, both "trailings" groups should be empty. if ( trailings . equals ( ";" ) ) { // Return the subcommand keyword -- possibly a valid one. return matcher . group ( 2 ) ; } // For an invalid form of the command, // return an approximation of the garbage input. return matcher . group ( 2 ) + " " + trailings ; } if ( commandWordTerminator . equals ( "" ) || commandWordTerminator . equals ( ";" ) ) { return commandWordTerminator ; // EOL or ; reached before subcommand } } return null ; }
Parse a SHOW or LIST statement for sqlcmd .
224
11
154,319
public static String parseHelpStatement ( String statement ) { Matcher matcher = HelpToken . matcher ( statement ) ; if ( matcher . matches ( ) ) { String commandWordTerminator = matcher . group ( 1 ) ; if ( OneWhitespace . matcher ( commandWordTerminator ) . matches ( ) ) { String trailings = matcher . group ( 3 ) + ";" + matcher . group ( 4 ) ; // In a valid command, both "trailings" groups should be empty. if ( trailings . equals ( ";" ) ) { // Return the subcommand keyword -- possibly a valid one. return matcher . group ( 2 ) ; } // For an invalid form of the command, // return an approximation of the garbage input. return matcher . group ( 2 ) + " " + trailings ; } if ( commandWordTerminator . equals ( "" ) || commandWordTerminator . equals ( ";" ) ) { return "" ; // EOL or ; reached before subcommand } return matcher . group ( 1 ) . trim ( ) ; } return null ; }
Parse a HELP statement for sqlcmd. The sub-command will be empty if the user just typed HELP.
232
21
154,320
/**
 * Given a parameter string of the form X'0123456789ABCDEF', returns just the
 * hex digits; returns null when the string is not a quoted hex literal.
 */
public static String getDigitsFromHexLiteral(String paramString) {
    Matcher matcher = SingleQuotedHexLiteral.matcher(paramString);
    return matcher.matches() ? matcher.group(1) : null;
}
Given a parameter string, if it is of the form X'0123456789ABCDEF', return a string containing just the hex digits. Otherwise return null.
63
30
154,321
public static long hexDigitsToLong ( String hexDigits ) throws SQLParser . Exception { // BigInteger.longValue() will truncate to the lowest 64 bits, // so we need to explicitly check if there's too many digits. if ( hexDigits . length ( ) > 16 ) { throw new SQLParser . Exception ( "Too many hexadecimal digits for BIGINT value" ) ; } if ( hexDigits . length ( ) == 0 ) { throw new SQLParser . Exception ( "Zero hexadecimal digits is invalid for BIGINT value" ) ; } // The method // Long.parseLong(<digits>, <radix>); // Doesn't quite do what we want---it expects a '-' to // indicate negative values, and doesn't want the sign bit set // in the hex digits. // // Once we support Java 1.8, we can use Long.parseUnsignedLong(<digits>, 16) // instead. long val = new BigInteger ( hexDigits , 16 ) . longValue ( ) ; return val ; }
Given a string of hex digits, produce a long value, assuming a 2's-complement representation.
227
17
154,322
/**
 * Parses an EXECUTE procedure call. The procedure signature map must be
 * non-null here; the internal variant allows null for testing.
 */
public static ExecuteCallResults parseExecuteCall(String statement,
        Map<String, Map<Integer, List<String>>> procedures)
        throws SQLParser.Exception {
    assert (procedures != null);
    return parseExecuteCallInternal(statement, procedures);
}
Parse EXECUTE procedure call .
56
8
154,323
private static ExecuteCallResults parseExecuteCallInternal ( String statement , Map < String , Map < Integer , List < String > > > procedures ) throws SQLParser . Exception { Matcher matcher = ExecuteCallPreamble . matcher ( statement ) ; if ( ! matcher . lookingAt ( ) ) { return null ; } String commandWordTerminator = matcher . group ( 1 ) ; if ( OneWhitespace . matcher ( commandWordTerminator ) . matches ( ) || // Might as well accept a comma delimiter anywhere in the exec command, // even near the start commandWordTerminator . equals ( "," ) ) { ExecuteCallResults results = new ExecuteCallResults ( ) ; String rawParams = statement . substring ( matcher . end ( ) ) ; results . params = parseExecParameters ( rawParams ) ; results . procedure = results . params . remove ( 0 ) ; // TestSqlCmdInterface passes procedures==null because it // doesn't need/want the param types. if ( procedures == null ) { results . paramTypes = null ; return results ; } Map < Integer , List < String > > signature = procedures . get ( results . procedure ) ; if ( signature == null ) { throw new SQLParser . Exception ( "Undefined procedure: %s" , results . procedure ) ; } results . paramTypes = signature . get ( results . params . size ( ) ) ; if ( results . paramTypes == null || results . params . size ( ) != results . paramTypes . size ( ) ) { String expectedSizes = "" ; for ( Integer expectedSize : signature . keySet ( ) ) { expectedSizes += expectedSize + ", " ; } throw new SQLParser . Exception ( "Invalid parameter count for procedure: %s (expected: %s received: %d)" , results . procedure , expectedSizes , results . params . size ( ) ) ; } return results ; } if ( commandWordTerminator . equals ( ";" ) ) { // EOL or ; reached before subcommand throw new SQLParser . Exception ( "Incomplete EXECUTE command. EXECUTE requires a procedure name argument." ) ; } throw new SQLParser . Exception ( "Invalid EXECUTE command. unexpected input: '" + commandWordTerminator + "'." ) ; }
Private implementation of parse EXECUTE procedure call . Also supports short - circuiting procedure lookup for testing .
490
22
154,324
public static boolean appearsToBeValidDDLBatch ( String batch ) { BufferedReader reader = new BufferedReader ( new StringReader ( batch ) ) ; String line ; try { while ( ( line = reader . readLine ( ) ) != null ) { if ( isWholeLineComment ( line ) ) { continue ; } line = line . trim ( ) ; if ( line . equals ( "" ) ) continue ; // we have a non-blank line that contains more than just a comment. return queryIsDDL ( line ) ; } } catch ( IOException e ) { // This should never happen for a StringReader assert ( false ) ; } // trivial empty batch: no lines are non-blank or non-comments return true ; }
Make sure that the batch starts with an appropriate DDL verb . We do not look further than the first token of the first non - comment and non - whitespace line .
156
35
154,325
/**
 * Parses an ECHO statement for sqlcmd. The result is the echoed text, ""
 * when the user typed just ECHO, or null when the input is not an ECHO.
 */
public static String parseEchoStatement(String statement) {
    Matcher matcher = EchoToken.matcher(statement);
    if (!matcher.matches()) {
        return null;
    }
    boolean hasArgument = OneWhitespace.matcher(matcher.group(1)).matches();
    return hasArgument ? matcher.group(2) : "";
}
Parse an ECHO statement for sqlcmd. The result will be empty if the user just typed ECHO.
89
21
154,326
/**
 * Parses an ECHOERROR statement for sqlcmd. The result is the text to echo
 * to stderr, "" when the user typed just ECHOERROR, or null when the input
 * is not an ECHOERROR.
 */
public static String parseEchoErrorStatement(String statement) {
    Matcher matcher = EchoErrorToken.matcher(statement);
    if (!matcher.matches()) {
        return null;
    }
    boolean hasArgument = OneWhitespace.matcher(matcher.group(1)).matches();
    return hasArgument ? matcher.group(2) : "";
}
Parse an ECHOERROR statement for sqlcmd. The result will be empty if the user just typed ECHOERROR.
91
23
154,327
public static String parseDescribeStatement ( String statement ) { Matcher matcher = DescribeToken . matcher ( statement ) ; if ( matcher . matches ( ) ) { String commandWordTerminator = matcher . group ( 1 ) ; if ( OneWhitespace . matcher ( commandWordTerminator ) . matches ( ) ) { String trailings = matcher . group ( 3 ) + ";" + matcher . group ( 4 ) ; // In a valid command, both "trailings" groups should be empty. if ( trailings . equals ( ";" ) ) { // Return the subcommand keyword -- possibly a valid one. return matcher . group ( 2 ) ; } // For an invalid form of the command, // return an approximation of the garbage input. return matcher . group ( 2 ) + " " + trailings ; } if ( commandWordTerminator . equals ( "" ) || commandWordTerminator . equals ( ";" ) ) { return commandWordTerminator ; // EOL or ; reached before subcommand } } return null ; }
Parse a DESCRIBE statement for sqlcmd. The result will be empty if the user just typed DESCRIBE or DESC.
224
28
154,328
/**
 * Resolves ORDER BY items of a UNION query: only a simple column reference
 * or a 1-based integer column position is allowed. Each resolved item gets
 * its queryTableColumnIndex set to the 0-based union column index; anything
 * else raises error X_42576.
 * NOTE(review): the method name misspells "References"; renaming would
 * break callers, so it is left as-is.
 */
void resolveColumnRefernecesInUnionOrderBy() {
    int orderCount = sortAndSlice.getOrderLength();
    if (orderCount == 0) {
        return;
    }
    String[] unionColumnNames = getColumnNames();
    for (int i = 0; i < orderCount; i++) {
        Expression sort = (Expression) sortAndSlice.exprList.get(i);
        Expression e = sort.getLeftNode();
        if (e.getType() == OpTypes.VALUE) {
            // Integer literal: interpret as a 1-based column position.
            if (e.getDataType().typeCode == Types.SQL_INTEGER) {
                int index = ((Integer) e.getValue(null)).intValue();
                if (0 < index && index <= unionColumnNames.length) {
                    sort.getLeftNode().queryTableColumnIndex = index - 1;
                    continue;
                }
            }
        } else if (e.getType() == OpTypes.COLUMN) {
            // Column reference: match by name against the union's columns.
            int index = ArrayUtil.find(unionColumnNames, e.getColumnName());
            if (index >= 0) {
                sort.getLeftNode().queryTableColumnIndex = index;
                continue;
            }
        }
        // Neither a valid position nor a resolvable column name.
        throw Error.error(ErrorCode.X_42576);
    }
    sortAndSlice.prepare(null);
}
Only simple column reference or column position allowed
286
8
154,329
/**
 * Sets the column name list, used in views after full type resolution.
 * Applies directly to the result table when one exists; otherwise delegates
 * down the left query expression.
 */
public void setTableColumnNames(HashMappedList list) {
    if (resultTable == null) {
        leftQueryExpression.setTableColumnNames(list);
    } else {
        ((TableDerived) resultTable).columnList = list;
    }
}
Used in views after full type resolution
54
7
154,330
/**
 * Marks this expression as the root node. Not for views; only used on the
 * root. Rejects statements that reference sequences at the top level.
 */
public void setAsTopLevel() {
    // Sequences are not allowed in a top-level query expression.
    if (compileContext.getSequences().length != 0) {
        throw Error.error(ErrorCode.X_42598);
    }
    isTopLevel = true;
    setReturningResultSet();
}
Not for views . Only used on root node .
55
10
154,331
/**
 * Sets the scope to SESSION on the QueryExpression that actually creates
 * the table; non-corresponding unions delegate to the left expression.
 */
void setReturningResultSet() {
    if (!unionCorresponding) {
        leftQueryExpression.setReturningResultSet();
        return;
    }
    persistenceScope = TableBase.SCOPE_SESSION;
    columnMode = TableBase.COLUMNS_UNREFERENCED;
}
Sets the scope to SESSION for the QueryExpression object that creates the table
62
17
154,332
/**
 * Adds a recurring task to the scheduler that prints statistics to the
 * console every m_config.displayinterval seconds.
 */
public void schedulePeriodicStats() {
    // Method reference replaces the anonymous Runnable; this code base
    // already uses Java 8 lambdas elsewhere.
    m_scheduler.scheduleWithFixedDelay(this::printStatistics,
            m_config.displayinterval,
            m_config.displayinterval,
            TimeUnit.SECONDS);
}
Add a task to the scheduler to print statistics to the console at regular intervals .
82
17
154,333
/**
 * Prints summary statistics about client workload performance -- throughput
 * always, plus a full latency report and histogram when latencyreport is
 * enabled -- then writes a summary CSV if requested.
 *
 * @throws Exception if fetching the statistics fails
 */
public synchronized void printResults() throws Exception {
    ClientStats stats = m_fullStatsContext.fetch().getStats();

    // Client workload statistics.
    System.out.print(HORIZONTAL_RULE);
    System.out.println(" Client Workload Statistics");
    System.out.println(HORIZONTAL_RULE);
    System.out.printf("Average throughput: %,9d txns/sec\n", stats.getTxnThroughput());

    if (m_config.latencyreport) {
        System.out.printf("Average latency: %,9.2f ms\n", stats.getAverageLatency());
        System.out.printf("10th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.1));
        System.out.printf("25th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.25));
        System.out.printf("50th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.5));
        System.out.printf("75th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.75));
        System.out.printf("90th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.9));
        System.out.printf("95th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.95));
        System.out.printf("99th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.99));
        System.out.printf("99.5th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.995));
        System.out.printf("99.9th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.999));

        // System server statistics.
        System.out.print("\n" + HORIZONTAL_RULE);
        System.out.println(" System Server Statistics");
        System.out.println(HORIZONTAL_RULE);
        System.out.printf("Reported Internal Avg Latency: %,9.2f ms\n", stats.getAverageInternalLatency());

        // Latency histogram.
        System.out.print("\n" + HORIZONTAL_RULE);
        System.out.println(" Latency Histogram");
        System.out.println(HORIZONTAL_RULE);
        System.out.println(stats.latencyHistoReport());
    }

    // 4. Write stats to file if requested
    m_client.writeSummaryCSV(stats, m_config.statsfile);
}
Prints some summary statistics about performance .
670
8
154,334
/**
 * Performs an orderly shutdown of the demo: stops scheduled tasks, drains
 * outstanding transactions, and closes the client connections.
 */
private void shutdown() {
    // Stop the stats printer, the bid generator and the nibble deleter.
    m_scheduler.shutdown();
    try {
        m_scheduler.awaitTermination(60, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        // NOTE(review): the interrupt is swallowed without re-interrupting
        // the thread; tolerable on this terminal shutdown path.
        e.printStackTrace();
    }
    try {
        // block until all outstanding txns return
        m_client.drain();
        // close down the client connections
        m_client.close();
    } catch (IOException | InterruptedException e) {
        e.printStackTrace();
    }
}
Perform various tasks to end the demo cleanly .
128
11
154,335
/**
 * Asynchronously invokes the GetHighestBidForLocation stored procedure for
 * a random device at a random point; the procedure returns the id of the
 * bid with the highest dollar amount for that location.
 */
private void requestAd() {
    // floorMod guarantees a non-negative id. The original used
    // Math.abs(nextLong()) % NUM_DEVICES, which yields a negative id when
    // nextLong() returns Long.MIN_VALUE (abs of MIN_VALUE is still negative).
    long deviceId = Math.floorMod(m_rand.nextLong(), AdBrokerBenchmark.NUM_DEVICES);
    GeographyPointValue point = getRandomPoint();
    try {
        m_client.callProcedure(new NullCallback(), "GetHighestBidForLocation",
                deviceId, point);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
Invoke the stored procedure GetHighestBidForLocation which given a random point returns the id of the bid that has the highest dollar amount .
100
30
154,336
/**
 * Promotes this subquery's single-partitioning expression up to the parent
 * level, folding it into the parent's value-equivalence map and equality
 * sets. No-op when the scan involves no partitioned tables or needs two
 * fragments.
 *
 * @param valueEquivalence parent map from expression to its equivalence set
 * @param eqSets           parent collection of equality sets to maintain
 */
public void promoteSinglePartitionInfo(
        HashMap<AbstractExpression, Set<AbstractExpression>> valueEquivalence,
        Set<Set<AbstractExpression>> eqSets) {
    assert (getScanPartitioning() != null);
    if (getScanPartitioning().getCountOfPartitionedTables() == 0
            || getScanPartitioning().requiresTwoFragments()) {
        return;
    }
    // This subquery is a single partitioned query on partitioned tables
    // promoting the single partition expression up to its parent level.
    AbstractExpression spExpr = getScanPartitioning().singlePartitioningExpression();
    for (SchemaColumn col : m_partitioningColumns) {
        AbstractExpression tveKey = col.getExpression();
        assert (tveKey instanceof TupleValueExpression);
        Set<AbstractExpression> values = null;
        if (valueEquivalence.containsKey(tveKey)) {
            values = valueEquivalence.get(tveKey);
        } else if (valueEquivalence.containsKey(spExpr)) {
            values = valueEquivalence.get(spExpr);
        } else {
            // Fall back to any sibling partitioning column already in the map.
            for (SchemaColumn otherCol : m_partitioningColumns) {
                if (col != otherCol
                        && valueEquivalence.containsKey(otherCol.getExpression())) {
                    values = valueEquivalence.get(otherCol.getExpression());
                    break;
                }
            }
            if (values == null) {
                values = new HashSet<>();
            }
        }
        updateEqualSets(values, valueEquivalence, eqSets, tveKey, spExpr);
    }
}
Promote the single-partitioning expression to the parent level, adding the information to the equality sets and the input value equivalence.
379
20
154,337
/**
 * Adds spExpr and tveKey to the given equivalence set and registers the set
 * under both expressions. The set is removed from eqSets before mutation
 * and re-added afterwards because HashSet stores the hash code computed at
 * insertion time for a non-final element -- mutating a contained set in
 * place would leave eqSets corrupted.
 */
private void updateEqualSets(Set<AbstractExpression> values,
        HashMap<AbstractExpression, Set<AbstractExpression>> valueEquivalence,
        Set<Set<AbstractExpression>> eqSets,
        AbstractExpression tveKey, AbstractExpression spExpr) {
    boolean hasLegacyValues = false;
    if (eqSets.contains(values)) {
        // Remove before mutating so the stored hash stays consistent.
        eqSets.remove(values);
        hasLegacyValues = true;
    }
    values.add(spExpr);
    values.add(tveKey);
    if (hasLegacyValues) {
        eqSets.add(values);
    }
    valueEquivalence.put(spExpr, values);
    valueEquivalence.put(tveKey, values);
}
Because HashSet stores a stale (insertion-time) hash code for a non-final, mutable element.
165
15
154,338
/**
 * @return true iff every table in the FROM clause defining this subquery is replicated.
 */
@Override
public boolean getIsReplicated() {
    for (StmtTableScan scan : m_subqueryStmt.allScans()) {
        if (!scan.getIsReplicated()) {
            return false; // one partitioned scan makes the whole subquery partitioned
        }
    }
    return true;
}
The subquery is replicated if all tables from the FROM clause defining this subquery are replicated
58
18
154,339
/**
 * Produce a tuple value expression addressing the index-th output column of
 * this subquery, using the subquery's table alias for both name and alias.
 */
public TupleValueExpression getOutputExpression(int index) {
    final SchemaColumn col = getSchemaColumn(index);
    final String tableAlias = getTableAlias();
    final String columnAlias = col.getColumnAlias();
    return new TupleValueExpression(tableAlias, tableAlias, columnAlias, columnAlias, index);
}
Produce a tuple value expression for a column produced by this subquery
82
14
154,340
/**
 * Build the upper- or lower-bound comparison implied by a prefix LIKE pattern.
 *
 * @param leftExpr        the column/expression being filtered
 * @param rangeComparator GTE/LT comparator for the bound
 * @param comparand       the constant string bound
 */
static private ComparisonExpression rangeFilterFromPrefixLike(
        AbstractExpression leftExpr, ExpressionType rangeComparator, String comparand) {
    ConstantValueExpression bound = new ConstantValueExpression();
    bound.setValueType(VoltType.STRING);
    bound.setValue(comparand);
    bound.setValueSize(comparand.length());
    return new ComparisonExpression(rangeComparator, leftExpr, bound);
}
Construct the upper or lower bound expression that is implied by a prefix LIKE operator given its required elements .
112
20
154,341
/**
 * Substitute only the table name/alias on every schema column and every map key;
 * column names and aliases are left untouched.
 *
 * @return this schema, for chaining
 */
public NodeSchema resetTableName(String tbName, String tbAlias) {
    for (SchemaColumn col : m_columns) {
        col.reset(tbName, tbAlias, col.getColumnName(), col.getColumnAlias());
    }
    for (SchemaColumn key : m_columnsMapHelper.keySet()) {
        key.reset(tbName, tbAlias, key.getColumnName(), key.getColumnAlias());
    }
    return this;
}
Substitute table name only for all schema columns and map entries
109
13
154,342
/**
 * Append a column to this schema, recording its position in the lookup map.
 */
public void addColumn(SchemaColumn column) {
    // The column's index is its position before the append.
    m_columnsMapHelper.put(column, m_columns.size());
    m_columns.add(column);
}
Add a column to this schema .
48
7
154,343
/**
 * Retrieve the SchemaColumn matching the given table/column name and alias,
 * or null when no such column exists.
 */
public SchemaColumn find(String tableName, String tableAlias,
        String columnName, String columnAlias) {
    SchemaColumn probe = new SchemaColumn(tableName, tableAlias, columnName, columnAlias);
    int idx = findIndexOfColumn(probe);
    return (idx == -1) ? null : m_columns.get(idx);
}
Retrieve the SchemaColumn that matches the provided arguments .
81
12
154,344
/**
 * Sort the [fromIndex, toIndex) sub-range of the schema columns in place by
 * TVE index. All elements in the range must be TupleValueExpressions.
 */
void sortByTveIndex(int fromIndex, int toIndex) {
    m_columns.subList(fromIndex, toIndex).sort(TVE_IDX_COMPARE);
}
Sort a sub - range of the schema columns by TVE index . All elements must be TupleValueExpressions . Modification is made in - place .
47
32
154,345
/**
 * Compare two schemas column-by-column on names only; the differentiator
 * field is deliberately ignored.
 */
public boolean equalsOnlyNames(NodeSchema otherSchema) {
    if (otherSchema == null || otherSchema.size() != size()) {
        return false;
    }
    for (int i = 0; i < size(); i++) {
        if (otherSchema.getColumn(i).compareNames(m_columns.get(i)) != 0) {
            return false;
        }
    }
    return true;
}
True when all column names are the same; the differentiator field is ignored.
124
14
154,346
/**
 * Return a copy of this NodeSchema with every non-TVE expression replaced by an
 * appropriate TVE. Used when deriving a node's output schema from its children:
 * the columns carry across, non-TVE expressions stay behind.
 */
NodeSchema copyAndReplaceWithTVE() {
    NodeSchema result = new NodeSchema();
    for (int i = 0; i < m_columns.size(); i++) {
        result.addColumn(m_columns.get(i).copyAndReplaceWithTVE(i));
    }
    return result;
}
Returns a copy of this NodeSchema but with all non - TVE expressions replaced with an appropriate TVE . This is used primarily when generating a node s output schema based on its childrens schema ; we want to carry the columns across but leave any non - TVE expressions behind .
73
58
154,347
public boolean harmonize ( NodeSchema otherSchema , String schemaKindName ) { if ( size ( ) != otherSchema . size ( ) ) { throw new PlanningErrorException ( "The " + schemaKindName + "schema and the statement output schemas have different lengths." ) ; } boolean changedSomething = false ; for ( int idx = 0 ; idx < size ( ) ; idx += 1 ) { SchemaColumn myColumn = getColumn ( idx ) ; SchemaColumn otherColumn = otherSchema . getColumn ( idx ) ; VoltType myType = myColumn . getValueType ( ) ; VoltType otherType = otherColumn . getValueType ( ) ; VoltType commonType = myType ; if ( ! myType . canExactlyRepresentAnyValueOf ( otherType ) ) { if ( otherType . canExactlyRepresentAnyValueOf ( myType ) ) { commonType = otherType ; } else { throw new PlanningErrorException ( "The " + schemaKindName + " column type and the statement output type for column " + idx + " are incompatible." ) ; } } if ( myType != commonType ) { changedSomething = true ; myColumn . setValueType ( commonType ) ; } // Now determine the length, and the "in bytes" flag if needed assert ( myType . isVariableLength ( ) == otherType . isVariableLength ( ) ) ; // The type will be one of: // - fixed size // - VARCHAR (need special logic for bytes vs. chars) // - Some other variable length type int commonSize ; if ( ! myType . isVariableLength ( ) ) { commonSize = myType . getLengthInBytesForFixedTypesWithoutCheck ( ) ; } else if ( myType == VoltType . STRING ) { boolean myInBytes = myColumn . getInBytes ( ) ; boolean otherInBytes = otherColumn . getInBytes ( ) ; if ( myInBytes == otherInBytes ) { commonSize = Math . max ( myColumn . getValueSize ( ) , otherColumn . getValueSize ( ) ) ; } else { // one is in bytes and the other is in characters int mySizeInBytes = ( myColumn . getInBytes ( ) ? 1 : 4 ) * myColumn . getValueSize ( ) ; int otherSizeInBytes = ( otherColumn . getInBytes ( ) ? 1 : 4 ) * otherColumn . getValueSize ( ) ; if ( ! myColumn . getInBytes ( ) ) { myColumn . 
setInBytes ( true ) ; changedSomething = true ; } commonSize = Math . max ( mySizeInBytes , otherSizeInBytes ) ; if ( commonSize > VoltType . MAX_VALUE_LENGTH ) { commonSize = VoltType . MAX_VALUE_LENGTH ; } } } else { commonSize = Math . max ( myColumn . getValueSize ( ) , otherColumn . getValueSize ( ) ) ; } if ( commonSize != myColumn . getValueSize ( ) ) { myColumn . setValueSize ( commonSize ) ; changedSomething = true ; } } return changedSomething ; }
Modifies this schema such that its columns can accommodate both values of its own types and that of otherSchema . Does not modify otherSchema .
667
30
154,348
/**
 * Set every iteration-value field in a single call; the all-or-nothing shape
 * avoids accidental omission of some values.
 */
void set(final long valueIteratedTo, final long valueIteratedFrom,
         final long countAtValueIteratedTo, final long countInThisIterationStep,
         final long totalCountToThisValue, final long totalValueToThisValue,
         final double percentile, final double percentileLevelIteratedTo,
         double integerToDoubleValueConversionRatio) {
    // Assignments are independent; order is irrelevant.
    this.integerToDoubleValueConversionRatio = integerToDoubleValueConversionRatio;
    this.percentileLevelIteratedTo = percentileLevelIteratedTo;
    this.percentile = percentile;
    this.totalValueToThisValue = totalValueToThisValue;
    this.totalCountToThisValue = totalCountToThisValue;
    this.countAddedInThisIterationStep = countInThisIterationStep;
    this.countAtValueIteratedTo = countAtValueIteratedTo;
    this.valueIteratedFrom = valueIteratedFrom;
    this.valueIteratedTo = valueIteratedTo;
}
Set is all - or - nothing to avoid the potential for accidental omission of some values ...
200
18
154,349
@ Override public List < AbstractExpression > bindingToIndexedExpression ( AbstractExpression expr ) { if ( m_originalValue == null || ! m_originalValue . equals ( expr ) ) { return null ; } // This parameter's value was matched, so return this as one bound parameter. List < AbstractExpression > result = new ArrayList < AbstractExpression > ( ) ; result . add ( this ) ; return result ; }
query in which that constant differs .
94
7
154,350
/**
 * Extract a single parameter, reading from the serialized parameter buffer
 * when present, otherwise from the deserialized parameter set.
 *
 * @throws RuntimeException wrapping any failure, tagged with the bad index
 */
Object getParameterAtIndex(int partitionIndex) {
    try {
        if (serializedParams == null) {
            return params.get().getParam(partitionIndex);
        }
        // duplicate() so the shared buffer's position is untouched.
        return ParameterSet.getParameterAtIndex(partitionIndex, serializedParams.duplicate());
    } catch (Exception ex) {
        throw new RuntimeException("Invalid partitionIndex: " + partitionIndex, ex);
    }
}
Read into an serialized parameter buffer to extract a single parameter
91
12
154,351
/**
 * Serializes this SPI in the original serialization version; currently used by DR.
 * Layout: version byte, varbinary procedure name, client handle, parameter set.
 *
 * @param buf destination buffer; must have getSerializedSizeForOriginalVersion() bytes free
 * @throws IOException on parameter serialization failure
 */
public void flattenToBufferForOriginalVersion(ByteBuffer buf) throws IOException {
    assert((params != null) || (serializedParams != null)); // for self-check assertion
    int startPosition = buf.position();
    buf.put(ProcedureInvocationType.ORIGINAL.getValue());
    SerializationHelper.writeVarbinary(getProcNameBytes(), buf);
    buf.putLong(clientHandle);
    serializeParams(buf);
    // Self-check: bytes written must match the precomputed size.
    int len = buf.position() - startPosition;
    assert(len == getSerializedSizeForOriginalVersion());
}
Serializes this SPI in the original serialization version . This is currently used by DR .
134
18
154,352
/**
 * Record an offset submitted for consumption, tracking the maximum submitted
 * offset. When the in-flight window (committedOffsets.length) is exhausted,
 * blocks (bounded by m_gapFullWait) until commits advance the safe offset.
 */
@Override
public synchronized void submit(long offset) {
    // First non-negative submission seeds the tracking window.
    if (submittedOffset == -1L && offset >= 0) {
        committedOffsets[idx(offset)] = safeOffset = submittedOffset = offset;
    }
    if (firstOffset == -1L) {
        firstOffset = offset;
    }
    // Window full: park until commit() frees space or the wait times out.
    if ((offset - safeOffset) >= committedOffsets.length) {
        offerOffset = offset;
        try {
            wait(m_gapFullWait);
        } catch (InterruptedException e) {
            // NOTE(review): interrupt status is not restored here -- confirm intended.
            LOGGER.rateLimitedLog(LOG_SUPPRESSION_INTERVAL_SECONDS, Level.WARN, e,
                    "CommitTracker wait was interrupted for group " + consumerGroup +
                    " topic " + topic + " partition " + partition);
        }
    }
    // Track the high-water mark of submitted offsets.
    if (offset > submittedOffset) {
        submittedOffset = offset;
    }
}
submit an offset while consuming a message and record the maximal submitted offset
173
13
154,353
/**
 * Record an offset as processed and advance safeOffset -- the highest offset
 * with no gaps below it -- which is what may safely be committed back to Kafka.
 *
 * @return the current safe-to-commit offset
 */
@Override
public synchronized long commit(long offset) {
    // Only offsets within (safeOffset, submittedOffset] can move the commit point.
    if (offset <= submittedOffset && offset > safeOffset) {
        int ggap = (int) Math.min(committedOffsets.length, offset - safeOffset);
        if (ggap == committedOffsets.length) {
            // Window overflow: forcibly slide the commit point forward.
            LOGGER.rateLimitedLog(LOG_SUPPRESSION_INTERVAL_SECONDS, Level.WARN, null,
                    "CommitTracker moving topic commit point from %d to %d for topic " + topic +
                    " partition " + partition + " group:" + consumerGroup,
                    safeOffset, (offset - committedOffsets.length + 1));
            safeOffset = offset - committedOffsets.length + 1;
            committedOffsets[idx(safeOffset)] = safeOffset;
        }
        committedOffsets[idx(offset)] = offset;
        // Advance safeOffset across every contiguous run of committed offsets.
        // NOTE(review): ggap is never decremented, so the `ggap > 0` guard is
        // always true inside this branch -- confirm that is intended.
        while (ggap > 0 && committedOffsets[idx(safeOffset)] + 1 == committedOffsets[idx(safeOffset + 1)]) {
            ++safeOffset;
        }
        // Wake a submitter blocked in submit() once the window has room again.
        if (offerOffset >= 0 && (offerOffset - safeOffset) < committedOffsets.length) {
            offerOffset = -1L;
            notify();
        }
    }
    if (offset == firstOffset) {
        firstOffsetCommitted = true;
    }
    return safeOffset;
}
VoltDB. The offset is recorded in committedOffsets and used to calculate the safeOffset which is safe to commit to Kafka.
274
26
154,354
/**
 * Rate-limited logging: emits at most once per m_maxLogIntervalMillis and delays
 * formatting the message until it will actually be logged.
 *
 * @param now        caller-supplied current time in milliseconds
 * @param level      level to emit at
 * @param cause      optional throwable folded into the formatted message
 * @param stemformat format string stem
 * @param args       format arguments
 */
public void log(long now, Level level, Throwable cause, String stemformat, Object... args) {
    if (now - m_lastLogTime > m_maxLogIntervalMillis) {
        synchronized (this) {
            // Re-check under the monitor: another thread may have just logged.
            if (now - m_lastLogTime > m_maxLogIntervalMillis) {
                String message = formatMessage(cause, stemformat, args);
                switch (level) {
                    case DEBUG:
                        m_logger.debug(message);
                        break;
                    case ERROR:
                        m_logger.error(message);
                        break;
                    case FATAL:
                        m_logger.fatal(message);
                        break;
                    case INFO:
                        m_logger.info(message);
                        break;
                    case TRACE:
                        m_logger.trace(message);
                        break;
                    case WARN:
                        m_logger.warn(message);
                        break;
                }
                m_lastLogTime = now;
            }
        }
    }
}
This variant delays the formatting of the string message until it is actually logged
192
14
154,355
/**
 * Notify the coordinator that this site has received the first fragment
 * message, and remember that the response was sent.
 */
private void sendFirstFragResponse() {
    if (ELASTICLOG.isDebugEnabled()) {
        ELASTICLOG.debug("P" + m_partitionId + " sending first fragment response to coordinator "
                + CoreUtils.hsIdToString(m_coordinatorHsId));
    }
    m_mailbox.send(m_coordinatorHsId,
            new RejoinMessage(m_mailbox.getHSId(), RejoinMessage.Type.FIRST_FRAGMENT_RECEIVED));
    m_firstFragResponseSent = true;
}
Notify the coordinator that this site has received the first fragment message
137
13
154,356
private void runForBlockingDataTransfer ( SiteProcedureConnection siteConnection ) { boolean sourcesReady = false ; RestoreWork restoreWork = m_dataSink . poll ( m_snapshotBufferAllocator ) ; if ( restoreWork != null ) { restoreBlock ( restoreWork , siteConnection ) ; sourcesReady = true ; } // The completion monitor may fire even if m_dataSink has not reached EOF in the case that there's no // replicated table in the database, so check for both conditions. if ( m_dataSink . isEOF ( ) || m_snapshotCompletionMonitor . isDone ( ) ) { // No more data from this data sink, close and remove it from the list m_dataSink . close ( ) ; if ( m_streamSnapshotMb != null ) { VoltDB . instance ( ) . getHostMessenger ( ) . removeMailbox ( m_streamSnapshotMb . getHSId ( ) ) ; m_streamSnapshotMb = null ; ELASTICLOG . debug ( m_whoami + " data transfer is finished" ) ; } if ( m_snapshotCompletionMonitor . isDone ( ) ) { try { SnapshotCompletionEvent event = m_snapshotCompletionMonitor . get ( ) ; siteConnection . setDRProtocolVersion ( event . drVersion ) ; assert ( event != null ) ; ELASTICLOG . debug ( "P" + m_partitionId + " noticed data transfer completion" ) ; m_completionAction . setSnapshotTxnId ( event . multipartTxnId ) ; setJoinComplete ( siteConnection , event . exportSequenceNumbers , event . drSequenceNumbers , event . drMixedClusterSizeConsumerState , false /* requireExistingSequenceNumbers */ , event . clusterCreateTime ) ; } catch ( InterruptedException e ) { // isDone() already returned true, this shouldn't happen VoltDB . crashLocalVoltDB ( "Impossible interruption happend" , true , e ) ; } catch ( ExecutionException e ) { VoltDB . crashLocalVoltDB ( "Error waiting for snapshot to finish" , true , e ) ; } } else { m_taskQueue . offer ( this ) ; } } else { // The sources are not set up yet, don't block the site, // return here and retry later. returnToTaskQueue ( sourcesReady ) ; } }
Blocking transfer all partitioned table data and notify the coordinator .
521
13
154,357
/**
 * Fetch the given service from the bundle at bundleURI, delegating to the
 * bundle registry, which loads and starts the bundle on demand.
 *
 * @param <T>       the service type
 * @param bundleURI jar URI identifying the bundle
 * @param svcClazz  class object of the requested service
 */
public <T> T getService(URI bundleURI, Class<T> svcClazz) {
    return m_bundles.getService(bundleURI, svcClazz);
}
Gets the service from the given bundle jar uri . Loads and starts the bundle if it isn t yet loaded
42
24
154,358
/**
 * Set the file position for the row and propagate it to every index node.
 */
public void setPos(int pos) {
    position = pos;
    for (NodeAVL n = nPrimaryNode; n != null; n = n.nNext) {
        ((NodeAVLDisk) n).iData = position;
    }
}
Sets the file position for the row
53
8
154,359
/**
 * Allocate a fresh chain of disk nodes, one per index of the owning table.
 * Used in CachedDataRow.
 */
void setNewNodes() {
    nPrimaryNode = new NodeAVLDisk(this, 0);
    NodeAVL tail = nPrimaryNode;
    final int indexCount = tTable.getIndexCount();
    for (int i = 1; i < indexCount; i++) {
        tail.nNext = new NodeAVLDisk(this, i);
        tail = tail.nNext;
    }
}
used in CachedDataRow
86
6
154,360
/**
 * Used exclusively by the Cache to save the row to disk. Writes only the node
 * data when the table row data has not changed -- the common case, since each
 * row deleted or inserted changes the nodes of several other rows.
 */
public void write(RowOutputInterface out) {
    try {
        writeNodes(out);
        if (hasDataChanged) {
            out.writeData(rowData, tTable.colTypes);
            out.writeEnd();
            hasDataChanged = false;
        }
    } catch (IOException e) {
        // NOTE(review): IOException silently swallowed -- presumably write
        // failures are detected elsewhere; confirm before relying on this.
    }
}
Used exclusively by Cache to save the row to disk . New implementation in 1 . 7 . 2 writes out only the Node data if the table row data has not changed . This situation accounts for the majority of invocations as for each row deleted or inserted the Nodes for several other rows will change .
65
60
154,361
/**
 * Write the row's storage size followed immediately by every index node,
 * then clear the nodes-changed flag.
 */
private void writeNodes(RowOutputInterface out) throws IOException {
    out.writeSize(storageSize);
    for (NodeAVL n = nPrimaryNode; n != null; n = n.nNext) {
        n.write(out);
    }
    hasNodesChanged = false;
}
Writes the Nodes immediately after the row size .
63
11
154,362
/**
 * Serialize this exception into the supplied byte buffer.
 * Layout: total length (excluding the length int itself), exception type
 * ordinal byte, message length + bytes, then subclass payload.
 *
 * @param b destination with at least getSerializedSize() bytes remaining
 */
public void serializeToBuffer(ByteBuffer b) {
    assert(getSerializedSize() <= b.remaining());
    b.putInt(getSerializedSize() - 4);
    b.put((byte) getExceptionType().ordinal());
    if (m_message != null) {
        // NOTE(review): getBytes() uses the platform default charset -- verify
        // the deserializer assumes the same encoding.
        final byte messageBytes[] = m_message.getBytes();
        b.putInt(messageBytes.length);
        b.put(messageBytes);
    } else {
        b.putInt(0);
    }
    p_serializeToBuffer(b);
}
Serialize this exception to the supplied byte buffer
129
9
154,363
/**
 * Called from the constructor to generate the column schema at run time.
 * Derived classes override this to add their columns and MUST call the
 * superclass version first so TIMESTAMP, HOST_ID and HOSTNAME keep their
 * leading positions.
 */
protected void populateColumnSchema(ArrayList<ColumnInfo> columns) {
    columns.add(new ColumnInfo("TIMESTAMP", VoltType.BIGINT));
    columns.add(new ColumnInfo(VoltSystemProcedure.CNAME_HOST_ID, VoltSystemProcedure.CTYPE_ID));
    columns.add(new ColumnInfo("HOSTNAME", VoltType.STRING));
}
Called from the constructor to generate the column schema at run time . Derived classes need to override this method in order to specify the columns they will be adding . The first line must always be a call the superclasses version of populateColumnSchema in order to ensure the columns are add to the list in the right order .
97
66
154,364
/**
 * Collect the latest stat values as an array of row arrays, suitable for
 * insertion into a VoltTable.
 *
 * @param interval whether to report interval (vs. cumulative) values
 * @param now      caller-supplied timestamp recorded for the rows
 */
public Object[][] getStatsRows(boolean interval, final Long now) {
    this.now = now;
    // Synchronizing on this allows derived classes to maintain thread safety.
    synchronized (this) {
        ArrayList<Object[]> rows = new ArrayList<Object[]>();
        Iterator<Object> keys = getStatsRowKeyIterator(interval);
        while (keys.hasNext()) {
            Object[] rowValues = new Object[columns.size()];
            updateStatsRow(keys.next(), rowValues);
            rows.add(rowValues);
        }
        return rows.toArray(new Object[0][]);
    }
}
Get the latest stat values as an array of arrays of objects suitable for insertion into an VoltTable
154
19
154,365
/**
 * Fill the leading row slots with the common values (timestamp, host id,
 * hostname). Mirrors populateColumnSchema: derived classes override this and
 * must call the superclass implementation first.
 */
protected void updateStatsRow(Object rowKey, Object rowValues[]) {
    rowValues[0] = now;
    rowValues[1] = m_hostId;
    rowValues[2] = m_hostname;
}
Update the parameter array with the latest values . This is similar to populateColumnSchema in that it must be overriden by derived classes and the derived class implementation must call the super classes implementation .
48
40
154,366
/**
 * Deserialize the data tree from the most recent valid snapshot, trying up to
 * 100 candidates newest-first and skipping any that fail to read or whose
 * Adler32 checksum mismatches the recorded value.
 *
 * @return the last processed zxid restored, or -1 when no snapshots exist
 * @throws IOException if none of the candidate snapshots is valid
 */
@Override
public long deserialize(DataTree dt, Map<Long, Long> sessions) throws IOException {
    // we run through 100 snapshots (not all of them)
    // if we cannot get it running within 100 snapshots
    // we should give up
    List<File> snapList = findNValidSnapshots(100);
    if (snapList.size() == 0) {
        return -1L;
    }
    File snap = null;
    boolean foundValid = false;
    for (int i = 0; i < snapList.size(); i++) {
        snap = snapList.get(i);
        InputStream snapIS = null;
        CheckedInputStream crcIn = null;
        try {
            LOG.info("Reading snapshot " + snap);
            snapIS = new BufferedInputStream(new FileInputStream(snap));
            crcIn = new CheckedInputStream(snapIS, new Adler32());
            InputArchive ia = BinaryInputArchive.getArchive(crcIn);
            deserialize(dt, sessions, ia);
            // Compare the running checksum against the value recorded at the end.
            long checkSum = crcIn.getChecksum().getValue();
            long val = ia.readLong("val");
            if (val != checkSum) {
                throw new IOException("CRC corruption in snapshot : " + snap);
            }
            foundValid = true;
            break;
        } catch (IOException e) {
            // Corrupt/unreadable snapshot: log and fall back to the next one.
            LOG.warn("problem reading snap file " + snap, e);
        } finally {
            if (snapIS != null)
                snapIS.close();
            if (crcIn != null)
                crcIn.close();
        }
    }
    if (!foundValid) {
        throw new IOException("Not able to find valid snapshots in " + snapDir);
    }
    // Recover the zxid from the snapshot file name.
    dt.lastProcessedZxid = Util.getZxidFromName(snap.getName(), "snapshot");
    return dt.lastProcessedZxid;
}
deserialize a data tree from the most recent snapshot
429
11
154,367
/**
 * Deserialize the data tree and sessions from an input archive, validating
 * the snapshot file header's magic number first.
 *
 * @throws IOException when the magic number does not match SNAP_MAGIC
 */
public void deserialize(DataTree dt, Map<Long, Long> sessions, InputArchive ia)
        throws IOException {
    FileHeader header = new FileHeader();
    header.deserialize(ia, "fileheader");
    if (header.getMagic() != SNAP_MAGIC) {
        throw new IOException("mismatching magic headers "
                + header.getMagic() + " != " + FileSnap.SNAP_MAGIC);
    }
    SerializeUtils.deserializeSnapshot(dt, ia, sessions);
}
deserialize the datatree from an inputarchive
124
11
154,368
/**
 * Find the most recent valid snapshot in the database directory.
 *
 * @return the newest valid snapshot file, or null when none exists
 */
@Override
public File findMostRecentSnapshot() throws IOException {
    List<File> candidates = findNValidSnapshots(1);
    return candidates.isEmpty() ? null : candidates.get(0);
}
find the most recent snapshot in the database .
55
9
154,369
/**
 * Return the last n snapshot files, newest first. No validity checking is
 * performed on the returned snapshots.
 */
public List<File> findNRecentSnapshots(int n) throws IOException {
    List<File> sorted = Util.sortDataDir(snapDir.listFiles(), "snapshot", false);
    List<File> recent = new ArrayList<File>();
    for (File f : sorted) {
        if (recent.size() == n) {
            break;
        }
        recent.add(f);
    }
    return recent;
}
find the last n snapshots . this does not have any checks if the snapshot might be valid or not
98
20
154,370
protected void serialize ( DataTree dt , Map < Long , Long > sessions , OutputArchive oa , FileHeader header ) throws IOException { // this is really a programmatic error and not something that can // happen at runtime if ( header == null ) throw new IllegalStateException ( "Snapshot's not open for writing: uninitialized header" ) ; header . serialize ( oa , "fileheader" ) ; SerializeUtils . serializeSnapshot ( dt , oa , sessions ) ; }
serialize the datatree and sessions
110
8
154,371
/**
 * Serialize the data tree and sessions into the given snapshot file, appending
 * an Adler32 checksum and a trailing "/" path marker. No-op once closed.
 */
@Override
public synchronized void serialize(DataTree dt, Map<Long, Long> sessions, File snapShot)
        throws IOException {
    if (!close) {
        OutputStream sessOS = new BufferedOutputStream(new FileOutputStream(snapShot));
        CheckedOutputStream crcOut = new CheckedOutputStream(sessOS, new Adler32());
        OutputArchive oa = BinaryOutputArchive.getArchive(crcOut);
        FileHeader header = new FileHeader(SNAP_MAGIC, VERSION, dbId);
        serialize(dt, sessions, oa, header);
        // Record the running checksum so deserialization can verify integrity.
        long val = crcOut.getChecksum().getValue();
        oa.writeLong(val, "val");
        oa.writeString("/", "path");
        sessOS.flush();
        // NOTE(review): streams are only closed on the success path; an
        // exception above leaks the file handle -- confirm callers compensate.
        crcOut.close();
        sessOS.close();
    }
}
serialize the datatree and session into the file snapshot
223
12
154,372
/**
 * Synchronously collect one memory-stats sample and record it in the
 * short-term history, and optionally the medium/large histories.
 *
 * @param medium also record in the medium-resolution history
 * @param large  also record in the large-resolution history
 * @return the collected sample, or null if sampling failed
 */
public static Datum sampleSystemNow(final boolean medium, final boolean large) {
    Datum d = generateCurrentSample();
    if (d == null) {
        return null;
    }
    appendBounded(historyS, d);
    if (medium) {
        appendBounded(historyM, d);
    }
    if (large) {
        appendBounded(historyL, d);
    }
    return d;
}

/** Append d to a history, evicting the oldest entry once historySize is exceeded. */
private static void appendBounded(ArrayDeque<Datum> history, Datum d) {
    history.addLast(d);
    if (history.size() > historySize) {
        history.removeFirst();
    }
}
Synchronously collect memory stats .
139
7
154,373
/**
 * Collect stats, asynchronously when sampling is slow. In PS mode (scraping
 * "ps") a worker thread is spawned unless one is already running; otherwise
 * sampling is cheap and runs inline on the caller's thread.
 */
public static synchronized void asyncSampleSystemNow(final boolean medium, final boolean large) {
    // slow mode starts an async thread
    if (mode == GetRSSMode.PS) {
        if (thread != null) {
            // An earlier sampler is still running; skip this round.
            if (thread.isAlive()) return;
            else thread = null;
        }
        thread = new Thread(new Runnable() {
            @Override
            public void run() {
                sampleSystemNow(medium, large);
            }
        });
        thread.start();
    }
    // fast mode doesn't spawn a thread
    else {
        sampleSystemNow(medium, large);
    }
}
Fire off a thread to asynchronously collect stats .
124
11
154,374
/**
 * One-time setup: record the process id and total memory size, then pick the
 * cheapest working way to read the RSS (native call, procfs, or "ps" scraping).
 */
private static synchronized void initialize() {
    PlatformProperties pp = PlatformProperties.getPlatformProperties();
    // Derive the pid from the runtime MXBean name.
    // NOTE(review): splits on ' '; the MXBean name is conventionally "pid@host"
    // -- confirm the delimiter is intended (may be a transcription artifact).
    String processName = java.lang.management.ManagementFactory.getRuntimeMXBean().getName();
    String pidString = processName.substring(0, processName.indexOf(' '));
    pid = Integer.valueOf(pidString);
    initialized = true;
    // get the RSS and other stats from scraping "ps" from the command line
    PSScraper.PSData psdata = PSScraper.getPSData(pid);
    assert(psdata.rss > 0);
    // figure out how much memory this thing has
    memorysize = pp.ramInMegabytes;
    assert(memorysize > 0);
    // now try to figure out the best way to get the rss size
    long rss = -1;
    // try the mac method
    try {
        rss = ExecutionEngine.nativeGetRSS();
    }
    // This catch is broad to specifically include the UnsatisfiedLinkError that arises when
    // using the hsqldb backend on linux -- along with any other exceptions that might arise.
    // Otherwise, the hsql backend would get an annoying report to stdout
    // as the useless stats thread got needlessly killed.
    catch (Throwable e) {
    }
    if (rss > 0) mode = GetRSSMode.MACOSX_NATIVE;
    // try procfs (preferred over the native path when available)
    rss = getRSSFromProcFS();
    if (rss > 0) mode = GetRSSMode.PROCFS;
    // notify users if stats collection might be slow
    if (mode == GetRSSMode.PS) {
        VoltLogger logger = new VoltLogger("HOST");
        logger.warn("System statistics will be collected in a sub-optimal "
                + "manner because either procfs couldn't be read from or "
                + "the native library couldn't be loaded.");
    }
}
Get the process id the total memory size and determine the best way to get the RSS on an ongoing basis .
415
22
154,375
/**
 * Read the RSS (bytes) from /proc/&lt;pid&gt;/stat, field 24 (page count),
 * assuming 4KB pages. Returns -1 when procfs is unavailable or unreadable.
 *
 * Improvement: try-with-resources replaces the manual close-in-finally, so the
 * reader is also closed if construction of the wrapper streams fails.
 */
private static long getRSSFromProcFS() {
    try {
        File statFile = new File(String.format("/proc/%d/stat", pid));
        try (BufferedReader r = new BufferedReader(
                new InputStreamReader(new FileInputStream(statFile)))) {
            String stats = r.readLine();
            String[] parts = stats.split(" ");
            // Field 24 (index 23) is rss in pages; convert to bytes (4KB pages).
            return Long.parseLong(parts[23]) * 4 * 1024;
        }
    } catch (Exception e) {
        return -1;
    }
}
Get the RSS using the procfs . If procfs is not around this will return - 1 ;
139
20
154,376
private static synchronized Datum generateCurrentSample ( ) { // Code used to fake system statistics by tests if ( testStatsProducer != null ) { return testStatsProducer . getCurrentStatsData ( ) ; } // get this info once if ( ! initialized ) initialize ( ) ; long rss = - 1 ; switch ( mode ) { case MACOSX_NATIVE : rss = ExecutionEngine . nativeGetRSS ( ) ; break ; case PROCFS : rss = getRSSFromProcFS ( ) ; break ; case PS : rss = PSScraper . getPSData ( pid ) . rss ; break ; } // create a new Datum which adds java stats Datum d = new Datum ( rss ) ; return d ; }
Poll the operating system and generate a Datum
161
9
154,377
/**
 * Build a Google Charts API URL plotting memory usage history (used Java heap,
 * RSS, and RSS plus unused Java heap) for the requested window.
 *
 * Fix: the byte-to-megabyte conversion divided by 1204 * 1024 -- "1204" is a
 * typo for 1024 (the RSS conversion below correctly uses 1024).
 *
 * @param minutes   history window; selects the small/medium/large history ring
 * @param width     chart width in pixels
 * @param height    chart height in pixels
 * @param timeLabel label for the time axis
 */
public static synchronized String getGoogleChartURL(int minutes, int width, int height, String timeLabel) {
    ArrayDeque<Datum> history = historyS;
    if (minutes > 2) history = historyM;
    if (minutes > 30) history = historyL;

    HTMLChartHelper chart = new HTMLChartHelper();
    chart.width = width;
    chart.height = height;
    chart.timeLabel = timeLabel;

    HTMLChartHelper.DataSet Jds = new HTMLChartHelper.DataSet();
    chart.data.add(Jds);
    Jds.title = "UsedJava";
    Jds.belowcolor = "ff9999";

    HTMLChartHelper.DataSet Rds = new HTMLChartHelper.DataSet();
    chart.data.add(Rds);
    Rds.title = "RSS";
    Rds.belowcolor = "ff0000";

    HTMLChartHelper.DataSet RUds = new HTMLChartHelper.DataSet();
    chart.data.add(RUds);
    RUds.title = "RSS+UnusedJava";
    RUds.dashlength = 6;
    RUds.spacelength = 3;
    RUds.thickness = 2;
    RUds.belowcolor = "ffffff";

    // Crop to the requested window and bucket samples into 30 slots.
    long cropts = System.currentTimeMillis();
    cropts -= (60 * 1000 * minutes);
    long modulo = (60 * 1000 * minutes) / 30;

    double maxmemdatum = 0;
    for (Datum d : history) {
        if (d.timestamp < cropts) continue;
        double javaused = d.javausedheapmem + d.javausedsysmem;
        double javaunused = SystemStatsCollector.javamaxheapmem - d.javausedheapmem;
        // bytes -> megabytes (was erroneously 1204 * 1024)
        javaused /= 1024 * 1024;
        javaunused /= 1024 * 1024;
        double rss = d.rss / 1024 / 1024;
        long ts = (d.timestamp / modulo) * modulo;
        if ((rss + javaunused) > maxmemdatum) maxmemdatum = rss + javaunused;
        RUds.append(ts, rss + javaunused);
        Rds.append(ts, rss);
        Jds.append(ts, javaused);
    }
    // Round the y-axis max up to the next power of two megabytes.
    chart.megsMax = 2;
    while (chart.megsMax < maxmemdatum) chart.megsMax *= 2;
    return chart.getURL(minutes);
}
Get a URL that uses the Google Charts API to show a chart of memory usage history .
539
19
154,378
/**
 * Manual performance testing harness: times each RSS-collection mechanism
 * ("ps" scraping, procfs, native call) over `repeat` iterations and prints
 * per-call latency plus how many calls returned a plausible value.
 */
public static void main(String[] args) {
    int repeat = 1000;
    long start, duration, correct;
    double per;
    // Derive the pid from the runtime MXBean name (same scheme as initialize()).
    String processName = java.lang.management.ManagementFactory.getRuntimeMXBean().getName();
    String pidString = processName.substring(0, processName.indexOf(' '));
    pid = Integer.valueOf(pidString);
    // ETHAN (11/7/2018): If loading the native library does not have to succeed, why load?
    // org.voltdb.NativeLibraryLoader.loadVoltDB(false);
    // test the default fallback performance
    start = System.currentTimeMillis();
    correct = 0;
    for (int i = 0; i < repeat; i++) {
        long rss = PSScraper.getPSData(pid).rss;
        if (rss > 0) correct++;
    }
    duration = System.currentTimeMillis() - start;
    per = duration / (double) repeat;
    System.out.printf("%.2f ms per \"ps\" call / %d / %d correct\n", per, correct, repeat);
    // test linux procfs performance
    start = System.currentTimeMillis();
    correct = 0;
    for (int i = 0; i < repeat; i++) {
        long rss = getRSSFromProcFS();
        if (rss > 0) correct++;
    }
    duration = System.currentTimeMillis() - start;
    per = duration / (double) repeat;
    System.out.printf("%.2f ms per procfs read / %d / %d correct\n", per, correct, repeat);
    // test mac performance
    start = System.currentTimeMillis();
    correct = 0;
    for (int i = 0; i < repeat; i++) {
        long rss = ExecutionEngine.nativeGetRSS();
        if (rss > 0) correct++;
    }
    duration = System.currentTimeMillis() - start;
    per = duration / (double) repeat;
    System.out.printf("%.2f ms per ee.nativeGetRSS call / %d / %d correct\n", per, correct, repeat);
}
Manual performance testing code for getting stats .
485
9
154,379
/**
 * Roll back the session's row actions from the given start index with the
 * given timestamp, then merge the rolled-back actions (safe because other
 * sessions never saw them) and truncate the action list back to start.
 */
void rollbackPartial(Session session, int start, long timestamp) {
    Object[] list = session.rowActionList.getArray();
    int limit = session.rowActionList.size();
    if (start == limit) {
        return; // nothing to roll back
    }
    for (int i = start; i < limit; i++) {
        RowAction action = (RowAction) list[i];
        if (action != null) {
            action.rollback(session, timestamp);
        } else {
            // NOTE(review): debug print left in a production code path.
            System.out.println("null action in rollback " + start);
        }
    }
    // rolled back transactions can always be merged as they have never been
    // seen by other sessions
    mergeRolledBackTransaction(session.rowActionList.getArray(), start, limit);
    rowActionMapRemoveTransaction(session.rowActionList.getArray(), start, limit, false);
    session.rowActionList.setSize(start);
}
rollback the row actions from start index in list and the given timestamp
199
14
154,380
/**
 * Row visibility check: a row with no action is readable by anyone; otherwise
 * defer to the row action's per-session visibility rules.
 */
public boolean canRead(Session session, Row row) {
    synchronized (row) {
        RowAction action = row.rowAction;
        return action == null || action.canRead(session);
    }
}
functional unit - accessibility of rows
48
6
154,381
/**
 * Attach transaction info to a row just loaded from the cache; a row that
 * already carries an action is left untouched. Called only for CACHED tables.
 */
public void setTransactionInfo(CachedObject object) {
    Row row = (Row) object;
    if (row.rowAction == null) {
        row.rowAction = (RowAction) rowActionMap.get(row.position);
    }
}
Add transaction info to a row just loaded from the cache. Called only for CACHED tables.
62
19
154,382
/**
 * Merge rollback state into the rows touched by the actions in
 * list[start..limit). Rows not held in memory are fetched from the row store;
 * null entries, no-op actions and finalized deletes are skipped.
 */
void mergeRolledBackTransaction(Object[] list, int start, int limit) {
    for (int i = start; i < limit; i++) {
        RowAction rowact = (RowAction) list[i];
        if (rowact == null || rowact.type == RowActionBase.ACTION_NONE
                || rowact.type == RowActionBase.ACTION_DELETE_FINAL) {
            continue;
        }
        Row row = rowact.memoryRow;
        if (row == null) {
            PersistentStore store = rowact.session.sessionData.getRowStore(rowact.table);
            row = (Row) store.get(rowact.getPos(), false);
        }
        if (row == null) {
            // Row is gone entirely; nothing left to merge.
            continue;
        }
        synchronized (row) {
            rowact.mergeRollback(row);
        }
    }
}
merge a given list of transaction rollback action with given timestamp
209
13
154,383
void addToCommittedQueue ( Session session , Object [ ] list ) { synchronized ( committedTransactionTimestamps ) { // add the txList according to commit timestamp committedTransactions . addLast ( list ) ; // get session commit timestamp committedTransactionTimestamps . addLast ( session . actionTimestamp ) ; /* debug 190 if (committedTransactions.size() > 64) { System.out.println("******* excessive transaction queue"); } // debug 190 */ } }
add a list of actions to the end of queue
100
10
154,384
/**
 * Merges and discards every committed transaction whose commit timestamp
 * precedes the oldest live transaction - i.e. those that can no longer
 * be seen by any open transaction.
 */
void mergeExpiredTransactions(Session session) {

    long timestamp = getFirstLiveTransactionTimestamp();

    while (true) {
        long     commitTimestamp;
        Object[] actions;

        synchronized (committedTransactionTimestamps) {
            if (committedTransactionTimestamps.isEmpty()) {
                break;
            }

            commitTimestamp = committedTransactionTimestamps.getFirst();

            if (commitTimestamp >= timestamp) {
                // oldest committed entry is still in scope - stop
                break;
            }

            committedTransactionTimestamps.removeFirst();

            actions = (Object[]) committedTransactions.removeFirst();
        }

        mergeTransaction(session, actions, 0, actions.length,
                         commitTimestamp);
        rowActionMapRemoveTransaction(actions, 0, actions.length, true);
    }
}
expire all committed transactions that are no longer in scope
163
11
154,385
/**
 * Removes the session from the live-transaction queue when its
 * transaction ends, then expires any committed transactions that are no
 * longer required (those that ended before the first timestamp in the
 * live queue).
 *
 * Note: lock() is acquired BEFORE the try block - the standard Lock
 * idiom used elsewhere in this class (getTransactionIDList,
 * convertTransactionIDs). If lock() itself failed inside the try, the
 * finally clause would call unlock() on a lock this thread does not
 * hold, masking the original error with IllegalMonitorStateException.
 */
void endTransaction(Session session) {

    writeLock.lock();

    try {
        long timestamp = session.transactionTimestamp;

        synchronized (liveTransactionTimestamps) {
            session.isTransaction = false;

            int index = liveTransactionTimestamps.indexOf(timestamp);

            liveTransactionTimestamps.remove(index);
        }

        mergeExpiredTransactions(session);
    } finally {
        writeLock.unlock();
    }
}
remove session from queue when a transaction ends and expire any committed transactions that are no longer required . remove transactions ended before the first timestamp in liveTransactionsSession queue
90
32
154,386
RowAction [ ] getRowActionList ( ) { try { writeLock . lock ( ) ; Session [ ] sessions = database . sessionManager . getAllSessions ( ) ; int [ ] tIndex = new int [ sessions . length ] ; RowAction [ ] rowActions ; int rowActionCount = 0 ; { int actioncount = 0 ; for ( int i = 0 ; i < sessions . length ; i ++ ) { actioncount += sessions [ i ] . getTransactionSize ( ) ; } rowActions = new RowAction [ actioncount ] ; } while ( true ) { boolean found = false ; long minChangeNo = Long . MAX_VALUE ; int sessionIndex = 0 ; // find the lowest available SCN across all sessions for ( int i = 0 ; i < sessions . length ; i ++ ) { int tSize = sessions [ i ] . getTransactionSize ( ) ; if ( tIndex [ i ] < tSize ) { RowAction current = ( RowAction ) sessions [ i ] . rowActionList . get ( tIndex [ i ] ) ; if ( current . actionTimestamp < minChangeNo ) { minChangeNo = current . actionTimestamp ; sessionIndex = i ; } found = true ; } } if ( ! found ) { break ; } HsqlArrayList currentList = sessions [ sessionIndex ] . rowActionList ; for ( ; tIndex [ sessionIndex ] < currentList . size ( ) ; ) { RowAction current = ( RowAction ) currentList . get ( tIndex [ sessionIndex ] ) ; // if the next change no is in this session, continue adding if ( current . actionTimestamp == minChangeNo + 1 ) { minChangeNo ++ ; } if ( current . actionTimestamp == minChangeNo ) { rowActions [ rowActionCount ++ ] = current ; tIndex [ sessionIndex ] ++ ; } else { break ; } } } return rowActions ; } finally { writeLock . unlock ( ) ; } }
Return an array of all row actions sorted by System Change No .
418
13
154,387
/**
 * Returns a lookup of the row ids of all cached-table rows that belong
 * to transactions. Used for auto-defrag, since at defrag time there will
 * be no RowAction entries for the rows being moved.
 */
public DoubleIntIndex getTransactionIDList() {

    writeLock.lock();

    try {
        DoubleIntIndex lookup = new DoubleIntIndex(10, false);

        lookup.setKeysSearchTarget();

        for (Iterator it = this.rowActionMap.keySet().iterator();
                it.hasNext(); ) {
            lookup.addUnique(it.nextInt(), 0);
        }

        return lookup;
    } finally {
        writeLock.unlock();
    }
}
Return a lookup of all row ids for cached tables in transactions . For auto - defrag as currently there will be no RowAction entries at the time of defrag .
106
35
154,388
/**
 * Remaps the row ids of cached-table rows involved in transactions using
 * the given old-position to new-position lookup, rebuilding rowActionMap
 * keyed on the new positions.
 */
public void convertTransactionIDs(DoubleIntIndex lookup) {

    writeLock.lock();

    try {
        // snapshot all actions before clearing the map
        RowAction[] actions = new RowAction[rowActionMap.size()];
        Iterator    it      = this.rowActionMap.values().iterator();

        for (int i = 0; it.hasNext(); i++) {
            actions[i] = (RowAction) it.next();
        }

        rowActionMap.clear();

        for (int i = 0; i < actions.length; i++) {
            int pos = lookup.lookupFirstEqual(actions[i].getPos());

            actions[i].setPos(pos);
            rowActionMap.put(pos, actions[i]);
        }
    } finally {
        writeLock.unlock();
    }
}
Convert row ID s for cached table rows in transactions
176
11
154,389
// Factory hook overridden from the voltcore base class: maps a message-type
// id byte to a new, empty instance of the corresponding VoltDB-specific
// message subclass. Returns null for ids this subclass does not recognise,
// allowing the caller to fall back or report an unknown type.
@ Override protected VoltMessage instantiate_local ( byte messageType ) { // instantiate a new message instance according to the id VoltMessage message = null ; switch ( messageType ) { case INITIATE_TASK_ID : message = new InitiateTaskMessage ( ) ; break ; case INITIATE_RESPONSE_ID : message = new InitiateResponseMessage ( ) ; break ; case FRAGMENT_TASK_ID : message = new FragmentTaskMessage ( ) ; break ; case FRAGMENT_RESPONSE_ID : message = new FragmentResponseMessage ( ) ; break ; case PARTICIPANT_NOTICE_ID : message = new MultiPartitionParticipantMessage ( ) ; break ; case COALESCED_HEARTBEAT_ID : message = new CoalescedHeartbeatMessage ( ) ; break ; case COMPLETE_TRANSACTION_ID : message = new CompleteTransactionMessage ( ) ; break ; case COMPLETE_TRANSACTION_RESPONSE_ID : message = new CompleteTransactionResponseMessage ( ) ; break ; case IV2_INITIATE_TASK_ID : message = new Iv2InitiateTaskMessage ( ) ; break ; case IV2_REPAIR_LOG_REQUEST : message = new Iv2RepairLogRequestMessage ( ) ; break ; case IV2_REPAIR_LOG_RESPONSE : message = new Iv2RepairLogResponseMessage ( ) ; break ; case REJOIN_RESPONSE_ID : message = new RejoinMessage ( ) ; break ; case REJOIN_DATA_ID : message = new RejoinDataMessage ( ) ; break ; case REJOIN_DATA_ACK_ID : message = new RejoinDataAckMessage ( ) ; break ; case FRAGMENT_TASK_LOG_ID : message = new FragmentTaskLogMessage ( ) ; break ; case IV2_LOG_FAULT_ID : message = new Iv2LogFaultMessage ( ) ; break ; case IV2_EOL_ID : message = new Iv2EndOfLogMessage ( ) ; break ; case DUMP : message = new DumpMessage ( ) ; break ; case MP_REPLAY_ID : message = new MpReplayMessage ( ) ; break ; case MP_REPLAY_ACK_ID : message = new MpReplayAckMessage ( ) ; break ; case SNAPSHOT_CHECK_REQUEST_ID : message = new SnapshotCheckRequestMessage ( ) ; break ; case SNAPSHOT_CHECK_RESPONSE_ID : message = new SnapshotCheckResponseMessage ( ) ; break ; case IV2_REPAIR_LOG_TRUNCATION : message = new RepairLogTruncationMessage ( ) ; break ; case 
DR2_MULTIPART_TASK_ID : message = new Dr2MultipartTaskMessage ( ) ; break ; case DR2_MULTIPART_RESPONSE_ID : message = new Dr2MultipartResponseMessage ( ) ; break ; case DUMMY_TRANSACTION_TASK_ID : message = new DummyTransactionTaskMessage ( ) ; break ; case DUMMY_TRANSACTION_RESPONSE_ID : message = new DummyTransactionResponseMessage ( ) ; break ; case Migrate_Partition_Leader_MESSAGE_ID : message = new MigratePartitionLeaderMessage ( ) ; break ; case DUMP_PLAN_ID : message = new DumpPlanThenExitMessage ( ) ; break ; case FLUSH_RO_TXN_MESSAGE_ID : message = new MPBacklogFlushMessage ( ) ; break ; default : message = null ; } return message ; }
Overridden by subclasses to create message types unknown by voltcore
820
13
154,390
/**
 * Clears the database's manager references, making the underlying data
 * structures eligible for garbage collection.
 */
void clearStructures() {

    if (schemaManager != null) {
        schemaManager.clearStructures();
    }

    granteeManager = null;
    userManager    = null;
    nameManager    = null;
    schemaManager  = null;
    sessionManager = null;
    dbInfo         = null;
}
Clears the data structures, making them eligible for garbage collection .
57
15
154,391
public Result getScript ( boolean indexRoots ) { Result r = Result . newSingleColumnResult ( "COMMAND" , Type . SQL_VARCHAR ) ; String [ ] list = getSettingsSQL ( ) ; addRows ( r , list ) ; list = getGranteeManager ( ) . getSQL ( ) ; addRows ( r , list ) ; // schemas and schema objects such as tables, sequences, etc. list = schemaManager . getSQLArray ( ) ; addRows ( r , list ) ; // index roots if ( indexRoots ) { list = schemaManager . getIndexRootsSQL ( ) ; addRows ( r , list ) ; } // user session start schema names list = getUserManager ( ) . getInitialSchemaSQL ( ) ; addRows ( r , list ) ; // grantee rights list = getGranteeManager ( ) . getRightstSQL ( ) ; addRows ( r , list ) ; list = getPropertiesSQL ( ) ; addRows ( r , list ) ; return r ; }
Returns the schema and authorisation statements for the database .
228
11
154,392
private Expression readWindowSpecification ( int tokenT , Expression aggExpr ) { SortAndSlice sortAndSlice = null ; readThis ( Tokens . OPENBRACKET ) ; List < Expression > partitionByList = new ArrayList <> ( ) ; if ( token . tokenType == Tokens . PARTITION ) { read ( ) ; readThis ( Tokens . BY ) ; while ( true ) { Expression partitionExpr = XreadValueExpression ( ) ; partitionByList . add ( partitionExpr ) ; if ( token . tokenType == Tokens . COMMA ) { read ( ) ; continue ; } break ; } } if ( token . tokenType == Tokens . ORDER ) { // order by clause read ( ) ; readThis ( Tokens . BY ) ; sortAndSlice = XreadOrderBy ( ) ; } readThis ( Tokens . CLOSEBRACKET ) ; // We don't really care about aggExpr any more. It has the // aggregate expression as a non-windowed expression. We do // care about its parameters and whether it's specified as // unique though. assert ( aggExpr == null || aggExpr instanceof ExpressionAggregate ) ; Expression nodes [ ] ; boolean isDistinct ; if ( aggExpr != null ) { ExpressionAggregate winAggExpr = ( ExpressionAggregate ) aggExpr ; nodes = winAggExpr . nodes ; isDistinct = winAggExpr . isDistinctAggregate ; } else { nodes = Expression . emptyExpressionArray ; isDistinct = false ; } ExpressionWindowed windowedExpr = new ExpressionWindowed ( tokenT , nodes , isDistinct , sortAndSlice , partitionByList ) ; return windowedExpr ; }
This is a minimal parsing of the Window Specification . We only use partition by and order by lists . There is a lot of complexity in the full SQL specification which we don t parse at all .
366
40
154,393
private ExpressionLogical XStartsWithPredicateRightPart ( Expression left ) { readThis ( Tokens . WITH ) ; if ( token . tokenType == Tokens . QUESTION ) { // handle user parameter case Expression right = XreadRowValuePredicand ( ) ; if ( left . isParam ( ) && right . isParam ( ) ) { // again make sure the left side is valid throw Error . error ( ErrorCode . X_42567 ) ; } /** In this case, we make the right parameter as the lower bound, * and the right parameter concatenating a special char (greater than any other chars) as the upper bound. * It now becomes a range scan for all the strings with right parameter as its prefix. */ Expression l = new ExpressionLogical ( OpTypes . GREATER_EQUAL , left , right ) ; Expression r = new ExpressionLogical ( OpTypes . SMALLER_EQUAL , left , new ExpressionArithmetic ( OpTypes . CONCAT , right , new ExpressionValue ( "\uffff" , Type . SQL_CHAR ) ) ) ; return new ExpressionLogical ( OpTypes . AND , l , r ) ; } else { // handle plain string value and the column Expression right = XreadStringValueExpression ( ) ; return new ExpressionStartsWith ( left , right , this . isCheckOrTriggerCondition ) ; } }
Scan the right - side string value and return a STARTS WITH Expression for generating XML
291
16
154,394
/**
 * Reads a row value constructor, trying in order: an explicit row value
 * constructor, a row/common value expression, and finally a boolean
 * value expression.
 */
Expression XreadRowValueConstructor() {

    Expression e = XreadExplicitRowValueConstructorOrNull();

    if (e == null) {
        e = XreadRowOrCommonValueExpression();
    }

    if (e == null) {
        e = XreadBooleanValueExpression();
    }

    return e;
}
ISSUE - XreadCommonValueExpression and XreadBooleanValueExpression should merge
76
19
154,395
/**
 * Reads an explicit row value constructor: either a parenthesized
 * subquery / row element list, or a ROW(...) constructor. Returns null
 * when the current token starts neither form. Must be called in
 * conjunction with parenthesized-expression handling by the caller.
 */
Expression XreadExplicitRowValueConstructorOrNull() {

    switch (token.tokenType) {

        case Tokens.OPENBRACKET : {
            read();

            int position = getPosition();
            int brackets = readOpenBrackets();    // consumed for lookahead

            switch (token.tokenType) {

                case Tokens.TABLE :
                case Tokens.VALUES :
                case Tokens.SELECT : {
                    rewind(position);

                    SubQuery sq = XreadSubqueryBody(false,
                                                    OpTypes.ROW_SUBQUERY);

                    readThis(Tokens.CLOSEBRACKET);

                    return new Expression(OpTypes.ROW_SUBQUERY, sq);
                }
                default : {
                    rewind(position);

                    Expression e = XreadRowElementList(true);

                    readThis(Tokens.CLOSEBRACKET);

                    return e;
                }
            }
        }
        case Tokens.ROW : {
            read();
            readThis(Tokens.OPENBRACKET);

            Expression e = XreadRowElementList(false);

            readThis(Tokens.CLOSEBRACKET);

            return e;
        }
    }

    return null;
}
must be called in conjunction with <parenthesized ..
231
14
154,396
/**
 * Reads part of a CASE .. WHEN expression, recursing for each further
 * WHEN branch. With a CASE operand l, each comma-separated item in the
 * WHEN list is turned into a comparison against l and the comparisons
 * are OR-ed together; without an operand, the WHEN condition is read as
 * a boolean value expression. A missing ELSE yields NULL.
 */
private Expression readCaseWhen(final Expression l) {

    readThis(Tokens.WHEN);

    Expression condition = null;

    if (l == null) {
        condition = XreadBooleanValueExpression();
    } else {
        while (true) {
            Expression newCondition = XreadPredicateRightPart(l);

            if (l == newCondition) {
                // bare value - compare it with the CASE operand
                newCondition = new ExpressionLogical(l,
                        XreadRowValuePredicand());
            }

            condition = (condition == null)
                        ? newCondition
                        : new ExpressionLogical(OpTypes.OR, condition,
                                                newCondition);

            if (token.tokenType != Tokens.COMMA) {
                break;
            }

            read();
        }
    }

    readThis(Tokens.THEN);

    Expression current = XreadValueExpression();
    Expression elseExpr;

    if (token.tokenType == Tokens.WHEN) {
        elseExpr = readCaseWhen(l);
    } else if (token.tokenType == Tokens.ELSE) {
        read();

        elseExpr = XreadValueExpression();

        readThis(Tokens.END);
        readIfThis(Tokens.CASE);
    } else {
        elseExpr = new ExpressionValue((Object) null, Type.SQL_ALL_TYPES);

        readThis(Tokens.END);
        readIfThis(Tokens.CASE);
    }

    Expression alternatives = new ExpressionOp(OpTypes.ALTERNATIVE, current,
            elseExpr);

    return new ExpressionOp(OpTypes.CASEWHEN, condition, alternatives);
}
Reads part of a CASE .. WHEN expression
345
9
154,397
/**
 * Reads a CASEWHEN(condition, then, else) function call and builds the
 * equivalent CASEWHEN expression tree.
 */
private Expression readCaseWhenExpression() {

    read();
    readThis(Tokens.OPENBRACKET);

    Expression condition = XreadBooleanValueExpression();

    readThis(Tokens.COMMA);

    Expression thenExpr = XreadRowValueExpression();

    readThis(Tokens.COMMA);

    Expression alternatives = new ExpressionOp(OpTypes.ALTERNATIVE,
            thenExpr, XreadValueExpression());
    Expression casewhen = new ExpressionOp(OpTypes.CASEWHEN, condition,
            alternatives);

    readThis(Tokens.CLOSEBRACKET);

    return casewhen;
}
reads a CASEWHEN expression
135
7
154,398
/**
 * Reads a CAST(expr AS type) or CONVERT(expr, type) expression. A
 * dynamic parameter operand takes the target type directly.
 */
private Expression readCastExpression() {

    boolean isConvert = token.tokenType == Tokens.CONVERT;

    read();
    readThis(Tokens.OPENBRACKET);

    Expression e = this.XreadValueExpressionOrNull();

    // CONVERT separates operand and type with a comma, CAST with AS
    readThis(isConvert ? Tokens.COMMA : Tokens.AS);

    Type typeObject = readTypeDefinition(true);

    if (e.isParam()) {
        e.setDataType(session, typeObject);
    }

    e = new ExpressionOp(e, typeObject);

    readThis(Tokens.CLOSEBRACKET);

    return e;
}
Reads a CAST or CONVERT expression
144
9
154,399
private Expression readNullIfExpression ( ) { // turn into a CASEWHEN read ( ) ; readThis ( Tokens . OPENBRACKET ) ; Expression c = XreadValueExpression ( ) ; readThis ( Tokens . COMMA ) ; Expression thenelse = new ExpressionOp ( OpTypes . ALTERNATIVE , new ExpressionValue ( ( Object ) null , ( Type ) null ) , c ) ; c = new ExpressionLogical ( c , XreadValueExpression ( ) ) ; c = new ExpressionOp ( OpTypes . CASEWHEN , c , thenelse ) ; readThis ( Tokens . CLOSEBRACKET ) ; return c ; }
Reads a NULLIF expression
140
6