| idx (int64, 0-165k) | question (stringlengths 73-4.15k) | target (stringlengths 5-918) | len_question (int64, 21-890) | len_target (int64, 3-255) |
|---|---|---|---|---|
155,500
|
public void allocateLobForResult ( ResultLob result , InputStream inputStream ) { long resultLobId = result . getLobID ( ) ; CountdownInputStream countStream ; switch ( result . getSubType ( ) ) { case ResultLob . LobResultTypes . REQUEST_CREATE_BYTES : { long blobId ; long blobLength = result . getBlockLength ( ) ; if ( inputStream == null ) { blobId = resultLobId ; inputStream = result . getInputStream ( ) ; } else { BlobData blob = session . createBlob ( blobLength ) ; blobId = blob . getId ( ) ; resultLobs . put ( resultLobId , blobId ) ; } countStream = new CountdownInputStream ( inputStream ) ; countStream . setCount ( blobLength ) ; database . lobManager . setBytesForNewBlob ( blobId , countStream , result . getBlockLength ( ) ) ; break ; } case ResultLob . LobResultTypes . REQUEST_CREATE_CHARS : { long clobId ; long clobLength = result . getBlockLength ( ) ; if ( inputStream == null ) { clobId = resultLobId ; if ( result . getReader ( ) != null ) { inputStream = new ReaderInputStream ( result . getReader ( ) ) ; } else { inputStream = result . getInputStream ( ) ; } } else { ClobData clob = session . createClob ( clobLength ) ; clobId = clob . getId ( ) ; resultLobs . put ( resultLobId , clobId ) ; } countStream = new CountdownInputStream ( inputStream ) ; countStream . setCount ( clobLength * 2 ) ; database . lobManager . setCharsForNewClob ( clobId , countStream , result . getBlockLength ( ) ) ; break ; } } }
|
allocate storage for a new LOB
| 415
| 8
|
155,501
|
@ Override public void resolveColumnIndexes ( ) { // First, assert that our topology is sane and then // recursively resolve all child/inline column indexes IndexScanPlanNode index_scan = ( IndexScanPlanNode ) getInlinePlanNode ( PlanNodeType . INDEXSCAN ) ; assert ( m_children . size ( ) == 2 && index_scan == null ) ; for ( AbstractPlanNode child : m_children ) { child . resolveColumnIndexes ( ) ; } final NodeSchema outer_schema = m_children . get ( 0 ) . getOutputSchema ( ) ; final NodeSchema inner_schema = m_children . get ( 1 ) . getOutputSchema ( ) ; final int outerSize = outer_schema . size ( ) ; final int innerSize = inner_schema . size ( ) ; // resolve predicates resolvePredicate ( m_preJoinPredicate , outer_schema , inner_schema ) ; resolvePredicate ( m_joinPredicate , outer_schema , inner_schema ) ; resolvePredicate ( m_wherePredicate , outer_schema , inner_schema ) ; // Resolve subquery expression indexes resolveSubqueryColumnIndexes ( ) ; // Resolve TVE indexes for each schema column. for ( int i = 0 ; i < m_outputSchemaPreInlineAgg . size ( ) ; ++ i ) { SchemaColumn col = m_outputSchemaPreInlineAgg . getColumn ( i ) ; // These will all be TVEs. assert ( col . getExpression ( ) instanceof TupleValueExpression ) ; TupleValueExpression tve = ( TupleValueExpression ) col . getExpression ( ) ; int index ; if ( i < outerSize ) { index = tve . setColumnIndexUsingSchema ( outer_schema ) ; } else { index = tve . setColumnIndexUsingSchema ( inner_schema ) ; index += outerSize ; } if ( index == - 1 ) { throw new RuntimeException ( "Unable to find index for column: " + col . toString ( ) ) ; } tve . setColumnIndex ( index ) ; tve . setDifferentiator ( index ) ; } // We want the output columns to be ordered like [outer table columns][inner table columns], // and further ordered by TVE index within the left- and righthand sides. // generateOutputSchema already places outer columns on the left and inner on the right, // so we just need to order the left- and righthand sides by TVE index separately. m_outputSchemaPreInlineAgg . sortByTveIndex ( 0 , outer_schema . size ( ) ) ; m_outputSchemaPreInlineAgg . sortByTveIndex ( outer_schema . size ( ) , m_outputSchemaPreInlineAgg . size ( ) ) ; m_hasSignificantOutputSchema = true ; resolveRealOutputSchema ( ) ; }
|
order and TVE indexes for the output SchemaColumns.
| 650
| 13
|
155,502
|
public void resolveSortDirection ( ) { AbstractPlanNode outerTable = m_children . get ( 0 ) ; if ( m_joinType == JoinType . FULL ) { // Disable the usual optimizations for ordering join output by // outer table only. In case of FULL join, the unmatched inner table tuples // get appended to the end of the join's output table thus invalidating // the outer table join order. m_sortDirection = SortDirectionType . INVALID ; return ; } if ( outerTable instanceof IndexSortablePlanNode ) { m_sortDirection = ( ( IndexSortablePlanNode ) outerTable ) . indexUse ( ) . getSortOrderFromIndexScan ( ) ; } }
|
right now only consider the sort direction on the outer table
| 152
| 11
|
155,503
|
protected long discountEstimatedProcessedTupleCount ( AbstractPlanNode childNode ) { // Discount estimated processed tuple count for the outer child based on the number of // filter expressions this child has with a rapidly diminishing effect // that ranges from a discount of 0.09 (ORETATION_EQAUL) // or 0.045 (all other expression types) for one post filter to a max discount approaching // 0.888... (=8/9) for many EQUALITY filters. // The discount value is less than the partial index discount (0.1) to make sure // the index wins AbstractExpression predicate = null ; if ( childNode instanceof AbstractScanPlanNode ) { predicate = ( ( AbstractScanPlanNode ) childNode ) . getPredicate ( ) ; } else if ( childNode instanceof NestLoopPlanNode ) { predicate = ( ( NestLoopPlanNode ) childNode ) . getWherePredicate ( ) ; } else if ( childNode instanceof NestLoopIndexPlanNode ) { AbstractPlanNode inlineIndexScan = ( ( NestLoopIndexPlanNode ) childNode ) . getInlinePlanNode ( PlanNodeType . INDEXSCAN ) ; assert ( inlineIndexScan != null ) ; predicate = ( ( AbstractScanPlanNode ) inlineIndexScan ) . getPredicate ( ) ; } else { return childNode . getEstimatedProcessedTupleCount ( ) ; } if ( predicate == null ) { return childNode . getEstimatedProcessedTupleCount ( ) ; } List < AbstractExpression > predicateExprs = ExpressionUtil . uncombinePredicate ( predicate ) ; // Counters to count the number of equality and all other expressions int eqCount = 0 ; int otherCount = 0 ; final double MAX_EQ_POST_FILTER_DISCOUNT = 0.09 ; final double MAX_OTHER_POST_FILTER_DISCOUNT = 0.045 ; double discountCountFactor = 1.0 ; // Discount tuple count. for ( AbstractExpression predicateExpr : predicateExprs ) { if ( ExpressionType . COMPARE_EQUAL == predicateExpr . getExpressionType ( ) ) { discountCountFactor -= Math . pow ( MAX_EQ_POST_FILTER_DISCOUNT , ++ eqCount ) ; } else { discountCountFactor -= Math . pow ( MAX_OTHER_POST_FILTER_DISCOUNT , ++ otherCount ) ; } } return ( long ) ( childNode . getEstimatedProcessedTupleCount ( ) * discountCountFactor ) ; }
|
Discount join node child estimates based on the number of its filters
| 541
| 13
|
155,504
|
public Serializable getObject ( ) { try { return InOutUtil . deserialize ( data ) ; } catch ( Exception e ) { throw Error . error ( ErrorCode . X_22521 , e . toString ( ) ) ; } }
|
This method is called from classes implementing the JDBC interfaces. Inside the engine it is used for conversion from a value of type OTHER to another type. It will throw if the OTHER is an instance of a class that is not available.
| 53
| 48
|
155,505
|
@ VisibleForTesting static char [ ] [ ] createReplacementArray ( Map < Character , String > map ) { checkNotNull ( map ) ; // GWT specific check (do not optimize) if ( map . isEmpty ( ) ) { return EMPTY_REPLACEMENT_ARRAY ; } char max = Collections . max ( map . keySet ( ) ) ; char [ ] [ ] replacements = new char [ max + 1 ] [ ] ; for ( char c : map . keySet ( ) ) { replacements [ c ] = map . get ( c ) . toCharArray ( ) ; } return replacements ; }
|
original character value.
| 133
| 4
|
155,506
|
public int getPrecision ( int param ) throws SQLException { checkRange ( param ) ; Type type = rmd . columnTypes [ -- param ] ; if ( type . isDateTimeType ( ) ) { return type . displaySize ( ) ; } else { long size = type . precision ; if ( size > Integer . MAX_VALUE ) { size = 0 ; } return ( int ) size ; } }
|
Retrieves the designated parameter's specified column size.
| 88
| 11
|
155,507
|
public String getParameterTypeName ( int param ) throws SQLException { checkRange ( param ) ; return rmd . columnTypes [ -- param ] . getNameString ( ) ; }
|
Retrieves the designated parameter's database-specific type name.
| 40
| 13
|
155,508
|
protected static TaskLog initializeTaskLog ( String voltroot , int pid ) { // Construct task log and start logging task messages File overflowDir = new File ( voltroot , "join_overflow" ) ; return ProClass . newInstanceOf ( "org.voltdb.rejoin.TaskLogImpl" , "Join" , ProClass . HANDLER_LOG , pid , overflowDir ) ; }
|
Load the pro task log
| 85
| 5
|
155,509
|
protected void restoreBlock ( RestoreWork rejoinWork , SiteProcedureConnection siteConnection ) { kickWatchdog ( true ) ; rejoinWork . restore ( siteConnection ) ; }
|
Received a datablock. Reset the watchdog timer and hand the block to the Site.
| 38
| 19
|
155,510
|
protected void returnToTaskQueue ( boolean sourcesReady ) { if ( sourcesReady ) { // If we've done something meaningful, go ahead and return ourselves to the queue immediately m_taskQueue . offer ( this ) ; } else { // Otherwise, avoid spinning too aggressively, so wait a millisecond before requeueing VoltDB . instance ( ) . scheduleWork ( new ReturnToTaskQueueAction ( ) , 1 , - 1 , TimeUnit . MILLISECONDS ) ; } }
|
return this task to the task queue, either immediately or after waiting a few milliseconds
| 102
| 6
|
155,511
|
static void putLong ( ByteBuffer buffer , long value ) { value = ( value << 1 ) ^ ( value >> 63 ) ; if ( value >>> 7 == 0 ) { buffer . put ( ( byte ) value ) ; } else { buffer . put ( ( byte ) ( ( value & 0x7F ) | 0x80 ) ) ; if ( value >>> 14 == 0 ) { buffer . put ( ( byte ) ( value >>> 7 ) ) ; } else { buffer . put ( ( byte ) ( value >>> 7 | 0x80 ) ) ; if ( value >>> 21 == 0 ) { buffer . put ( ( byte ) ( value >>> 14 ) ) ; } else { buffer . put ( ( byte ) ( value >>> 14 | 0x80 ) ) ; if ( value >>> 28 == 0 ) { buffer . put ( ( byte ) ( value >>> 21 ) ) ; } else { buffer . put ( ( byte ) ( value >>> 21 | 0x80 ) ) ; if ( value >>> 35 == 0 ) { buffer . put ( ( byte ) ( value >>> 28 ) ) ; } else { buffer . put ( ( byte ) ( value >>> 28 | 0x80 ) ) ; if ( value >>> 42 == 0 ) { buffer . put ( ( byte ) ( value >>> 35 ) ) ; } else { buffer . put ( ( byte ) ( value >>> 35 | 0x80 ) ) ; if ( value >>> 49 == 0 ) { buffer . put ( ( byte ) ( value >>> 42 ) ) ; } else { buffer . put ( ( byte ) ( value >>> 42 | 0x80 ) ) ; if ( value >>> 56 == 0 ) { buffer . put ( ( byte ) ( value >>> 49 ) ) ; } else { buffer . put ( ( byte ) ( value >>> 49 | 0x80 ) ) ; buffer . put ( ( byte ) ( value >>> 56 ) ) ; } } } } } } } } }
|
Writes a long value to the given buffer in LEB128 ZigZag encoded format
| 403
| 18
|
155,512
|
static void putInt ( ByteBuffer buffer , int value ) { value = ( value << 1 ) ^ ( value >> 31 ) ; if ( value >>> 7 == 0 ) { buffer . put ( ( byte ) value ) ; } else { buffer . put ( ( byte ) ( ( value & 0x7F ) | 0x80 ) ) ; if ( value >>> 14 == 0 ) { buffer . put ( ( byte ) ( value >>> 7 ) ) ; } else { buffer . put ( ( byte ) ( value >>> 7 | 0x80 ) ) ; if ( value >>> 21 == 0 ) { buffer . put ( ( byte ) ( value >>> 14 ) ) ; } else { buffer . put ( ( byte ) ( value >>> 14 | 0x80 ) ) ; if ( value >>> 28 == 0 ) { buffer . put ( ( byte ) ( value >>> 21 ) ) ; } else { buffer . put ( ( byte ) ( value >>> 21 | 0x80 ) ) ; buffer . put ( ( byte ) ( value >>> 28 ) ) ; } } } } }
|
Writes an int value to the given buffer in LEB128-64b9B ZigZag encoded format
| 223
| 23
|
155,513
|
static long getLong ( ByteBuffer buffer ) { long v = buffer . get ( ) ; long value = v & 0x7F ; if ( ( v & 0x80 ) != 0 ) { v = buffer . get ( ) ; value |= ( v & 0x7F ) << 7 ; if ( ( v & 0x80 ) != 0 ) { v = buffer . get ( ) ; value |= ( v & 0x7F ) << 14 ; if ( ( v & 0x80 ) != 0 ) { v = buffer . get ( ) ; value |= ( v & 0x7F ) << 21 ; if ( ( v & 0x80 ) != 0 ) { v = buffer . get ( ) ; value |= ( v & 0x7F ) << 28 ; if ( ( v & 0x80 ) != 0 ) { v = buffer . get ( ) ; value |= ( v & 0x7F ) << 35 ; if ( ( v & 0x80 ) != 0 ) { v = buffer . get ( ) ; value |= ( v & 0x7F ) << 42 ; if ( ( v & 0x80 ) != 0 ) { v = buffer . get ( ) ; value |= ( v & 0x7F ) << 49 ; if ( ( v & 0x80 ) != 0 ) { v = buffer . get ( ) ; value |= v << 56 ; } } } } } } } } value = ( value >>> 1 ) ^ ( - ( value & 1 ) ) ; return value ; }
|
Read an LEB128-64b9B ZigZag encoded long value from the given buffer
| 331
| 20
|
155,514
|
static int getInt ( ByteBuffer buffer ) { int v = buffer . get ( ) ; int value = v & 0x7F ; if ( ( v & 0x80 ) != 0 ) { v = buffer . get ( ) ; value |= ( v & 0x7F ) << 7 ; if ( ( v & 0x80 ) != 0 ) { v = buffer . get ( ) ; value |= ( v & 0x7F ) << 14 ; if ( ( v & 0x80 ) != 0 ) { v = buffer . get ( ) ; value |= ( v & 0x7F ) << 21 ; if ( ( v & 0x80 ) != 0 ) { v = buffer . get ( ) ; value |= ( v & 0x7F ) << 28 ; } } } } value = ( value >>> 1 ) ^ ( - ( value & 1 ) ) ; return value ; }
|
Read an LEB128-64b9B ZigZag encoded int value from the given buffer
| 194
| 20
|
155,515
|
static public void main ( String [ ] sa ) throws IOException , TarMalformatException { if ( sa . length < 1 ) { System . out . println ( RB . singleton . getString ( RB . TARREADER_SYNTAX , TarReader . class . getName ( ) ) ) ; System . out . println ( RB . singleton . getString ( RB . LISTING_FORMAT ) ) ; System . exit ( 0 ) ; } File exDir = ( sa . length > 1 && sa [ 1 ] . startsWith ( "--directory=" ) ) ? ( new File ( sa [ 1 ] . substring ( "--directory=" . length ( ) ) ) ) : null ; int firstPatInd = ( exDir == null ) ? 2 : 3 ; if ( sa . length < firstPatInd || ( ( ! sa [ 0 ] . equals ( "t" ) ) && ! sa [ 0 ] . equals ( "x" ) ) ) { throw new IllegalArgumentException ( RB . singleton . getString ( RB . TARREADER_SYNTAXERR , TarReader . class . getName ( ) ) ) ; } String [ ] patternStrings = null ; if ( sa . length > firstPatInd ) { patternStrings = new String [ sa . length - firstPatInd ] ; for ( int i = firstPatInd ; i < sa . length ; i ++ ) { patternStrings [ i - firstPatInd ] = sa [ i ] ; } } if ( sa [ 0 ] . equals ( "t" ) && exDir != null ) { throw new IllegalArgumentException ( RB . singleton . getString ( RB . DIR_X_CONFLICT ) ) ; } int dirIndex = ( exDir == null ) ? 1 : 2 ; int tarReaderMode = sa [ 0 ] . equals ( "t" ) ? LIST_MODE : EXTRACT_MODE ; new TarReader ( new File ( sa [ dirIndex ] ) , tarReaderMode , patternStrings , null , exDir ) . read ( ) ; }
|
Reads a specified tar file or stdin in order to either list or extract the tar file entries, depending on the first argument being 't' or 'x', using default read buffer blocks.
| 446
| 35
|
155,516
|
public static Date getDateFromUniqueId ( long uniqueId ) { long time = uniqueId >> ( COUNTER_BITS + PARTITIONID_BITS ) ; time += VOLT_EPOCH ; return new Date ( time ) ; }
|
Given a unique id, return the time of its creation by examining the embedded timestamp.
| 52
| 16
|
155,517
|
public static Object createObject ( String classname ) throws ParseException { Class < ? > cl ; try { cl = Class . forName ( classname ) ; } catch ( ClassNotFoundException cnfe ) { throw new ParseException ( "Unable to find the class: " + classname ) ; } try { return cl . newInstance ( ) ; } catch ( Exception e ) { throw new ParseException ( e . getClass ( ) . getName ( ) + "; Unable to create an instance of: " + classname ) ; } }
|
Create an Object from the classname and empty constructor.
| 119
| 11
|
155,518
|
public static Number createNumber ( String str ) throws ParseException { try { if ( str . indexOf ( ' ' ) != - 1 ) { return Double . valueOf ( str ) ; } return Long . valueOf ( str ) ; } catch ( NumberFormatException e ) { throw new ParseException ( e . getMessage ( ) ) ; } }
|
Create a number from a String. If a '.' is present it creates a Double, otherwise a Long.
| 75
| 20
|
155,519
|
private boolean fixupACL ( List < Id > authInfo , List < ACL > acl ) { if ( skipACL ) { return true ; } if ( acl == null || acl . size ( ) == 0 ) { return false ; } Iterator < ACL > it = acl . iterator ( ) ; LinkedList < ACL > toAdd = null ; while ( it . hasNext ( ) ) { ACL a = it . next ( ) ; Id id = a . getId ( ) ; if ( id . getScheme ( ) . equals ( "world" ) && id . getId ( ) . equals ( "anyone" ) ) { // wide open } else if ( id . getScheme ( ) . equals ( "auth" ) ) { // This is the "auth" id, so we have to expand it to the // authenticated ids of the requestor it . remove ( ) ; if ( toAdd == null ) { toAdd = new LinkedList < ACL > ( ) ; } boolean authIdValid = false ; for ( Id cid : authInfo ) { AuthenticationProvider ap = ProviderRegistry . getProvider ( cid . getScheme ( ) ) ; if ( ap == null ) { LOG . error ( "Missing AuthenticationProvider for " + cid . getScheme ( ) ) ; } else if ( ap . isAuthenticated ( ) ) { authIdValid = true ; toAdd . add ( new ACL ( a . getPerms ( ) , cid ) ) ; } } if ( ! authIdValid ) { return false ; } } else { AuthenticationProvider ap = ProviderRegistry . getProvider ( id . getScheme ( ) ) ; if ( ap == null ) { return false ; } if ( ! ap . isValid ( id . getId ( ) ) ) { return false ; } } } if ( toAdd != null ) { for ( ACL a : toAdd ) { acl . add ( a ) ; } } return acl . size ( ) > 0 ; }
|
This method checks out the acl, making sure it isn't null or empty, that it has valid schemes and ids, and expanding any relative ids that depend on the requestor's authentication information.
| 432
| 38
|
155,520
|
public boolean authenticate ( ClientAuthScheme scheme , String fromAddress ) { if ( m_done ) throw new IllegalStateException ( "this authentication request has a result" ) ; boolean authenticated = false ; try { authenticated = authenticateImpl ( scheme , fromAddress ) ; } catch ( Exception ex ) { m_authenticationFailure = ex ; } finally { m_done = true ; } return authenticated ; }
|
Perform the authentication request
| 86
| 5
|
155,521
|
public long run ( String symbol , TimestampType time , long seq_number , String exchange , int bidPrice , int bidSize , int askPrice , int askSize ) throws VoltAbortException { // convert bid and ask 0 values to null Integer bidPriceSafe = askPrice > 0 ? askPrice : null ; Integer askPriceSafe = askPrice > 0 ? askPrice : null ; voltQueueSQL ( insertTick , symbol , time , seq_number , exchange , bidPriceSafe , bidSize , askPriceSafe , askSize ) ; voltQueueSQL ( upsertLastTick , symbol , time , seq_number , exchange , bidPrice , bidSize , askPrice , askSize ) ; // Queue best bid and ask selects voltQueueSQL ( selectMaxBid , symbol ) ; voltQueueSQL ( selectMinAsk , symbol ) ; // Execute queued statements VoltTable results0 [ ] = voltExecuteSQL ( ) ; // Read the best bid results VoltTable tb = results0 [ 2 ] ; tb . advanceRow ( ) ; String bex = tb . getString ( 0 ) ; long bid = tb . getLong ( 1 ) ; long bsize = tb . getLong ( 2 ) ; // Read the best ask results VoltTable ta = results0 [ 3 ] ; ta . advanceRow ( ) ; String aex = ta . getString ( 0 ) ; long ask = ta . getLong ( 1 ) ; long asize = ta . getLong ( 2 ) ; // check if the tick is part of the nbbo if ( bex . equals ( exchange ) || aex . equals ( exchange ) ) { // this new quote was the best bid or ask // insert a new NBBO record // use this quote's symbol, time and sequence number voltQueueSQL ( insertNBBO , symbol , time , seq_number , bid , bsize , bex , ask , asize , aex ) ; voltExecuteSQL ( true ) ; } return ClientResponse . SUCCESS ; }
|
main method; the procedure starts here.
| 429
| 7
|
155,522
|
public static < K extends Comparable < ? > , V > Builder < K , V > builder ( ) { return new Builder < K , V > ( ) ; }
|
Returns a new builder for an immutable range map.
| 35
| 10
|
155,523
|
void setLeaderState ( boolean isLeader ) { m_isLeader = isLeader ; // The leader doesn't truncate its own SP log; if promoted, // wipe out the SP portion of the existing log. This promotion // action always happens after repair is completed. if ( m_isLeader ) { if ( ! m_logSP . isEmpty ( ) ) { truncate ( m_logSP . getLast ( ) . getHandle ( ) , IS_SP ) ; } } }
|
leaders log differently
| 103
| 3
|
155,524
|
private void truncate ( long handle , boolean isSP ) { // MIN value means no work to do, is a startup condition if ( handle == Long . MIN_VALUE ) { return ; } Deque < RepairLog . Item > deq = null ; if ( isSP ) { deq = m_logSP ; if ( m_truncationHandle < handle ) { m_truncationHandle = handle ; notifyTxnCommitInterests ( handle ) ; } } else { deq = m_logMP ; } RepairLog . Item item = null ; while ( ( item = deq . peek ( ) ) != null ) { if ( item . canTruncate ( handle ) ) { deq . poll ( ) ; } else { break ; } } }
|
trim unnecessary log messages.
| 164
| 6
|
155,525
|
public List < Iv2RepairLogResponseMessage > contents ( long requestId , boolean forMPI ) { List < Item > items = new LinkedList < Item > ( ) ; // All cases include the log of MP transactions items . addAll ( m_logMP ) ; // SP repair requests also want the SP transactions if ( ! forMPI ) { items . addAll ( m_logSP ) ; } // Contents need to be sorted in increasing spHandle order Collections . sort ( items , m_handleComparator ) ; int ofTotal = items . size ( ) + 1 ; if ( repairLogger . isDebugEnabled ( ) ) { repairLogger . debug ( "Responding with " + ofTotal + " repair log parts." ) ; } List < Iv2RepairLogResponseMessage > responses = new LinkedList < Iv2RepairLogResponseMessage > ( ) ; // this constructor sets its sequence no to 0 as ack // messages are first in the sequence Iv2RepairLogResponseMessage hheader = new Iv2RepairLogResponseMessage ( requestId , ofTotal , m_lastSpHandle , m_lastMpHandle , TheHashinator . getCurrentVersionedConfigCooked ( ) ) ; responses . add ( hheader ) ; int seq = responses . size ( ) ; // = 1, as the first sequence Iterator < Item > itemator = items . iterator ( ) ; while ( itemator . hasNext ( ) ) { Item item = itemator . next ( ) ; Iv2RepairLogResponseMessage response = new Iv2RepairLogResponseMessage ( requestId , seq ++ , ofTotal , item . getHandle ( ) , item . getTxnId ( ) , item . getMessage ( ) ) ; responses . add ( response ) ; } return responses ; }
|
produce the contents of the repair log.
| 382
| 9
|
155,526
|
public final synchronized void endFragment ( String stmtName , boolean isCoordinatorTask , boolean failed , boolean sampledStmt , long duration , int resultSize , int parameterSetSize ) { if ( stmtName == null ) { return ; } StatementStats stmtStats = m_stmtStatsMap . get ( stmtName ) ; if ( stmtStats == null ) { return ; } StatsData dataToUpdate = isCoordinatorTask ? stmtStats . m_coordinatorTask : stmtStats . m_workerTask ; // m_failureCount and m_invocations need to be updated even if the current invocation is not sampled. if ( failed ) { dataToUpdate . m_failureCount ++ ; } dataToUpdate . m_invocations ++ ; // If the current invocation is not sampled, we can stop now. // Notice that this function can be called by a FragmentTask from a multi-partition procedure. // Cannot use the isRecording() value here because SP sites can have values different from the MP Site. if ( ! sampledStmt ) { return ; } // This is a sampled invocation. // Update timings and size statistics below. if ( duration < 0 ) { if ( Math . abs ( duration ) > 1000000000 ) { log . info ( "Statement: " + stmtStats . m_stmtName + " in procedure: " + m_procName + " recorded a negative execution time larger than one second: " + duration ) ; } return ; } dataToUpdate . m_timedInvocations ++ ; // sampled timings dataToUpdate . m_totalTimedExecutionTime += duration ; dataToUpdate . m_minExecutionTime = Math . min ( duration , dataToUpdate . m_minExecutionTime ) ; dataToUpdate . m_maxExecutionTime = Math . max ( duration , dataToUpdate . m_maxExecutionTime ) ; dataToUpdate . m_incrMinExecutionTime = Math . min ( duration , dataToUpdate . m_incrMinExecutionTime ) ; dataToUpdate . m_incrMaxExecutionTime = Math . max ( duration , dataToUpdate . m_incrMaxExecutionTime ) ; // sampled size statistics dataToUpdate . m_totalResultSize += resultSize ; dataToUpdate . m_minResultSize = Math . min ( resultSize , dataToUpdate . m_minResultSize ) ; dataToUpdate . m_maxResultSize = Math . max ( resultSize , dataToUpdate . m_maxResultSize ) ; dataToUpdate . m_incrMinResultSize = Math . min ( resultSize , dataToUpdate . m_incrMinResultSize ) ; dataToUpdate . m_incrMaxResultSize = Math . max ( resultSize , dataToUpdate . m_incrMaxResultSize ) ; dataToUpdate . m_totalParameterSetSize += parameterSetSize ; dataToUpdate . m_minParameterSetSize = Math . min ( parameterSetSize , dataToUpdate . m_minParameterSetSize ) ; dataToUpdate . m_maxParameterSetSize = Math . max ( parameterSetSize , dataToUpdate . m_maxParameterSetSize ) ; dataToUpdate . m_incrMinParameterSetSize = Math . min ( parameterSetSize , dataToUpdate . m_incrMinParameterSetSize ) ; dataToUpdate . m_incrMaxParameterSetSize = Math . max ( parameterSetSize , dataToUpdate . m_incrMaxParameterSetSize ) ; }
|
This function will be called after a statement finishes running. It updates the data structures to maintain the statistics.
| 761
| 21
|
155,527
|
public synchronized Session newSession ( Database db , User user , boolean readonly , boolean forLog , int timeZoneSeconds ) { Session s = new Session ( db , user , ! forLog , ! forLog , readonly , sessionIdCount , timeZoneSeconds ) ; s . isProcessingLog = forLog ; sessionMap . put ( sessionIdCount , s ) ; sessionIdCount ++ ; return s ; }
|
Binds the specified Session object into this SessionManager's active Session registry. This method is typically called internally as the final step when a successful connection has been made.
| 89
| 33
|
155,528
|
public Session getSysSessionForScript ( Database db ) { Session session = new Session ( db , db . getUserManager ( ) . getSysUser ( ) , false , false , false , 0 , 0 ) ; session . isProcessingScript = true ; return session ; }
|
Retrieves a new SYS Session.
| 58
| 9
|
155,529
|
public synchronized void closeAllSessions ( ) { // don't disconnect system user; need it to save database Session [ ] sessions = getAllSessions ( ) ; for ( int i = 0 ; i < sessions . length ; i ++ ) { sessions [ i ] . close ( ) ; } }
|
Closes all Sessions registered with this SessionManager.
| 62
| 10
|
155,530
|
public HostAndPort withDefaultPort ( int defaultPort ) { checkArgument ( isValidPort ( defaultPort ) ) ; if ( hasPort ( ) || port == defaultPort ) { return this ; } return new HostAndPort ( host , defaultPort , hasBracketlessColons ) ; }
|
Provide a default port if the parsed string contained only a host.
| 63
| 14
|
155,531
|
public Runnable writeCatalogJarToFile ( String path , String name , CatalogJarWriteMode mode ) throws IOException { File catalogFile = new VoltFile ( path , name ) ; File catalogTmpFile = new VoltFile ( path , name + ".tmp" ) ; if ( mode == CatalogJarWriteMode . CATALOG_UPDATE ) { // This means a @UpdateCore case, the asynchronous writing of // jar file has finished, rename the jar file catalogFile . delete ( ) ; catalogTmpFile . renameTo ( catalogFile ) ; return null ; } if ( mode == CatalogJarWriteMode . START_OR_RESTART ) { // This happens in the beginning of , // when the catalog jar does not yet exist. Though the contents // written might be a default one and could be overwritten later // by @UAC, @UpdateClasses, etc. return m_catalogInfo . m_jarfile . writeToFile ( catalogFile ) ; } if ( mode == CatalogJarWriteMode . RECOVER ) { // we must overwrite the file (the file may have been changed) catalogFile . delete ( ) ; if ( catalogTmpFile . exists ( ) ) { // If somehow the catalog temp jar is not cleaned up, then delete it catalogTmpFile . delete ( ) ; } return m_catalogInfo . m_jarfile . writeToFile ( catalogFile ) ; } VoltDB . crashLocalVoltDB ( "Unsupported mode to write catalog jar" , true , null ) ; return null ; }
|
Write, replace or update the catalog jar based on different cases. This function assumes any IOException should lead to a fatal crash.
| 326
| 24
|
155,532
|
public Class < ? > classForProcedureOrUDF ( String procedureClassName ) throws LinkageError , ExceptionInInitializerError , ClassNotFoundException { return classForProcedureOrUDF ( procedureClassName , m_catalogInfo . m_jarfile . getLoader ( ) ) ; }
|
Given a class name in the catalog jar, loads it from the jar even if the jar is served from a URL and isn't in the classpath.
| 67
| 30
|
155,533
|
public DeploymentType getDeployment ( ) { if ( m_memoizedDeployment == null ) { m_memoizedDeployment = CatalogUtil . getDeployment ( new ByteArrayInputStream ( m_catalogInfo . m_deploymentBytes ) ) ; // This should NEVER happen if ( m_memoizedDeployment == null ) { VoltDB . crashLocalVoltDB ( "The internal deployment bytes are invalid. This should never occur; please contact VoltDB support with your logfiles." ) ; } } return m_memoizedDeployment ; }
|
Get the JAXB XML Deployment object which is memoized
| 124
| 13
|
155,534
|
public boolean removeAfter ( Node node ) { if ( node == null || node . next == null ) { return false ; } if ( node . next == last ) { last = node ; } node . next = node . next . next ; return true ; }
|
Removes the given node to allow removal from iterators
| 54
| 13
|
155,535
|
protected ProcedurePartitionData parseCreateProcedureClauses ( ProcedureDescriptor descriptor , String clauses ) throws VoltCompilerException { // Nothing to do if there were no clauses. // Null means there's no partition data to return. // There's also no roles to add. if ( clauses == null || clauses . isEmpty ( ) ) { return null ; } ProcedurePartitionData data = null ; Matcher matcher = SQLParser . matchAnyCreateProcedureStatementClause ( clauses ) ; int start = 0 ; while ( matcher . find ( start ) ) { start = matcher . end ( ) ; if ( matcher . group ( 1 ) != null ) { // Add roles if it's an ALLOW clause. More that one ALLOW clause is okay. for ( String roleName : StringUtils . split ( matcher . group ( 1 ) , ' ' ) ) { // Don't put the same role in the list more than once. String roleNameFixed = roleName . trim ( ) . toLowerCase ( ) ; if ( ! descriptor . m_authGroups . contains ( roleNameFixed ) ) { descriptor . m_authGroups . add ( roleNameFixed ) ; } } } else { // Add partition info if it's a PARTITION clause. Only one is allowed. if ( data != null ) { throw m_compiler . new VoltCompilerException ( "Only one PARTITION clause is allowed for CREATE PROCEDURE." ) ; } data = new ProcedurePartitionData ( matcher . group ( 2 ) , matcher . group ( 3 ) , matcher . group ( 4 ) , matcher . group ( 5 ) , matcher . group ( 6 ) , matcher . group ( 7 ) ) ; } } return data ; }
|
Parse and validate the substring containing ALLOW and PARTITION clauses for CREATE PROCEDURE.
| 376
| 21
|
155,536
|
public static void interactWithTheUser ( ) throws Exception { final SQLConsoleReader interactiveReader = new SQLConsoleReader ( new FileInputStream ( FileDescriptor . in ) , System . out ) ; interactiveReader . setBellEnabled ( false ) ; FileHistory historyFile = null ; try { // Maintain persistent history in ~/.sqlcmd_history. historyFile = new FileHistory ( new File ( System . getProperty ( "user.home" ) , ".sqlcmd_history" ) ) ; interactiveReader . setHistory ( historyFile ) ; // Make Ctrl-D (EOF) exit if on an empty line, otherwise delete the next character. KeyMap keyMap = interactiveReader . getKeys ( ) ; keyMap . bind ( new Character ( KeyMap . CTRL_D ) . toString ( ) , new ActionListener ( ) { @ Override public void actionPerformed ( ActionEvent e ) { CursorBuffer cursorBuffer = interactiveReader . getCursorBuffer ( ) ; if ( cursorBuffer . length ( ) == 0 ) { // tells caller to stop (basically a goto) throw new SQLCmdEarlyExitException ( ) ; } else { try { interactiveReader . delete ( ) ; } catch ( IOException e1 ) { } } } } ) ; getInteractiveQueries ( interactiveReader ) ; } finally { // Flush input history to a file. if ( historyFile != null ) { try { historyFile . flush ( ) ; } catch ( IOException e ) { System . err . printf ( "* Unable to write history to \"%s\" *\n" , historyFile . getFile ( ) . getPath ( ) ) ; if ( m_debug ) { e . printStackTrace ( ) ; } } } // Clean up jline2 resources. if ( interactiveReader != null ) { interactiveReader . shutdown ( ) ; } } }
|
The main loop for interactive mode.
| 395
| 7
|
155,537
|
static void executeScriptFiles ( List < FileInfo > filesInfo , SQLCommandLineReader parentLineReader , DDLParserCallback callback ) throws IOException { LineReaderAdapter adapter = null ; SQLCommandLineReader reader = null ; StringBuilder statements = new StringBuilder ( ) ; if ( ! m_interactive && callback == null ) { // We have to check for the callback to avoid spewing to System.out in the "init --classes" filtering codepath. // Better logging/output handling in general would be nice to have here -- output on System.out will be consumed // by the test generators (build_eemakefield) and cause build failures. System . out . println ( ) ; StringBuilder commandString = new StringBuilder ( ) ; commandString . append ( filesInfo . get ( 0 ) . toString ( ) ) ; for ( int ii = 1 ; ii < filesInfo . size ( ) ; ii ++ ) { commandString . append ( " " + filesInfo . get ( ii ) . getFile ( ) . toString ( ) ) ; } System . out . println ( commandString . toString ( ) ) ; } for ( int ii = 0 ; ii < filesInfo . size ( ) ; ii ++ ) { FileInfo fileInfo = filesInfo . get ( ii ) ; adapter = null ; reader = null ; if ( fileInfo . getOption ( ) == FileOption . INLINEBATCH ) { // File command is a "here document" so pass in the current // input stream. reader = parentLineReader ; } else { try { reader = adapter = new LineReaderAdapter ( new FileReader ( fileInfo . getFile ( ) ) ) ; } catch ( FileNotFoundException e ) { System . err . println ( "Script file '" + fileInfo . getFile ( ) + "' could not be found." ) ; stopOrContinue ( e ) ; return ; // continue to the next line after the FILE command } // if it is a batch option, get all contents from all the files and send it as a string if ( fileInfo . getOption ( ) == FileOption . BATCH ) { String line ; // use the current reader we obtained to read from the file // and append to existing statements while ( ( line = reader . readBatchLine ( ) ) != null ) { statements . append ( line ) . append ( "\n" ) ; } // set reader to null since we finish reading from the file reader = null ; // if it is the last file, create a reader to read from the string of all files contents if ( ii == filesInfo . size ( ) - 1 ) { String allStatements = statements . toString ( ) ; byte [ ] bytes = allStatements . getBytes ( "UTF-8" ) ; ByteArrayInputStream bais = new ByteArrayInputStream ( bytes ) ; // reader LineReaderAdapter needs an input stream reader reader = adapter = new LineReaderAdapter ( new InputStreamReader ( bais ) ) ; } // NOTE - fileInfo has the last file info for batch with multiple files } } try { executeScriptFromReader ( fileInfo , reader , callback ) ; } catch ( SQLCmdEarlyExitException e ) { throw e ; } catch ( Exception x ) { stopOrContinue ( x ) ; } finally { if ( adapter != null ) { adapter . close ( ) ; } } } }
|
Reads a script file and executes its content . Note that the script file could be an inline batch i . e . a here document that is coming from the same input stream as the file directive .
| 714
| 40
|
155,538
|
private static void printUsage ( String msg ) { System . out . print ( msg ) ; System . out . println ( "\n" ) ; m_exitCode = - 1 ; printUsage ( ) ; }
|
General application support
| 44
| 3
|
155,539
|
static void printHelp ( OutputStream prtStr ) { try { InputStream is = SQLCommand . class . getResourceAsStream ( m_readme ) ; while ( is . available ( ) > 0 ) { byte [ ] bytes = new byte [ is . available ( ) ] ; // Fix for ENG-3440 is . read ( bytes , 0 , bytes . length ) ; prtStr . write ( bytes ) ; // For JUnit test } } catch ( Exception x ) { System . err . println ( x . getMessage ( ) ) ; m_exitCode = - 1 ; return ; } }
|
Default visibility is for test purposes.
| 130
| 7
|
155,540
|
public static void main ( String args [ ] ) { System . setProperty ( "voltdb_no_logging" , "true" ) ; int exitCode = mainWithReturnCode ( args ) ; System . exit ( exitCode ) ; }
|
Application entry point
| 52
| 3
|
155,541
|
private synchronized void checkTimeout ( final long timeoutMs ) { final Entry < Integer , SendWork > oldest = m_outstandingWork . firstEntry ( ) ; if ( oldest != null ) { final long now = System . currentTimeMillis ( ) ; SendWork work = oldest . getValue ( ) ; if ( ( now - work . m_ts ) > timeoutMs ) { StreamSnapshotTimeoutException exception = new StreamSnapshotTimeoutException ( String . format ( "A snapshot write task failed after a timeout (currently %d seconds outstanding). " + "Node rejoin may need to be retried" , ( now - work . m_ts ) / 1000 ) ) ; rejoinLog . error ( exception . getMessage ( ) ) ; m_writeFailed . compareAndSet ( null , exception ) ; } } }
|
Called by the watchdog from the periodic work thread to check if the oldest unacked block is older than the timeout interval.
| 174
| 25
|
155,542
|
synchronized void clearOutstanding ( ) { if ( m_outstandingWork . isEmpty ( ) && ( m_outstandingWorkCount . get ( ) == 0 ) ) { return ; } rejoinLog . trace ( "Clearing outstanding work." ) ; for ( Entry < Integer , SendWork > e : m_outstandingWork . entrySet ( ) ) { e . getValue ( ) . discard ( ) ; } m_outstandingWork . clear ( ) ; m_outstandingWorkCount . set ( 0 ) ; }
|
Idempotent synchronized method to perform all cleanup of outstanding work so buffers aren't leaked.
| 114
| 19
|
155,543
|
@ Override public synchronized void receiveAck ( int blockIndex ) { SendWork work = m_outstandingWork . get ( blockIndex ) ; // releases the BBContainers and cleans up if ( work == null || work . m_ackCounter == null ) { rejoinLog . warn ( "Received invalid blockIndex ack for targetId " + m_targetId + " for index " + String . valueOf ( blockIndex ) + ( ( work == null ) ? " already removed the block." : " ack counter haven't been initialized." ) ) ; return ; } if ( work . receiveAck ( ) ) { rejoinLog . trace ( "Received ack for targetId " + m_targetId + " removes block for index " + String . valueOf ( blockIndex ) ) ; m_outstandingWorkCount . decrementAndGet ( ) ; m_outstandingWork . remove ( blockIndex ) ; work . discard ( ) ; } else { rejoinLog . trace ( "Received ack for targetId " + m_targetId + " decrements counter for block index " + String . valueOf ( blockIndex ) ) ; } }
|
Synchronized method to handle the arrival of an Ack.
| 249
| 12
|
155,544
|
synchronized ListenableFuture < Boolean > send ( StreamSnapshotMessageType type , int blockIndex , BBContainer chunk , boolean replicatedTable ) { SettableFuture < Boolean > sendFuture = SettableFuture . create ( ) ; rejoinLog . trace ( "Sending block " + blockIndex + " of type " + ( replicatedTable ? "REPLICATED " : "PARTITIONED " ) + type . name ( ) + " from targetId " + m_targetId + " to " + CoreUtils . hsIdToString ( m_destHSId ) + ( replicatedTable ? ", " + CoreUtils . hsIdCollectionToString ( m_otherDestHostHSIds ) : "" ) ) ; SendWork sendWork = new SendWork ( type , m_targetId , m_destHSId , replicatedTable ? m_otherDestHostHSIds : null , chunk , sendFuture ) ; m_outstandingWork . put ( blockIndex , sendWork ) ; m_outstandingWorkCount . incrementAndGet ( ) ; m_sender . offer ( sendWork ) ; return sendFuture ; }
|
Send data to the rejoining node, tracking what was sent for ack purposes. Synchronized to protect access to m_outstandingWork and to keep m_outstandingWorkCount in sync with m_outstandingWork.
| 242
| 46
|
155,545
|
public static String toSchemaWithoutInlineBatches ( String schema ) { StringBuilder sb = new StringBuilder ( schema ) ; int i = sb . indexOf ( batchSpecificComments ) ; if ( i != - 1 ) { sb . delete ( i , i + batchSpecificComments . length ( ) ) ; } i = sb . indexOf ( startBatch ) ; if ( i != - 1 ) { sb . delete ( i , i + startBatch . length ( ) ) ; } i = sb . indexOf ( endBatch ) ; if ( i != - 1 ) { sb . delete ( i , i + endBatch . length ( ) ) ; } return sb . toString ( ) ; }
|
Given a schema, strips out inline batch statements and associated comments.
| 159
| 12
|
155,546
|
final void shutdown ( ) throws InterruptedException { // stop the old proc call reaper m_timeoutReaperHandle . cancel ( false ) ; m_ex . shutdown ( ) ; if ( CoreUtils . isJunitTest ( ) ) { m_ex . awaitTermination ( 1 , TimeUnit . SECONDS ) ; } else { m_ex . awaitTermination ( 365 , TimeUnit . DAYS ) ; } m_network . shutdown ( ) ; if ( m_cipherService != null ) { m_cipherService . shutdown ( ) ; m_cipherService = null ; } }
|
Shut down the VoltNetwork, allowing the Ports to close and free resources like memory pools
| 130
| 16
|
155,547
|
public long getPartitionForParameter ( byte typeValue , Object value ) { if ( m_hashinator == null ) { return - 1 ; } return m_hashinator . getHashedPartitionForParameter ( typeValue , value ) ; }
|
This is used by clients such as CSVLoader, which puts processing into buckets.
| 52
| 15
|
155,548
|
private void refreshPartitionKeys ( boolean topologyUpdate ) { long interval = System . currentTimeMillis ( ) - m_lastPartitionKeyFetched . get ( ) ; if ( ! m_useClientAffinity && interval < PARTITION_KEYS_INFO_REFRESH_FREQUENCY ) { return ; } try { ProcedureInvocation invocation = new ProcedureInvocation ( m_sysHandle . getAndDecrement ( ) , "@GetPartitionKeys" , "INTEGER" ) ; CountDownLatch latch = null ; if ( ! topologyUpdate ) { latch = new CountDownLatch ( 1 ) ; } PartitionUpdateCallback cb = new PartitionUpdateCallback ( latch ) ; if ( ! queue ( invocation , cb , true , System . nanoTime ( ) , USE_DEFAULT_CLIENT_TIMEOUT ) ) { m_partitionUpdateStatus . set ( new ClientResponseImpl ( ClientResponseImpl . SERVER_UNAVAILABLE , new VoltTable [ 0 ] , "Fails to queue the partition update query, please try later." ) ) ; } if ( ! topologyUpdate ) { latch . await ( ) ; } m_lastPartitionKeyFetched . set ( System . currentTimeMillis ( ) ) ; } catch ( InterruptedException | IOException e ) { m_partitionUpdateStatus . set ( new ClientResponseImpl ( ClientResponseImpl . SERVER_UNAVAILABLE , new VoltTable [ 0 ] , "Fails to fetch partition keys from server:" + e . getMessage ( ) ) ) ; } }
|
Set up partitions.
| 343
| 4
|
155,549
|
public void addSortExpressions ( List < AbstractExpression > sortExprs , List < SortDirectionType > sortDirs ) { assert ( sortExprs . size ( ) == sortDirs . size ( ) ) ; for ( int i = 0 ; i < sortExprs . size ( ) ; ++ i ) { addSortExpression ( sortExprs . get ( i ) , sortDirs . get ( i ) ) ; } }
|
Add multiple sort expressions to the order-by
| 99
| 9
|
155,550
|
public void addSortExpression ( AbstractExpression sortExpr , SortDirectionType sortDir ) { assert ( sortExpr != null ) ; // PlanNodes all need private deep copies of expressions // so that the resolveColumnIndexes results // don't get bashed by other nodes or subsequent planner runs m_sortExpressions . add ( sortExpr . clone ( ) ) ; m_sortDirections . add ( sortDir ) ; }
|
Add a sort expression to the order-by
| 94
| 9
|
155,551
|
static java . util . logging . Level getPriorityForLevel ( Level level ) { switch ( level ) { case DEBUG : return java . util . logging . Level . FINEST ; case ERROR : return java . util . logging . Level . SEVERE ; case FATAL : return java . util . logging . Level . SEVERE ; case INFO : return java . util . logging . Level . INFO ; case TRACE : return java . util . logging . Level . FINER ; case WARN : return java . util . logging . Level . WARNING ; default : return null ; } }
|
Convert the VoltLogger Level to the java.util.logging Level
| 122
| 16
|
155,552
|
void checkAddColumn ( ColumnSchema col ) { if ( table . isText ( ) && ! table . isEmpty ( session ) ) { throw Error . error ( ErrorCode . X_S0521 ) ; } if ( table . findColumn ( col . getName ( ) . name ) != - 1 ) { throw Error . error ( ErrorCode . X_42504 ) ; } if ( col . isPrimaryKey ( ) && table . hasPrimaryKey ( ) ) { throw Error . error ( ErrorCode . X_42530 ) ; } if ( col . isIdentity ( ) && table . hasIdentityColumn ( ) ) { throw Error . error ( ErrorCode . X_42525 ) ; } if ( ! table . isEmpty ( session ) && ! col . hasDefault ( ) && ( ! col . isNullable ( ) || col . isPrimaryKey ( ) ) && ! col . isIdentity ( ) ) { throw Error . error ( ErrorCode . X_42531 ) ; } }
|
Checks if the attributes of the Column argument c are compatible with the operation of adding such a Column to the Table argument table.
| 215
| 26
|
155,553
|
void makeNewTable ( OrderedHashSet dropConstraintSet , OrderedHashSet dropIndexSet ) { Table tn = table . moveDefinition ( session , table . tableType , null , null , null , - 1 , 0 , dropConstraintSet , dropIndexSet ) ; if ( tn . indexList . length == table . indexList . length ) { database . persistentStoreCollection . releaseStore ( tn ) ; return ; } tn . moveData ( session , table , - 1 , 0 ) ; database . persistentStoreCollection . releaseStore ( table ) ; table = tn ; }
|
Drops constraints and their indexes in the table. Uses a set of names.
| 130
| 16
|
155,554
|
Index addIndex ( int [ ] col , HsqlName name , boolean unique , boolean migrating ) { Index newindex ; if ( table . isEmpty ( session ) || table . isIndexingMutable ( ) ) { PersistentStore store = session . sessionData . getRowStore ( table ) ; newindex = table . createIndex ( store , name , col , null , null , unique , migrating , false , false ) ; } else { newindex = table . createIndexStructure ( name , col , null , null , unique , migrating , false , false ) ; Table tn = table . moveDefinition ( session , table . tableType , null , null , newindex , - 1 , 0 , emptySet , emptySet ) ; // for all sessions move the data tn . moveData ( session , table , - 1 , 0 ) ; database . persistentStoreCollection . releaseStore ( table ) ; table = tn ; setNewTableInSchema ( table ) ; updateConstraints ( table , emptySet ) ; } database . schemaManager . addSchemaObject ( newindex ) ; database . schemaManager . recompileDependentObjects ( table ) ; return newindex ; }
|
Because of the way indexes and column data are held in memory and on disk, it is necessary to recreate the table when an index is added to a non-empty cached table.
| 251
| 35
|
155,555
|
void dropIndex ( String indexName ) { Index index ; index = table . getIndex ( indexName ) ; if ( table . isIndexingMutable ( ) ) { table . dropIndex ( session , indexName ) ; } else { OrderedHashSet indexSet = new OrderedHashSet ( ) ; indexSet . add ( table . getIndex ( indexName ) . getName ( ) ) ; Table tn = table . moveDefinition ( session , table . tableType , null , null , null , - 1 , 0 , emptySet , indexSet ) ; tn . moveData ( session , table , - 1 , 0 ) ; updateConstraints ( tn , emptySet ) ; setNewTableInSchema ( tn ) ; database . persistentStoreCollection . releaseStore ( table ) ; table = tn ; } if ( ! index . isConstraint ( ) ) { database . schemaManager . removeSchemaObject ( index . getName ( ) ) ; } database . schemaManager . recompileDependentObjects ( table ) ; }
|
Because of the way indexes and column data are held in memory and on disk, it is necessary to recreate the table when an index is added to or removed from a non-empty table.
| 224
| 37
|
155,556
|
void retypeColumn ( ColumnSchema oldCol , ColumnSchema newCol ) { boolean allowed = true ; int oldType = oldCol . getDataType ( ) . typeCode ; int newType = newCol . getDataType ( ) . typeCode ; if ( ! table . isEmpty ( session ) && oldType != newType ) { allowed = newCol . getDataType ( ) . canConvertFrom ( oldCol . getDataType ( ) ) ; switch ( oldType ) { case Types . SQL_BLOB : case Types . SQL_CLOB : case Types . OTHER : case Types . JAVA_OBJECT : allowed = false ; break ; } } if ( ! allowed ) { throw Error . error ( ErrorCode . X_42561 ) ; } int colIndex = table . getColumnIndex ( oldCol . getName ( ) . name ) ; // if there is a multi-column PK, do not change the PK attributes if ( newCol . isIdentity ( ) && table . hasIdentityColumn ( ) && table . identityColumn != colIndex ) { throw Error . error ( ErrorCode . X_42525 ) ; } if ( table . getPrimaryKey ( ) . length > 1 ) { newCol . setPrimaryKey ( oldCol . isPrimaryKey ( ) ) ; if ( ArrayUtil . find ( table . getPrimaryKey ( ) , colIndex ) != - 1 ) { } } else if ( table . hasPrimaryKey ( ) ) { if ( oldCol . isPrimaryKey ( ) ) { newCol . setPrimaryKey ( true ) ; } else if ( newCol . isPrimaryKey ( ) ) { throw Error . error ( ErrorCode . X_42532 ) ; } } else if ( newCol . isPrimaryKey ( ) ) { throw Error . error ( ErrorCode . X_42530 ) ; } // apply and return if only metadata change is required boolean meta = newType == oldType ; meta &= oldCol . isNullable ( ) == newCol . isNullable ( ) ; meta &= oldCol . getDataType ( ) . scale == newCol . getDataType ( ) . scale ; meta &= ( oldCol . isIdentity ( ) == newCol . isIdentity ( ) ) ; meta &= ( oldCol . getDataType ( ) . precision == newCol . getDataType ( ) . precision || ( oldCol . getDataType ( ) . precision < newCol . getDataType ( ) . precision && ( oldType == Types . SQL_VARCHAR || oldType == Types . SQL_VARBINARY ) ) ) ; if ( meta ) { // size of some types may be increased with this command // default expressions can change oldCol . setType ( newCol ) ; oldCol . setDefaultExpression ( newCol . getDefaultExpression ( ) ) ; if ( newCol . isIdentity ( ) ) { oldCol . setIdentity ( newCol . getIdentitySequence ( ) ) ; } table . setColumnTypeVars ( colIndex ) ; table . resetDefaultsFlag ( ) ; return ; } database . schemaManager . checkColumnIsReferenced ( table . getName ( ) , table . getColumn ( colIndex ) . getName ( ) ) ; table . checkColumnInCheckConstraint ( colIndex ) ; table . checkColumnInFKConstraint ( colIndex ) ; checkConvertColDataType ( oldCol , newCol ) ; retypeColumn ( newCol , colIndex ) ; }
|
Allows changing the type or addition of an IDENTITY sequence.
| 754
| 13
|
155,557
|
void setColNullability ( ColumnSchema column , boolean nullable ) { Constraint c = null ; int colIndex = table . getColumnIndex ( column . getName ( ) . name ) ; if ( column . isNullable ( ) == nullable ) { return ; } if ( nullable ) { if ( column . isPrimaryKey ( ) ) { throw Error . error ( ErrorCode . X_42526 ) ; } table . checkColumnInFKConstraint ( colIndex , Constraint . SET_NULL ) ; removeColumnNotNullConstraints ( colIndex ) ; } else { HsqlName constName = database . nameManager . newAutoName ( "CT" , table . getSchemaName ( ) , table . getName ( ) , SchemaObject . CONSTRAINT ) ; c = new Constraint ( constName , true , null , Constraint . CHECK ) ; c . check = new ExpressionLogical ( column ) ; c . prepareCheckConstraint ( session , table , true ) ; column . setNullable ( false ) ; table . addConstraint ( c ) ; table . setColumnTypeVars ( colIndex ) ; database . schemaManager . addSchemaObject ( c ) ; } }
|
performs the work for changing the nullability of a column
| 269
| 12
|
155,558
|
void setColDefaultExpression ( int colIndex , Expression def ) { if ( def == null ) { table . checkColumnInFKConstraint ( colIndex , Constraint . SET_DEFAULT ) ; } table . setDefaultExpression ( colIndex , def ) ; }
|
performs the work for changing the default value of a column
| 60
| 12
|
155,559
|
public boolean setTableType ( Session session , int newType ) { int currentType = table . getTableType ( ) ; if ( currentType == newType ) { return false ; } switch ( newType ) { case TableBase . CACHED_TABLE : break ; case TableBase . MEMORY_TABLE : break ; default : return false ; } Table tn ; try { tn = table . moveDefinition ( session , newType , null , null , null , - 1 , 0 , emptySet , emptySet ) ; tn . moveData ( session , table , - 1 , 0 ) ; updateConstraints ( tn , emptySet ) ; } catch ( HsqlException e ) { return false ; } setNewTableInSchema ( tn ) ; database . persistentStoreCollection . releaseStore ( table ) ; table = tn ; database . schemaManager . recompileDependentObjects ( table ) ; return true ; }
|
Changes the type of a table
| 199
| 6
|
155,560
|
Index addExprIndex ( int [ ] col , Expression [ ] indexExprs , HsqlName name , boolean unique , boolean migrating , Expression predicate ) { Index newindex ; if ( table . isEmpty ( session ) || table . isIndexingMutable ( ) ) { newindex = table . createAndAddExprIndexStructure ( name , col , indexExprs , unique , migrating , false ) ; } else { newindex = table . createIndexStructure ( name , col , null , null , unique , migrating , false , false ) . withExpressions ( indexExprs ) ; Table tn = table . moveDefinition ( session , table . tableType , null , null , newindex , - 1 , 0 , emptySet , emptySet ) ; // for all sessions move the data tn . moveData ( session , table , - 1 , 0 ) ; database . persistentStoreCollection . releaseStore ( table ) ; table = tn ; setNewTableInSchema ( table ) ; updateConstraints ( table , emptySet ) ; } database . schemaManager . addSchemaObject ( newindex ) ; database . schemaManager . recompileDependentObjects ( table ) ; if ( predicate != null ) { newindex = newindex . withPredicate ( predicate ) ; } return newindex ; }
|
A VoltDB extended variant of addIndex that supports indexed generalized non-column expressions.
| 281
| 17
|
155,561
|
Index addIndex ( int [ ] col , HsqlName name , boolean unique , boolean migrating , Expression predicate ) { return addIndex ( col , name , unique , migrating ) . withPredicate ( predicate ) ; }
|
A VoltDB extended variant of addIndex that supports a partial index predicate.
| 45
| 14
|
155,562
|
static public ParsedColInfo fromOrderByXml ( AbstractParsedStmt parsedStmt , VoltXMLElement orderByXml ) { // A generic adjuster that just calls finalizeValueTypes ExpressionAdjuster adjuster = new ExpressionAdjuster ( ) { @ Override public AbstractExpression adjust ( AbstractExpression expr ) { ExpressionUtil . finalizeValueTypes ( expr ) ; return expr ; } } ; return fromOrderByXml ( parsedStmt , orderByXml , adjuster ) ; }
|
Construct a ParsedColInfo from Volt XML.
| 111
| 10
|
155,563
|
static public ParsedColInfo fromOrderByXml ( AbstractParsedStmt parsedStmt , VoltXMLElement orderByXml , ExpressionAdjuster adjuster ) { // make sure everything is kosher assert ( orderByXml . name . equalsIgnoreCase ( "orderby" ) ) ; // get desc/asc String desc = orderByXml . attributes . get ( "desc" ) ; boolean descending = ( desc != null ) && ( desc . equalsIgnoreCase ( "true" ) ) ; // get the columnref or other expression inside the orderby node VoltXMLElement child = orderByXml . children . get ( 0 ) ; assert ( child != null ) ; // create the orderby column ParsedColInfo orderCol = new ParsedColInfo ( ) ; orderCol . m_orderBy = true ; orderCol . m_ascending = ! descending ; AbstractExpression orderExpr = parsedStmt . parseExpressionTree ( child ) ; assert ( orderExpr != null ) ; orderCol . m_expression = adjuster . adjust ( orderExpr ) ; // Cases: // child could be columnref, in which case it's either a normal column // or an expression. // The latter could be a case if this column came from a subquery that // was optimized out. // Just make a ParsedColInfo object for it and the planner will do the // right thing later. if ( orderExpr instanceof TupleValueExpression ) { TupleValueExpression tve = ( TupleValueExpression ) orderExpr ; orderCol . m_columnName = tve . getColumnName ( ) ; orderCol . m_tableName = tve . getTableName ( ) ; orderCol . m_tableAlias = tve . getTableAlias ( ) ; if ( orderCol . m_tableAlias == null ) { orderCol . m_tableAlias = orderCol . m_tableName ; } orderCol . m_alias = tve . getColumnAlias ( ) ; } else { String alias = child . attributes . get ( "alias" ) ; orderCol . m_alias = alias ; orderCol . m_tableName = AbstractParsedStmt . TEMP_TABLE_NAME ; orderCol . m_tableAlias = AbstractParsedStmt . TEMP_TABLE_NAME ; orderCol . m_columnName = "" ; // Replace its expression to TVE after we build the ExpressionIndexMap if ( ( child . name . equals ( "operation" ) == false ) && ( child . name . equals ( "aggregation" ) == false ) && ( child . name . equals ( "win_aggregation" ) == false ) && ( child . name . equals ( "function" ) == false ) && ( child . name . equals ( "rank" ) == false ) && ( child . name . equals ( "value" ) == false ) && ( child . name . equals ( "columnref" ) == false ) ) { throw new RuntimeException ( "ORDER BY parsed with strange child node type: " + child . name ) ; } } return orderCol ; }
|
Construct a ParsedColInfo from Volt XML. Allow the caller to specify actions to finalize the parsed expression.
| 669
| 22
|
155,564
|
public SchemaColumn asSchemaColumn ( ) { String columnAlias = ( m_alias == null ) ? m_columnName : m_alias ; return new SchemaColumn ( m_tableName , m_tableAlias , m_columnName , columnAlias , m_expression , m_differentiator ) ; }
|
Return this as an instance of SchemaColumn
| 67
| 9
|
155,565
|
public static void crashVoltDB ( String reason , String traces [ ] , String filename , int lineno ) { VoltLogger hostLog = new VoltLogger ( "HOST" ) ; String fn = ( filename == null ) ? "unknown" : filename ; String re = ( reason == null ) ? "Fatal EE error." : reason ; hostLog . fatal ( re + " In " + fn + ":" + lineno ) ; if ( traces != null ) { for ( String trace : traces ) { hostLog . fatal ( trace ) ; } } VoltDB . crashLocalVoltDB ( re + " In " + fn + ":" + lineno , true , null ) ; }
|
Call VoltDB . crashVoltDB on behalf of the EE
| 148
| 13
|
155,566
|
public byte [ ] nextDependencyAsBytes ( final int dependencyId ) { final VoltTable vt = m_dependencyTracker . nextDependency ( dependencyId ) ; if ( vt != null ) { final ByteBuffer buf2 = PrivateVoltTableFactory . getTableDataReference ( vt ) ; int pos = buf2 . position ( ) ; byte [ ] bytes = new byte [ buf2 . limit ( ) - pos ] ; buf2 . get ( bytes ) ; buf2 . position ( pos ) ; return bytes ; } else { return null ; } }
|
Called from the ExecutionEngine to request serialized dependencies .
| 122
| 12
|
155,567
|
public void loadCatalog ( long timestamp , String serializedCatalog ) { try { setupProcedure ( null ) ; m_fragmentContext = FragmentContext . CATALOG_LOAD ; coreLoadCatalog ( timestamp , getStringBytes ( serializedCatalog ) ) ; } finally { m_fragmentContext = FragmentContext . UNKNOWN ; } }
|
Pass the catalog to the engine
| 76
| 6
|
155,568
|
public final void updateCatalog ( final long timestamp , final boolean isStreamUpdate , final String diffCommands ) throws EEException { try { setupProcedure ( null ) ; m_fragmentContext = FragmentContext . CATALOG_UPDATE ; coreUpdateCatalog ( timestamp , isStreamUpdate , diffCommands ) ; } finally { m_fragmentContext = FragmentContext . UNKNOWN ; } }
|
Pass diffs to apply to the EE's catalog to update it
| 87
| 13
|
155,569
|
public FastDeserializer executePlanFragments ( int numFragmentIds , long [ ] planFragmentIds , long [ ] inputDepIds , Object [ ] parameterSets , DeterminismHash determinismHash , String [ ] sqlTexts , boolean [ ] isWriteFrags , int [ ] sqlCRCs , long txnId , long spHandle , long lastCommittedSpHandle , long uniqueId , long undoQuantumToken , boolean traceOn ) throws EEException { try { // For now, re-transform undoQuantumToken to readOnly. Redundancy work in site.executePlanFragments() m_fragmentContext = ( undoQuantumToken == Long . MAX_VALUE ) ? FragmentContext . RO_BATCH : FragmentContext . RW_BATCH ; // reset context for progress updates m_sqlTexts = sqlTexts ; if ( traceOn ) { final VoltTrace . TraceEventBatch traceLog = VoltTrace . log ( VoltTrace . Category . SPSITE ) ; if ( traceLog != null ) { traceLog . add ( ( ) -> VoltTrace . beginDuration ( "execplanfragment" , "txnId" , TxnEgo . txnIdToString ( txnId ) , "partition" , Integer . toString ( m_partitionId ) ) ) ; } } FastDeserializer results = coreExecutePlanFragments ( m_currentBatchIndex , numFragmentIds , planFragmentIds , inputDepIds , parameterSets , determinismHash , isWriteFrags , sqlCRCs , txnId , spHandle , lastCommittedSpHandle , uniqueId , undoQuantumToken , traceOn ) ; if ( traceOn ) { final VoltTrace . TraceEventBatch traceLog = VoltTrace . log ( VoltTrace . Category . SPSITE ) ; if ( traceLog != null ) { traceLog . add ( VoltTrace :: endDuration ) ; } } m_plannerStats . updateEECacheStats ( m_eeCacheSize , numFragmentIds - m_cacheMisses , m_cacheMisses , m_partitionId ) ; return results ; } finally { // don't count any cache misses when there's an exception. This is a lie and they // will still be used to estimate the cache size, but it's hard to count cache hits // during an exception, so we don't count cache misses either to get the right ratio. m_cacheMisses = 0 ; m_sqlTexts = null ; m_fragmentContext = FragmentContext . UNKNOWN ; } }
|
Run multiple plan fragments
| 575
| 4
|
155,570
|
public synchronized void setFlushInterval ( long delay , long seconds ) { if ( m_flush != null ) { m_flush . cancel ( false ) ; m_flush = null ; } if ( seconds > 0 ) { m_flush = m_ses . scheduleAtFixedRate ( new Runnable ( ) { @ Override public void run ( ) { try { flush ( ) ; } catch ( Exception e ) { loaderLog . error ( "Failed to flush loader buffer, some tuples may not be inserted." , e ) ; } } } , delay , seconds , TimeUnit . SECONDS ) ; } }
|
Set periodic flush interval and initial delay in seconds .
| 133
| 10
|
155,571
|
@ Override public synchronized void close ( ) { if ( isClosed ) { return ; } rollback ( false ) ; try { database . logger . writeToLog ( this , Tokens . T_DISCONNECT ) ; } catch ( HsqlException e ) { } sessionData . closeAllNavigators ( ) ; sessionData . persistentStoreCollection . clearAllTables ( ) ; sessionData . closeResultCache ( ) ; database . compiledStatementManager . removeSession ( sessionId ) ; database . sessionManager . removeSession ( this ) ; database . closeIfLast ( ) ; database = null ; user = null ; rowActionList = null ; sessionContext . savepoints = null ; intConnection = null ; sessionContext = null ; lastIdentity = null ; isClosed = true ; }
|
Closes this Session .
| 167
| 5
|
155,572
|
public void setIsolation ( int level ) { if ( isInMidTransaction ( ) ) { throw Error . error ( ErrorCode . X_25001 ) ; } if ( level == SessionInterface . TX_READ_UNCOMMITTED ) { isReadOnly = true ; } isolationMode = level ; if ( isolationMode != isolationModeDefault ) { database . logger . writeToLog ( this , getTransactionIsolationSQL ( ) ) ; } }
|
sets ISOLATION for the next transaction only
| 96
| 9
|
155,573
|
void checkDDLWrite ( ) { checkReadWrite ( ) ; if ( isProcessingScript || isProcessingLog ) { return ; } if ( database . isFilesReadOnly ( ) ) { throw Error . error ( ErrorCode . DATABASE_IS_READONLY ) ; } }
|
This is used for creating new database objects such as tables .
| 64
| 12
|
155,574
|
void addDeleteAction ( Table table , Row row ) { // tempActionHistory.add("add delete action " + actionTimestamp); if ( abortTransaction ) { // throw Error.error(ErrorCode.X_40001); } database . txManager . addDeleteAction ( this , table , row ) ; }
|
Adds a delete action to the row and the transaction manager .
| 66
| 12
|
155,575
|
@ Override public synchronized void setAutoCommit ( boolean autocommit ) { if ( isClosed ) { return ; } if ( autocommit != isAutoCommit ) { commit ( false ) ; isAutoCommit = autocommit ; } }
|
Setter for the autocommit attribute .
| 58
| 10
|
155,576
|
@ Override public synchronized void commit ( boolean chain ) { // tempActionHistory.add("commit " + actionTimestamp); if ( isClosed ) { return ; } if ( ! isTransaction ) { isReadOnly = isReadOnlyDefault ; isolationMode = isolationModeDefault ; return ; } if ( ! database . txManager . commitTransaction ( this ) ) { // tempActionHistory.add("commit aborts " + actionTimestamp); rollback ( false ) ; throw Error . error ( ErrorCode . X_40001 ) ; } endTransaction ( true ) ; }
|
Commits any uncommitted transaction this Session may have open
| 121
| 12
|
155,577
|
@ Override public synchronized void rollback ( boolean chain ) { // tempActionHistory.add("rollback " + actionTimestamp); if ( isClosed ) { return ; } if ( ! isTransaction ) { isReadOnly = isReadOnlyDefault ; isolationMode = isolationModeDefault ; return ; } try { database . logger . writeToLog ( this , Tokens . T_ROLLBACK ) ; } catch ( HsqlException e ) { } database . txManager . rollback ( this ) ; endTransaction ( false ) ; }
|
Rolls back any uncommitted transaction this Session may have open.
| 113
| 14
|
155,578
|
@ Override public synchronized void savepoint ( String name ) { int index = sessionContext . savepoints . getIndex ( name ) ; if ( index != - 1 ) { sessionContext . savepoints . remove ( name ) ; sessionContext . savepointTimestamps . remove ( index ) ; } sessionContext . savepoints . add ( name , ValuePool . getInt ( rowActionList . size ( ) ) ) ; sessionContext . savepointTimestamps . addLast ( actionTimestamp ) ; try { database . logger . writeToLog ( this , getSavepointSQL ( name ) ) ; } catch ( HsqlException e ) { } }
|
Registers a transaction SAVEPOINT . A new SAVEPOINT with the name of an existing one replaces the old SAVEPOINT .
| 137
| 30
|
155,579
|
@ Override public synchronized void rollbackToSavepoint ( String name ) { if ( isClosed ) { return ; } int index = sessionContext . savepoints . getIndex ( name ) ; if ( index < 0 ) { throw Error . error ( ErrorCode . X_3B001 , name ) ; } database . txManager . rollbackSavepoint ( this , index ) ; try { database . logger . writeToLog ( this , getSavepointRollbackSQL ( name ) ) ; } catch ( HsqlException e ) { } }
|
Performs a partial transaction ROLLBACK to savepoint .
| 115
| 12
|
155,580
|
public synchronized void rollbackToSavepoint ( ) { if ( isClosed ) { return ; } String name = ( String ) sessionContext . savepoints . getKey ( 0 ) ; database . txManager . rollbackSavepoint ( this , 0 ) ; try { database . logger . writeToLog ( this , getSavepointRollbackSQL ( name ) ) ; } catch ( HsqlException e ) { } }
|
Performs a partial transaction ROLLBACK of current savepoint level .
| 88
| 14
|
155,581
|
@ Override public synchronized void releaseSavepoint ( String name ) { // remove this and all later savepoints int index = sessionContext . savepoints . getIndex ( name ) ; if ( index < 0 ) { throw Error . error ( ErrorCode . X_3B001 , name ) ; } while ( sessionContext . savepoints . size ( ) > index ) { sessionContext . savepoints . remove ( sessionContext . savepoints . size ( ) - 1 ) ; sessionContext . savepointTimestamps . removeLast ( ) ; } }
|
Releases a savepoint
| 114
| 5
|
155,582
|
public void setReadOnly ( boolean readonly ) { if ( ! readonly && database . databaseReadOnly ) { throw Error . error ( ErrorCode . DATABASE_IS_READONLY ) ; } if ( isInMidTransaction ( ) ) { throw Error . error ( ErrorCode . X_25001 ) ; } isReadOnly = readonly ; }
|
sets READ ONLY for next transaction only
| 78
| 7
|
155,583
|
private Result executeResultUpdate ( Result cmd ) { long id = cmd . getResultId ( ) ; int actionType = cmd . getActionType ( ) ; Result result = sessionData . getDataResult ( id ) ; if ( result == null ) { return Result . newErrorResult ( Error . error ( ErrorCode . X_24501 ) ) ; } Object [ ] pvals = cmd . getParameterData ( ) ; Type [ ] types = cmd . metaData . columnTypes ; StatementQuery statement = ( StatementQuery ) result . getStatement ( ) ; QueryExpression qe = statement . queryExpression ; Table baseTable = qe . getBaseTable ( ) ; int [ ] columnMap = qe . getBaseTableColumnMap ( ) ; sessionContext . rowUpdateStatement . setRowActionProperties ( actionType , baseTable , types , columnMap ) ; Result resultOut = executeCompiledStatement ( sessionContext . rowUpdateStatement , pvals ) ; return resultOut ; }
|
Retrieves the result of inserting, updating, or deleting a row from an updatable result.
| 209
| 18
|
155,584
|
HsqlName getSchemaHsqlName ( String name ) { return name == null ? currentSchema : database . schemaManager . getSchemaHsqlName ( name ) ; }
|
If schemaName is null, return the current schema name; else return the HsqlName object for the schema. If schemaName does not exist, throw.
| 39
| 29
|
155,585
|
public String getSchemaName ( String name ) { return name == null ? currentSchema . name : database . schemaManager . getSchemaName ( name ) ; }
|
Same as above, but returns a String.
| 36
| 6
|
155,586
|
public Table defineLocalTable ( HsqlName tableName , HsqlName [ ] colNames , Type [ ] colTypes ) { // I'm not sure the table type, here TableBase.CACHED_TABLE, matters // all that much. assert ( localTables != null ) ; Table newTable = TableUtil . newTable ( database , TableBase . CACHED_TABLE , tableName ) ; TableUtil . setColumnsInSchemaTable ( newTable , colNames , colTypes ) ; newTable . createPrimaryKey ( new int [ 0 ] ) ; localTables . put ( tableName . name , newTable ) ; return newTable ; }
|
Define a local table with the given name, column names, and column types.
| 143
| 15
|
155,587
|
public void updateLocalTable ( HsqlName queryName , Type [ ] finalTypes ) { assert ( localTables != null ) ; Table tbl = getLocalTable ( queryName . name ) ; assert ( tbl != null ) ; TableUtil . updateColumnTypes ( tbl , finalTypes ) ; }
|
Update the local table with new types . This is very dubious .
| 66
| 13
|
155,588
|
void logSequences ( ) { OrderedHashSet set = sessionData . sequenceUpdateSet ; if ( set == null || set . isEmpty ( ) ) { return ; } for ( int i = 0 , size = set . size ( ) ; i < size ; i ++ ) { NumberSequence sequence = ( NumberSequence ) set . get ( i ) ; database . logger . writeSequenceStatement ( this , sequence ) ; } sessionData . sequenceUpdateSet . clear ( ) ; }
|
Writes the current values of updated SEQUENCE objects to the log.
| 104
| 5
|
155,589
|
public void addLiteralSchema ( String ddlText ) throws IOException { File temp = File . createTempFile ( "literalschema" , "sql" ) ; temp . deleteOnExit ( ) ; FileWriter out = new FileWriter ( temp ) ; out . write ( ddlText ) ; out . close ( ) ; addSchema ( URLEncoder . encode ( temp . getAbsolutePath ( ) , "UTF-8" ) ) ; }
|
This is test code written by Ryan even though it was committed by John .
| 102
| 15
|
155,590
|
public void addSchema ( String schemaURL ) { try { schemaURL = URLDecoder . decode ( schemaURL , "UTF-8" ) ; } catch ( final UnsupportedEncodingException e ) { e . printStackTrace ( ) ; System . exit ( - 1 ) ; } assert ( m_schemas . contains ( schemaURL ) == false ) ; final File schemaFile = new File ( schemaURL ) ; assert ( schemaFile != null ) ; assert ( schemaFile . isDirectory ( ) == false ) ; // this check below fails in some valid cases (like when the file is in a jar) //assert schemaFile.canRead() // : "can't read file: " + schemaPath; m_schemas . add ( schemaURL ) ; }
|
Add a schema based on a URL .
| 165
| 8
|
155,591
|
private static boolean isParameterized ( VoltXMLElement elm ) { final String name = elm . name ; if ( name . equals ( "value" ) ) { return elm . getBoolAttribute ( "isparam" , false ) ; } else if ( name . equals ( "vector" ) || name . equals ( "row" ) ) { return elm . children . stream ( ) . anyMatch ( ExpressionUtil :: isParameterized ) ; } else if ( name . equals ( "columnref" ) || name . equals ( "function" ) || name . equals ( "tablesubquery" ) ) { return false ; } else { assert name . equals ( "operation" ) : "unknown VoltXMLElement type: " + name ; final ExpressionType op = mapOfVoltXMLOpType . get ( elm . attributes . get ( "optype" ) ) ; assert op != null ; switch ( op ) { case CONJUNCTION_OR : // two operators case CONJUNCTION_AND : case COMPARE_GREATERTHAN : case COMPARE_LESSTHAN : case COMPARE_EQUAL : case COMPARE_NOTEQUAL : case COMPARE_GREATERTHANOREQUALTO : case COMPARE_LESSTHANOREQUALTO : case OPERATOR_PLUS : case OPERATOR_MINUS : case OPERATOR_MULTIPLY : case OPERATOR_DIVIDE : case OPERATOR_CONCAT : case OPERATOR_MOD : case COMPARE_IN : return isParameterized ( elm . children . get ( 0 ) ) || isParameterized ( elm . children . get ( 1 ) ) ; case OPERATOR_IS_NULL : // one operator case OPERATOR_EXISTS : case OPERATOR_NOT : case OPERATOR_UNARY_MINUS : return isParameterized ( elm . children . get ( 0 ) ) ; default : assert false ; return false ; } } }
|
Helper to check whether a VoltXMLElement contains a parameter.
| 426
| 12
|
155,592
|
private static String getType ( Database db , VoltXMLElement elm ) { final String type = elm . getStringAttribute ( "valuetype" , "" ) ; if ( ! type . isEmpty ( ) ) { return type ; } else if ( elm . name . equals ( "columnref" ) ) { final String tblName = elm . getStringAttribute ( "table" , "" ) ; final int colIndex = elm . getIntAttribute ( "index" , 0 ) ; return StreamSupport . stream ( db . getTables ( ) . spliterator ( ) , false ) . filter ( tbl -> tbl . getTypeName ( ) . equals ( tblName ) ) . findAny ( ) . flatMap ( tbl -> StreamSupport . stream ( tbl . getColumns ( ) . spliterator ( ) , false ) . filter ( col -> col . getIndex ( ) == colIndex ) . findAny ( ) ) . map ( Column :: getType ) . map ( typ -> VoltType . get ( ( byte ) ( ( int ) typ ) ) . getName ( ) ) . orElse ( "" ) ; } else { return "" ; } }
|
Get the underlying type of the VoltXMLElement node. Needs a reference to the catalog for a PVE.
| 255
| 20
|
155,593
|
private static String guessParameterType ( Database db , VoltXMLElement elm ) { if ( ! isParameterized ( elm ) || ! elm . name . equals ( "operation" ) ) { return "" ; } else { final ExpressionType op = mapOfVoltXMLOpType . get ( elm . attributes . get ( "optype" ) ) ; assert op != null ; switch ( op ) { case CONJUNCTION_OR : case CONJUNCTION_AND : case OPERATOR_NOT : return "boolean" ; case COMPARE_GREATERTHAN : // For these 2 operator-ops, the type is what the non-parameterized part gets set to. case COMPARE_LESSTHAN : case COMPARE_EQUAL : case COMPARE_NOTEQUAL : case COMPARE_GREATERTHANOREQUALTO : case COMPARE_LESSTHANOREQUALTO : case OPERATOR_PLUS : case OPERATOR_MINUS : case OPERATOR_MULTIPLY : case OPERATOR_DIVIDE : case OPERATOR_CONCAT : case OPERATOR_MOD : case COMPARE_IN : final VoltXMLElement left = elm . children . get ( 0 ) , right = elm . children . get ( 1 ) ; return isParameterized ( left ) ? getType ( db , right ) : getType ( db , left ) ; case OPERATOR_UNARY_MINUS : return "integer" ; case OPERATOR_IS_NULL : case OPERATOR_EXISTS : return "" ; default : assert false ; return "" ; } } }
|
Guess from a parent node what the parameter type of its child node should be, should one of its child nodes contain a parameter.
| 350
| 24
|
155,594
|
public static boolean reduce ( AbstractExpression expr , Predicate < AbstractExpression > pred ) { final boolean current = pred . test ( expr ) ; if ( current ) { return true ; } else if ( expr == null ) { return pred . test ( null ) ; } else { return pred . test ( expr . getLeft ( ) ) || pred . test ( expr . getRight ( ) ) || expr . getArgs ( ) != null && expr . getArgs ( ) . stream ( ) . anyMatch ( pred ) ; } }
|
Check if any node of the given expression tree satisfies the given predicate.
| 112
| 11
|
155,595
|
public static Collection < AbstractExpression > uncombineAny ( AbstractExpression expr ) { ArrayDeque < AbstractExpression > out = new ArrayDeque < AbstractExpression > ( ) ; if ( expr != null ) { ArrayDeque < AbstractExpression > in = new ArrayDeque < AbstractExpression > ( ) ; // this chunk of code breaks the code into a list of expression that // all have to be true for the where clause to be true in . add ( expr ) ; AbstractExpression inExpr = null ; while ( ( inExpr = in . poll ( ) ) != null ) { if ( inExpr . getExpressionType ( ) == ExpressionType . CONJUNCTION_AND ) { in . add ( inExpr . getLeft ( ) ) ; in . add ( inExpr . getRight ( ) ) ; } else { out . add ( inExpr ) ; } } } return out ; }
|
Convert one or more predicates, potentially in an arbitrarily nested conjunction tree, into a flattened collection. Similar to uncombine, but for arbitrary tree shapes and with no guarantee of the result collection type or of any ordering within the collection. In fact, it currently fills an ArrayDeque via a left-to-right breadth-first traversal, but for no particular reason, so that's all subject to change.
| 202
| 79
|
155,596
|
public static List < TupleValueExpression > getTupleValueExpressions ( AbstractExpression input ) { ArrayList < TupleValueExpression > tves = new ArrayList < TupleValueExpression > ( ) ; // recursive stopping steps if ( input == null ) { return tves ; } else if ( input instanceof TupleValueExpression ) { tves . add ( ( TupleValueExpression ) input ) ; return tves ; } // recursive calls tves . addAll ( getTupleValueExpressions ( input . m_left ) ) ; tves . addAll ( getTupleValueExpressions ( input . m_right ) ) ; if ( input . m_args != null ) { for ( AbstractExpression argument : input . m_args ) { tves . addAll ( getTupleValueExpressions ( argument ) ) ; } } return tves ; }
|
Recursively walk an expression and return a list of all the tuple value expressions it contains .
| 191
| 19
|
155,597
|
private static boolean subqueryRequiresScalarValueExpressionFromContext ( AbstractExpression parentExpr ) { if ( parentExpr == null ) { // No context: we are a top-level expression. E.g, an item on the // select list. In this case, assume the expression must be scalar. return true ; } // Exists and comparison operators can handle non-scalar subqueries. if ( parentExpr . getExpressionType ( ) == ExpressionType . OPERATOR_EXISTS || parentExpr instanceof ComparisonExpression ) { return false ; } // There is already a ScalarValueExpression above the subquery. if ( parentExpr instanceof ScalarValueExpression ) { return false ; } // By default, assume that the subquery must produce a single value. return true ; }
|
Return true if we must insert a ScalarValueExpression between a subquery and its parent expression .
| 178
| 21
|
155,598
|
private static AbstractExpression addScalarValueExpression ( SelectSubqueryExpression expr ) { if ( expr . getSubqueryScan ( ) . getOutputSchema ( ) . size ( ) != 1 ) { throw new PlanningErrorException ( "Scalar subquery can have only one output column" ) ; } expr . changeToScalarExprType ( ) ; AbstractExpression scalarExpr = new ScalarValueExpression ( ) ; scalarExpr . setLeft ( expr ) ; scalarExpr . setValueType ( expr . getValueType ( ) ) ; scalarExpr . setValueSize ( expr . getValueSize ( ) ) ; return scalarExpr ; }
|
Add a ScalarValueExpression on top of the SubqueryExpression
| 152
| 15
|
155,599
|
public ClientResponseImpl shouldAccept ( String name , AuthSystem . AuthUser user , final StoredProcedureInvocation task , final Procedure catProc ) { if ( user . isAuthEnabled ( ) ) { InvocationPermissionPolicy deniedPolicy = null ; InvocationPermissionPolicy . PolicyResult res = InvocationPermissionPolicy . PolicyResult . DENY ; for ( InvocationPermissionPolicy policy : m_permissionpolicies ) { res = policy . shouldAccept ( user , task , catProc ) ; if ( res == InvocationPermissionPolicy . PolicyResult . ALLOW ) { deniedPolicy = null ; break ; } if ( res == InvocationPermissionPolicy . PolicyResult . DENY ) { if ( deniedPolicy == null ) { //Take first denied response only. deniedPolicy = policy ; } } } if ( deniedPolicy != null ) { return deniedPolicy . getErrorResponse ( user , task , catProc ) ; } //We must have an explicit allow on of the policy must grant access. assert ( res == InvocationPermissionPolicy . PolicyResult . ALLOW ) ; return null ; } //User authentication is disabled. (auth disabled user) return null ; }
|
For an auth-disabled user, the first policy will return ALLOW, breaking the loop.
| 253
| 15
|