idx
int64 0
165k
| question
stringlengths 73
4.15k
| target
stringlengths 5
918
| len_question
int64 21
890
| len_target
int64 3
255
|
|---|---|---|---|---|
154,700
|
private static < E > ListIterator < E > constrainedListIterator ( ListIterator < E > listIterator , Constraint < ? super E > constraint ) { return new ConstrainedListIterator < E > ( listIterator , constraint ) ; }
|
Returns a constrained view of the specified list iterator using the specified constraint . Any operations that would add new elements to the underlying list will be verified by the constraint .
| 51
| 32
|
154,701
|
public final Index createIndex ( PersistentStore store , HsqlName name , int [ ] columns , boolean [ ] descending , boolean [ ] nullsLast , boolean unique , boolean migrating , boolean constraint , boolean forward ) { Index newIndex = createAndAddIndexStructure ( name , columns , descending , nullsLast , unique , migrating , constraint , forward ) ; return newIndex ; }
|
Create new memory - resident index . For MEMORY and TEXT tables .
| 81
| 14
|
154,702
|
public Type getCombinedType ( Type other , int operation ) { if ( operation != OpTypes . CONCAT ) { return getAggregateType ( other ) ; } Type newType ; long newPrecision = precision + other . precision ; switch ( other . typeCode ) { case Types . SQL_ALL_TYPES : return this ; case Types . SQL_BIT : newType = this ; break ; case Types . SQL_BIT_VARYING : newType = other ; break ; case Types . SQL_BINARY : case Types . SQL_VARBINARY : case Types . SQL_BLOB : return other . getCombinedType ( this , operation ) ; default : throw Error . error ( ErrorCode . X_42562 ) ; } if ( newPrecision > maxBitPrecision ) { if ( typeCode == Types . SQL_BIT ) { // Standard disallows type length reduction throw Error . error ( ErrorCode . X_42570 ) ; } newPrecision = maxBitPrecision ; } return getBitType ( newType . typeCode , newPrecision ) ; }
|
Returns type for concat
| 234
| 5
|
154,703
|
public VoltTable [ ] run ( SystemProcedureExecutionContext ctx ) { // Choose the lowest site ID on this host to actually flip the bit if ( ctx . isLowestSiteId ( ) ) { VoltDBInterface voltdb = VoltDB . instance ( ) ; OperationMode opMode = voltdb . getMode ( ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "voltdb opmode is " + opMode ) ; } ZooKeeper zk = voltdb . getHostMessenger ( ) . getZK ( ) ; try { Stat stat ; OperationMode zkMode = null ; Code code ; do { stat = new Stat ( ) ; code = Code . BADVERSION ; try { byte [ ] data = zk . getData ( VoltZK . operationMode , false , stat ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "zkMode is " + ( zkMode == null ? "(null)" : OperationMode . valueOf ( data ) ) ) ; } zkMode = data == null ? opMode : OperationMode . valueOf ( data ) ; if ( zkMode == PAUSED ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "read node at version " + stat . getVersion ( ) + ", txn " + ll ( stat . getMzxid ( ) ) ) ; } break ; } stat = zk . setData ( VoltZK . operationMode , PAUSED . getBytes ( ) , stat . getVersion ( ) ) ; code = Code . OK ; zkMode = PAUSED ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "!WROTE! node at version " + stat . getVersion ( ) + ", txn " + ll ( stat . getMzxid ( ) ) ) ; } break ; } catch ( BadVersionException ex ) { code = ex . code ( ) ; } } while ( zkMode != PAUSED && code == Code . BADVERSION ) ; m_stat = stat ; voltdb . getHostMessenger ( ) . pause ( ) ; voltdb . setMode ( PAUSED ) ; // for snmp SnmpTrapSender snmp = voltdb . getSnmpTrapSender ( ) ; if ( snmp != null ) { snmp . pause ( "Cluster paused." ) ; } } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } // Force a tick so that stats will be updated. // Primarily added to get latest table stats for DR pause and empty db check. ctx . getSiteProcedureConnection ( ) . tick ( ) ; VoltTable t = new VoltTable ( VoltSystemProcedure . STATUS_SCHEMA ) ; t . addRow ( VoltSystemProcedure . 
STATUS_OK ) ; return ( new VoltTable [ ] { t } ) ; }
|
Enter admin mode
| 639
| 3
|
154,704
|
@ Override public void setGeneratedColumnInfo ( int generate , ResultMetaData meta ) { // can support INSERT_SELECT also if ( type != StatementTypes . INSERT ) { return ; } int colIndex = baseTable . getIdentityColumnIndex ( ) ; if ( colIndex == - 1 ) { return ; } switch ( generate ) { case ResultConstants . RETURN_NO_GENERATED_KEYS : return ; case ResultConstants . RETURN_GENERATED_KEYS_COL_INDEXES : int [ ] columnIndexes = meta . getGeneratedColumnIndexes ( ) ; if ( columnIndexes . length != 1 ) { return ; } if ( columnIndexes [ 0 ] != colIndex ) { return ; } // $FALL-THROUGH$ case ResultConstants . RETURN_GENERATED_KEYS : generatedIndexes = new int [ ] { colIndex } ; break ; case ResultConstants . RETURN_GENERATED_KEYS_COL_NAMES : String [ ] columnNames = meta . getGeneratedColumnNames ( ) ; if ( columnNames . length != 1 ) { return ; } if ( baseTable . findColumn ( columnNames [ 0 ] ) != colIndex ) { return ; } generatedIndexes = new int [ ] { colIndex } ; break ; } generatedResultMetaData = ResultMetaData . newResultMetaData ( generatedIndexes . length ) ; for ( int i = 0 ; i < generatedIndexes . length ; i ++ ) { ColumnSchema column = baseTable . getColumn ( generatedIndexes [ i ] ) ; generatedResultMetaData . columns [ i ] = column ; } generatedResultMetaData . prepareData ( ) ; }
|
For the creation of the statement
| 371
| 6
|
154,705
|
void checkAccessRights ( Session session ) { if ( targetTable != null && ! targetTable . isTemp ( ) ) { targetTable . checkDataReadOnly ( ) ; session . checkReadWrite ( ) ; } if ( session . isAdmin ( ) ) { return ; } for ( int i = 0 ; i < sequences . length ; i ++ ) { session . getGrantee ( ) . checkAccess ( sequences [ i ] ) ; } for ( int i = 0 ; i < routines . length ; i ++ ) { if ( routines [ i ] . isLibraryRoutine ( ) ) { continue ; } session . getGrantee ( ) . checkAccess ( routines [ i ] ) ; } for ( int i = 0 ; i < rangeVariables . length ; i ++ ) { RangeVariable range = rangeVariables [ i ] ; if ( range . rangeTable . getSchemaName ( ) == SqlInvariants . SYSTEM_SCHEMA_HSQLNAME ) { continue ; } session . getGrantee ( ) . checkSelect ( range . rangeTable , range . usedColumns ) ; } switch ( type ) { case StatementTypes . CALL : { break ; } case StatementTypes . INSERT : { session . getGrantee ( ) . checkInsert ( targetTable , insertCheckColumns ) ; break ; } case StatementTypes . SELECT_CURSOR : break ; case StatementTypes . DELETE_WHERE : { session . getGrantee ( ) . checkDelete ( targetTable ) ; break ; } case StatementTypes . UPDATE_WHERE : { session . getGrantee ( ) . checkUpdate ( targetTable , updateCheckColumns ) ; break ; } case StatementTypes . MERGE : { session . getGrantee ( ) . checkInsert ( targetTable , insertCheckColumns ) ; session . getGrantee ( ) . checkUpdate ( targetTable , updateCheckColumns ) ; break ; } } }
|
Determines if the authorizations are adequate to execute the compiled object . Completion requires the list of all database objects in a compiled statement .
| 408
| 29
|
154,706
|
@ Override public ResultMetaData getResultMetaData ( ) { switch ( type ) { case StatementTypes . DELETE_WHERE : case StatementTypes . INSERT : case StatementTypes . UPDATE_WHERE : case StatementTypes . MIGRATE_WHERE : return ResultMetaData . emptyResultMetaData ; default : throw Error . runtimeError ( ErrorCode . U_S0500 , "CompiledStatement.getResultMetaData()" ) ; } }
|
Returns the metadata which is empty if the CompiledStatement does not generate a Result .
| 97
| 17
|
154,707
|
@ Override public String describe ( Session session ) { try { return describeImpl ( session ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; return e . toString ( ) ; } }
|
Retrieves a String representation of this object .
| 46
| 10
|
154,708
|
static boolean isGroupByColumn ( QuerySpecification select , int index ) { if ( ! select . isGrouped ) { return false ; } for ( int ii = 0 ; ii < select . groupIndex . getColumnCount ( ) ; ii ++ ) { if ( index == select . groupIndex . getColumns ( ) [ ii ] ) { return true ; } } return false ; }
|
Returns true if the specified exprColumn index is in the list of column indices specified by groupIndex
| 82
| 19
|
154,709
|
private static List < Expression > getDisplayColumnsForSetOp ( QueryExpression queryExpr ) { assert ( queryExpr != null ) ; if ( queryExpr . getLeftQueryExpression ( ) == null ) { // end of recursion. This is a QuerySpecification assert ( queryExpr instanceof QuerySpecification ) ; QuerySpecification select = ( QuerySpecification ) queryExpr ; return select . displayCols ; } else { // recurse return getDisplayColumnsForSetOp ( queryExpr . getLeftQueryExpression ( ) ) ; } }
|
Return a list of the display columns for the left most statement from a set op
| 123
| 16
|
154,710
|
protected static List < VoltXMLElement > voltGetLimitOffsetXMLFromSortAndSlice ( Session session , SortAndSlice sortAndSlice ) throws HSQLParseException { List < VoltXMLElement > result = new ArrayList <> ( ) ; if ( sortAndSlice == null || sortAndSlice == SortAndSlice . noSort ) { return result ; } if ( sortAndSlice . limitCondition != null ) { Expression limitCondition = sortAndSlice . limitCondition ; if ( limitCondition . nodes . length != 2 ) { throw new HSQLParseException ( "Parser did not create limit and offset expression for LIMIT." ) ; } try { // read offset. it may be a parameter token. VoltXMLElement offset = new VoltXMLElement ( "offset" ) ; Expression offsetExpr = limitCondition . getLeftNode ( ) ; if ( offsetExpr . isParam == false ) { Integer offsetValue = ( Integer ) offsetExpr . getValue ( session ) ; if ( offsetValue > 0 ) { Expression expr = new ExpressionValue ( offsetValue , org . hsqldb_voltpatches . types . Type . SQL_BIGINT ) ; offset . children . add ( expr . voltGetXML ( session ) ) ; offset . attributes . put ( "offset" , offsetValue . toString ( ) ) ; } } else { offset . attributes . put ( "offset_paramid" , offsetExpr . getUniqueId ( session ) ) ; } result . add ( offset ) ; // Limit may be null (offset with no limit), or // it may be a parameter Expression limitExpr = limitCondition . getRightNode ( ) ; if ( limitExpr != null ) { VoltXMLElement limit = new VoltXMLElement ( "limit" ) ; if ( limitExpr . isParam == false ) { Integer limitValue = ( Integer ) limitExpr . getValue ( session ) ; Expression expr = new ExpressionValue ( limitValue , org . hsqldb_voltpatches . types . Type . SQL_BIGINT ) ; limit . children . add ( expr . voltGetXML ( session ) ) ; limit . attributes . put ( "limit" , limitValue . toString ( ) ) ; } else { limit . attributes . put ( "limit_paramid" , limitExpr . getUniqueId ( session ) ) ; } result . add ( limit ) ; } } catch ( HsqlException ex ) { // XXX really? ex . printStackTrace ( ) ; } } return result ; }
|
return a list of VoltXMLElements that need to be added to the statement XML for LIMIT and OFFSET
| 551
| 24
|
154,711
|
public static Object getObjectFromString ( VoltType type , String value ) throws ParseException { Object ret = null ; switch ( type ) { // NOTE: All runtime integer parameters are actually Longs,so we will have problems // if we actually try to convert the object to one of the smaller numeric sizes // -------------------------------- // INTEGERS // -------------------------------- case TINYINT : //ret = Byte.valueOf(value); //break; case SMALLINT : //ret = Short.valueOf(value); //break; case INTEGER : //ret = Integer.valueOf(value); //break; case BIGINT : ret = Long . valueOf ( value ) ; break ; // -------------------------------- // FLOATS // -------------------------------- case FLOAT : ret = Double . valueOf ( value ) ; break ; // -------------------------------- // STRINGS // -------------------------------- case STRING : ret = value ; break ; case DECIMAL : case VARBINARY : if ( value != null ) { throw new RuntimeException ( "Only NULL default values for DECIMAL " + "and VARBINARY columns are supported right now" ) ; } break ; // -------------------------------- // TIMESTAMP // -------------------------------- case TIMESTAMP : { // Support either long values (microseconds since epoch) or timestamp strings. try { // Try to parse it as a long first. ret = new TimestampType ( Long . parseLong ( value ) ) ; } catch ( NumberFormatException e ) { // It failed to parse as a long - parse it as a timestamp string. Date date = new SimpleDateFormat ( "EEE MMM dd HH:mm:ss zzz yyyy" ) . parse ( value ) ; ret = new TimestampType ( date . getTime ( ) * 1000 ) ; } break ; } // -------------------------------- // INVALID // -------------------------------- default : LOG . severe ( "ERROR: Unable to get object from string for invalid ValueType '" + type + "'" ) ; } return ( ret ) ; }
|
Returns a casted object of the input value string based on the given type
| 412
| 15
|
154,712
|
public static VoltType getNumericLiteralType ( VoltType vt , String value ) { try { Long . parseLong ( value ) ; } catch ( NumberFormatException e ) { // Our DECIMAL may not be bigger/smaller enough to store the constant value return VoltType . DECIMAL ; } return vt ; }
|
If the type is NUMERIC from hsqldb VoltDB has to decide its real type . It s either INTEGER or DECIMAL according to the SQL Standard . Thanks for Hsqldb 1 . 9 FLOAT literal values have been handled well with E sign .
| 72
| 59
|
154,713
|
private static void writeLength ( ByteBuffer buf , int length ) { assert ( length >= 0 ) ; assert ( length == ( length & ( ~ length + 1 ) ) ) ; // check if power of two // shockingly fast log_2 uses intrinsics in JDK >= 1.7 byte log2size = ( byte ) ( 32 - Integer . numberOfLeadingZeros ( length ) ) ; buf . put ( log2size ) ; }
|
Over - clever way to store lots of lengths in a single byte Length must be a power of two > = 0 .
| 93
| 24
|
154,714
|
public OpsAgent getAgent ( OpsSelector selector ) { OpsAgent agent = m_agents . get ( selector ) ; assert ( agent != null ) ; return agent ; }
|
Return the OpsAgent for the specified selector .
| 36
| 9
|
154,715
|
public void shutdown ( ) { for ( Entry < OpsSelector , OpsAgent > entry : m_agents . entrySet ( ) ) { try { entry . getValue ( ) . shutdown ( ) ; } catch ( InterruptedException e ) { } } m_agents . clear ( ) ; }
|
Shutdown all the OpsAgent s executor services . Should be possible to eventually consolidate all of them into a single executor service .
| 62
| 27
|
154,716
|
public String add ( ProcedureDescriptor descriptor ) throws VoltCompilerException { assert descriptor != null ; String className = descriptor . m_className ; assert className != null && ! className . trim ( ) . isEmpty ( ) ; String shortName = deriveShortProcedureName ( className ) ; if ( m_procedureMap . containsKey ( shortName ) ) { throw m_compiler . new VoltCompilerException ( String . format ( "Procedure \"%s\" is already defined" , className ) ) ; } m_procedureMap . put ( shortName , descriptor ) ; return shortName ; }
|
Tracks the given procedure descriptor if it is not already tracked
| 138
| 12
|
154,717
|
public void removeProcedure ( String procName , boolean ifExists ) throws VoltCompilerException { assert procName != null && ! procName . trim ( ) . isEmpty ( ) ; String shortName = deriveShortProcedureName ( procName ) ; if ( m_procedureMap . containsKey ( shortName ) ) { m_procedureMap . remove ( shortName ) ; } else if ( ! ifExists ) { throw m_compiler . new VoltCompilerException ( String . format ( "Dropped Procedure \"%s\" is not defined" , procName ) ) ; } }
|
Searches for and removes the Procedure provided in prior DDL statements
| 132
| 14
|
154,718
|
public void addProcedurePartitionInfoTo ( String procedureName , ProcedurePartitionData data ) throws VoltCompilerException { ProcedureDescriptor descriptor = m_procedureMap . get ( procedureName ) ; if ( descriptor == null ) { throw m_compiler . new VoltCompilerException ( String . format ( "Partition references an undefined procedure \"%s\"" , procedureName ) ) ; } // need to re-instantiate as descriptor fields are final if ( descriptor . m_stmtLiterals == null ) { // the longer form constructor asserts on singleStatement descriptor = m_compiler . new ProcedureDescriptor ( descriptor . m_authGroups , descriptor . m_class , data ) ; } else { descriptor = m_compiler . new ProcedureDescriptor ( descriptor . m_authGroups , descriptor . m_className , descriptor . m_stmtLiterals , descriptor . m_joinOrder , data , false , descriptor . m_class ) ; } m_procedureMap . put ( procedureName , descriptor ) ; }
|
Associates the given partition info to the given tracked procedure
| 229
| 12
|
154,719
|
void addExportedTable ( String tableName , String targetName , boolean isStream ) { assert tableName != null && ! tableName . trim ( ) . isEmpty ( ) ; assert targetName != null && ! targetName . trim ( ) . isEmpty ( ) ; // store uppercase in the catalog as typename targetName = targetName . toUpperCase ( ) ; if ( isStream ) { // insert the table's name into the export group NavigableSet < String > tableGroup = m_exportsByTargetName . get ( targetName ) ; if ( tableGroup == null ) { tableGroup = new TreeSet <> ( ) ; m_exportsByTargetName . put ( targetName , tableGroup ) ; } tableGroup . add ( tableName ) ; return ; } m_persistentTableTargetMap . put ( tableName , targetName ) ; }
|
Track an exported table
| 188
| 4
|
154,720
|
static < E > ImmutableList < E > asImmutableList ( Object [ ] elements , int length ) { switch ( length ) { case 0 : return of ( ) ; case 1 : @ SuppressWarnings ( "unchecked" ) // collection had only Es in it ImmutableList < E > list = new SingletonImmutableList < E > ( ( E ) elements [ 0 ] ) ; return list ; default : if ( length < elements . length ) { elements = arraysCopyOf ( elements , length ) ; } return new RegularImmutableList < E > ( elements ) ; } }
|
Views the array as an immutable list . Copies if the specified range does not cover the complete array . Does not check for nulls .
| 127
| 29
|
154,721
|
@ CanIgnoreReturnValue // TODO(kak): Consider removing this public < E extends T > E min ( Iterable < E > iterable ) { return min ( iterable . iterator ( ) ) ; }
|
Returns the least of the specified values according to this ordering . If there are multiple least values the first of those is returned .
| 46
| 25
|
154,722
|
static long calculateAverage ( long currAvg , long currInvoc , long rowAvg , long rowInvoc ) { long currTtl = currAvg * currInvoc ; long rowTtl = rowAvg * rowInvoc ; // If both are 0, then currTtl, rowTtl are also 0. if ( ( currInvoc + rowInvoc ) == 0L ) { return 0L ; } else { return ( currTtl + rowTtl ) / ( currInvoc + rowInvoc ) ; } }
|
Given a running average and the running invocation total as well as a new row s average and invocation total return a new running average
| 121
| 25
|
154,723
|
static void addToRecentConnectionSettings ( Hashtable settings , ConnectionSetting newSetting ) throws IOException { settings . put ( newSetting . getName ( ) , newSetting ) ; ConnectionDialogCommon . storeRecentConnectionSettings ( settings ) ; }
|
Adds the new settings name if it does not nexist or overwrites the old one .
| 50
| 19
|
154,724
|
private static void storeRecentConnectionSettings ( Hashtable settings ) { try { if ( recentSettings == null ) { setHomeDir ( ) ; if ( homedir == null ) { return ; } recentSettings = new File ( homedir , fileName ) ; if ( ! recentSettings . exists ( ) ) { // recentSettings.createNewFile(); } } if ( settings == null || settings . size ( ) == 0 ) { return ; } // setup a stream to a physical file on the filesystem FileOutputStream out = new FileOutputStream ( recentSettings ) ; ObjectOutputStream objStream = new ObjectOutputStream ( out ) ; Enumeration en = settings . elements ( ) ; while ( en . hasMoreElements ( ) ) { objStream . writeObject ( en . nextElement ( ) ) ; } objStream . flush ( ) ; objStream . close ( ) ; out . close ( ) ; } catch ( Throwable t ) { } }
|
Here s a non - secure method of storing recent connection settings .
| 201
| 13
|
154,725
|
static void deleteRecentConnectionSettings ( ) { try { if ( recentSettings == null ) { setHomeDir ( ) ; if ( homedir == null ) { return ; } recentSettings = new File ( homedir , fileName ) ; } if ( ! recentSettings . exists ( ) ) { recentSettings = null ; return ; } recentSettings . delete ( ) ; recentSettings = null ; } catch ( Throwable t ) { } }
|
Removes the recent connection settings file store .
| 93
| 9
|
154,726
|
public Map < Integer , ClientAffinityStats > getAffinityStats ( ) { Map < Integer , ClientAffinityStats > retval = new TreeMap < Integer , ClientAffinityStats > ( ) ; for ( Entry < Integer , ClientAffinityStats > e : m_currentAffinity . entrySet ( ) ) { if ( m_baselineAffinity . containsKey ( e . getKey ( ) ) ) { retval . put ( e . getKey ( ) , ClientAffinityStats . diff ( e . getValue ( ) , m_baselineAffinity . get ( e . getKey ( ) ) ) ) ; } else { retval . put ( e . getKey ( ) , ( ClientAffinityStats ) e . getValue ( ) . clone ( ) ) ; } } return retval ; }
|
Get the client affinity stats . Will only be populated if client affinity is enabled .
| 174
| 16
|
154,727
|
public ClientAffinityStats getAggregateAffinityStats ( ) { long afWrites = 0 ; long afReads = 0 ; long rrWrites = 0 ; long rrReads = 0 ; Map < Integer , ClientAffinityStats > affinityStats = getAffinityStats ( ) ; for ( Entry < Integer , ClientAffinityStats > e : affinityStats . entrySet ( ) ) { afWrites += e . getValue ( ) . getAffinityWrites ( ) ; afReads += e . getValue ( ) . getAffinityReads ( ) ; rrWrites += e . getValue ( ) . getRrWrites ( ) ; rrReads += e . getValue ( ) . getRrReads ( ) ; } ClientAffinityStats retval = new ClientAffinityStats ( Integer . MAX_VALUE , afWrites , rrWrites , afReads , rrReads ) ; return retval ; }
|
Roll up the per - partition affinity stats and return the totals for each of the four categories . Will only be populated if client affinity is enabled .
| 207
| 29
|
154,728
|
public static HSQLDDLInfo preprocessHSQLDDL ( String ddl ) { ddl = SQLLexer . stripComments ( ddl ) ; Matcher matcher = HSQL_DDL_PREPROCESSOR . matcher ( ddl ) ; if ( matcher . find ( ) ) { String verbString = matcher . group ( "verb" ) ; HSQLDDLInfo . Verb verb = HSQLDDLInfo . Verb . get ( verbString ) ; if ( verb == null ) { return null ; } String nounString = matcher . group ( "object" ) ; HSQLDDLInfo . Noun noun = HSQLDDLInfo . Noun . get ( nounString ) ; if ( noun == null ) { return null ; } boolean createStream = verb . equals ( HSQLDDLInfo . Verb . CREATE ) && noun . equals ( HSQLDDLInfo . Noun . STREAM ) ; String name = matcher . group ( "name" ) ; if ( name == null ) { return null ; } String secondName = matcher . group ( "subject" ) ; if ( secondName != null ) { secondName = secondName . toLowerCase ( ) ; } // cascade/if exists are interesting on alters and drops boolean cascade = false ; boolean ifexists = false ; if ( verb != HSQLDDLInfo . Verb . CREATE ) { matcher = DDL_IFEXISTS_OR_CASCADE_CHECK . matcher ( ddl ) ; if ( matcher . matches ( ) ) { // Don't be too sensitive to regex specifics by assuming null always // indicates a missing clause. Look for empty too. String existsClause = matcher . group ( "exists" ) ; String cascadeClause = matcher . group ( "cascade" ) ; ifexists = existsClause != null && ! existsClause . isEmpty ( ) ; cascade = cascadeClause != null && ! cascadeClause . isEmpty ( ) ; } } return new HSQLDDLInfo ( verb , noun , name . toLowerCase ( ) , secondName , cascade , ifexists , createStream ) ; } return null ; }
|
Glean some basic info about DDL statements sent to HSQLDB
| 471
| 14
|
154,729
|
public void doRestart ( List < Long > masters , Map < Integer , Long > partitionMasters ) { List < Long > copy = new ArrayList < Long > ( masters ) ; m_restartMasters . set ( copy ) ; m_restartMastersMap . set ( Maps . newHashMap ( partitionMasters ) ) ; }
|
Update the list of partition masters to be used when this transaction is restarted . Currently thread - safe because we call this before poisoning the MP Transaction to restart it and only do this sequentially from the repairing thread .
| 74
| 43
|
154,730
|
public boolean activate ( SystemProcedureExecutionContext context , boolean undo , byte [ ] predicates ) { if ( ! context . activateTableStream ( m_tableId , m_type , undo , predicates ) ) { String tableName = CatalogUtil . getTableNameFromId ( context . getDatabase ( ) , m_tableId ) ; log . debug ( "Attempted to activate a table stream of type " + m_type + "for table " + tableName + " and failed" ) ; return false ; } return true ; }
|
Activate the stream with the given predicates on the given table .
| 117
| 14
|
154,731
|
public Pair < ListenableFuture < ? > , Boolean > streamMore ( SystemProcedureExecutionContext context , List < DBBPool . BBContainer > outputBuffers , int [ ] rowCountAccumulator ) { ListenableFuture < ? > writeFuture = null ; prepareBuffers ( outputBuffers ) ; Pair < Long , int [ ] > serializeResult = context . tableStreamSerializeMore ( m_tableId , m_type , outputBuffers ) ; if ( serializeResult . getFirst ( ) == SERIALIZATION_ERROR ) { // Cancel the snapshot here for ( DBBPool . BBContainer container : outputBuffers ) { container . discard ( ) ; } SnapshotSerializationException ex = new SnapshotSerializationException ( "Snapshot of table " + m_tableId + " failed to complete." ) ; for ( SnapshotTableTask task : m_tableTasks ) { task . m_target . reportSerializationFailure ( ex ) ; } return Pair . of ( null , false ) ; } if ( serializeResult . getSecond ( ) [ 0 ] > 0 ) { if ( rowCountAccumulator != null && rowCountAccumulator . length == 1 ) { rowCountAccumulator [ 0 ] += getTupleDataRowCount ( outputBuffers ) ; } writeFuture = writeBlocksToTargets ( outputBuffers , serializeResult . getSecond ( ) ) ; } else { // Return all allocated snapshot output buffers for ( DBBPool . BBContainer container : outputBuffers ) { container . discard ( ) ; } } return Pair . of ( writeFuture , serializeResult . getFirst ( ) > 0 ) ; }
|
Streams more tuples from the table .
| 362
| 9
|
154,732
|
private void prepareBuffers ( List < DBBPool . BBContainer > buffers ) { Preconditions . checkArgument ( buffers . size ( ) == m_tableTasks . size ( ) ) ; UnmodifiableIterator < SnapshotTableTask > iterator = m_tableTasks . iterator ( ) ; for ( DBBPool . BBContainer container : buffers ) { int headerSize = iterator . next ( ) . m_target . getHeaderSize ( ) ; final ByteBuffer buf = container . b ( ) ; buf . clear ( ) ; buf . position ( headerSize ) ; } }
|
Set the positions of the buffers to the start of the content leaving some room for the headers .
| 127
| 19
|
154,733
|
private ListenableFuture < ? > writeBlocksToTargets ( Collection < DBBPool . BBContainer > outputBuffers , int [ ] serialized ) { Preconditions . checkArgument ( m_tableTasks . size ( ) == serialized . length ) ; Preconditions . checkArgument ( outputBuffers . size ( ) == serialized . length ) ; final List < ListenableFuture < ? > > writeFutures = new ArrayList < ListenableFuture < ? > > ( outputBuffers . size ( ) ) ; // The containers, the data targets, and the serialized byte counts should all line up Iterator < DBBPool . BBContainer > containerIter = outputBuffers . iterator ( ) ; int serializedIndex = 0 ; for ( SnapshotTableTask task : m_tableTasks ) { final DBBPool . BBContainer container = containerIter . next ( ) ; /* * Finalize the buffer by setting position to 0 and limit to the last used byte */ final ByteBuffer buf = container . b ( ) ; buf . limit ( serialized [ serializedIndex ++ ] + task . m_target . getHeaderSize ( ) ) ; buf . position ( 0 ) ; Callable < DBBPool . BBContainer > valueForTarget = Callables . returning ( container ) ; if ( task . m_filters != null ) { for ( SnapshotDataFilter filter : task . m_filters ) { valueForTarget = filter . filter ( valueForTarget ) ; } } ListenableFuture < ? > writeFuture = task . m_target . write ( valueForTarget , m_tableId ) ; if ( writeFuture != null ) { writeFutures . add ( writeFuture ) ; } } // Wraps all write futures in one future return Futures . allAsList ( writeFutures ) ; }
|
Finalize the output buffers and write them to the corresponding data targets
| 397
| 13
|
154,734
|
public void recordValue ( final long value ) throws ArrayIndexOutOfBoundsException { long criticalValueAtEnter = recordingPhaser . writerCriticalSectionEnter ( ) ; try { activeHistogram . recordValue ( value ) ; } finally { recordingPhaser . writerCriticalSectionExit ( criticalValueAtEnter ) ; } }
|
Record a value
| 67
| 3
|
154,735
|
public static < K , V > HashBiMap < K , V > create ( int expectedSize ) { return new HashBiMap < K , V > ( expectedSize ) ; }
|
Constructs a new empty bimap with the specified expected size .
| 38
| 14
|
154,736
|
public void repairSurvivors ( ) { // cancel() and repair() must be synchronized by the caller (the deliver lock, // currently). If cancelled and the last repair message arrives, don't send // out corrections! if ( this . m_promotionResult . isCancelled ( ) ) { repairLogger . debug ( m_whoami + "Skipping repair message creation for cancelled Term." ) ; return ; } int queued = 0 ; if ( repairLogger . isDebugEnabled ( ) ) { repairLogger . debug ( m_whoami + "received all repair logs and is repairing surviving replicas." ) ; } for ( Iv2RepairLogResponseMessage li : m_repairLogUnion ) { if ( repairLogger . isDebugEnabled ( ) ) { repairLogger . debug ( m_whoami + "RepairResponse:\n" + li ) ; } List < Long > needsRepair = new ArrayList < Long > ( 5 ) ; for ( Entry < Long , ReplicaRepairStruct > entry : m_replicaRepairStructs . entrySet ( ) ) { if ( entry . getValue ( ) . needs ( li . getHandle ( ) ) ) { ++ queued ; if ( repairLogger . isDebugEnabled ( ) ) { repairLogger . debug ( m_whoami + "repairing " + CoreUtils . hsIdToString ( entry . getKey ( ) ) + ". Max seen " + TxnEgo . txnIdToString ( entry . getValue ( ) . m_maxSpHandleSeen ) + ". Repairing with " + TxnEgo . txnIdToString ( li . getHandle ( ) ) ) ; } needsRepair . add ( entry . getKey ( ) ) ; } } if ( ! needsRepair . isEmpty ( ) ) { if ( repairLogger . isDebugEnabled ( ) ) { repairLogger . debug ( m_whoami + "repairing: " + CoreUtils . hsIdCollectionToString ( needsRepair ) + " with message: " + li . getPayload ( ) ) ; } m_mailbox . repairReplicasWith ( needsRepair , li . getPayload ( ) ) ; } } if ( repairLogger . isDebugEnabled ( ) ) { repairLogger . debug ( m_whoami + "finished queuing " + queued + " replica repair messages." ) ; } m_promotionResult . set ( new RepairResult ( m_maxSeenTxnId ) ) ; }
|
Send missed - messages to survivors .
| 549
| 7
|
154,737
|
void init ( ResultMetaData meta , HsqlProperties props ) throws SQLException { resultMetaData = meta ; columnCount = resultMetaData . getColumnCount ( ) ; // fredt - props is null for internal connections, so always use the // default behaviour in this case // JDBCDriver.getPropertyInfo says // default is true useColumnName = ( props == null ) ? true : props . isPropertyTrue ( "get_column_name" , true ) ; }
|
Initializes this JDBCResultSetMetaData object from the specified Result and HsqlProperties objects .
| 104
| 21
|
154,738
|
Map < Long , Long > reconfigureOnFault ( Set < Long > hsIds , FaultMessage fm ) { return reconfigureOnFault ( hsIds , fm , new HashSet < Long > ( ) ) ; }
|
Convenience wrapper for tests that don t care about unknown sites
| 53
| 13
|
154,739
|
public Map < Long , Long > reconfigureOnFault ( Set < Long > hsIds , FaultMessage fm , Set < Long > unknownFaultedSites ) { boolean proceed = false ; do { Discard ignoreIt = mayIgnore ( hsIds , fm ) ; if ( Discard . DoNot == ignoreIt ) { m_inTrouble . put ( fm . failedSite , fm . witnessed || fm . decided ) ; m_recoveryLog . info ( "Agreement, Processing " + fm ) ; proceed = true ; } else { ignoreIt . log ( fm ) ; } if ( Discard . Unknown == ignoreIt ) { unknownFaultedSites . add ( fm . failedSite ) ; } fm = ( FaultMessage ) m_mailbox . recv ( justFailures ) ; } while ( fm != null ) ; if ( ! proceed ) { return ImmutableMap . of ( ) ; } m_inTroubleCount = m_inTrouble . size ( ) ; // we are here if failed site was not previously recorded // or it was previously recorded but it became witnessed from unwitnessed m_seeker . startSeekingFor ( Sets . difference ( hsIds , m_failedSites ) , m_inTrouble ) ; if ( m_recoveryLog . isDebugEnabled ( ) ) { m_recoveryLog . debug ( String . format ( "\n %s\n %s\n %s\n %s\n %s" , m_seeker . dumpAlive ( ) , m_seeker . dumpDead ( ) , m_seeker . dumpReported ( ) , m_seeker . dumpSurvivors ( ) , dumpInTrouble ( ) ) ) ; } discoverGlobalFaultData_send ( hsIds ) ; while ( discoverGlobalFaultData_rcv ( hsIds ) ) { Map < Long , Long > lastTxnIdByFailedSite = extractGlobalFaultData ( hsIds ) ; if ( lastTxnIdByFailedSite . isEmpty ( ) ) { return ImmutableMap . of ( ) ; } Set < Long > witnessed = Maps . filterValues ( m_inTrouble , equalTo ( Boolean . TRUE ) ) . keySet ( ) ; Set < Long > notClosed = Sets . difference ( witnessed , lastTxnIdByFailedSite . keySet ( ) ) ; if ( ! notClosed . isEmpty ( ) ) { m_recoveryLog . warn ( "Agreement, witnessed but not decided: [" + CoreUtils . hsIdCollectionToString ( notClosed ) + "] seeker: " + m_seeker ) ; } if ( ! notifyOnKill ( hsIds , lastTxnIdByFailedSite ) ) { continue ; } m_failedSites . addAll ( lastTxnIdByFailedSite . keySet ( ) ) ; m_failedSitesCount = m_failedSites . size ( ) ; m_recoveryLog . 
info ( "Agreement, Adding " + CoreUtils . hsIdCollectionToString ( lastTxnIdByFailedSite . keySet ( ) ) + " to failed sites history" ) ; clearInTrouble ( lastTxnIdByFailedSite . keySet ( ) ) ; m_seeker . clear ( ) ; return lastTxnIdByFailedSite ; } return ImmutableMap . of ( ) ; }
|
Process the fault message and if necessary start arbitration .
| 773
| 10
|
154,740
|
public static AbstractExpression createIndexExpressionForTable ( Table table , Map < Integer , Integer > ranges ) { HashRangeExpression predicate = new HashRangeExpression ( ) ; predicate . setRanges ( ranges ) ; predicate . setHashColumnIndex ( table . getPartitioncolumn ( ) . getIndex ( ) ) ; return predicate ; }
|
Create the expression used to build elastic index for a given table .
| 73
| 13
|
154,741
|
public void initialize ( ) throws Exception { List < Long > acctList = new ArrayList < Long > ( config . custcount * 2 ) ; List < String > stList = new ArrayList < String > ( config . custcount * 2 ) ; // generate customers System . out . println ( "generating " + config . custcount + " customers..." ) ; for ( int c = 0 ; c < config . custcount ; c ++ ) { if ( c % 10000 == 0 ) { System . out . println ( " " + c ) ; } PersonGenerator . Person p = gen . newPerson ( ) ; //int ac = rand.nextInt(areaCodes.length); client . callProcedure ( new BenchmarkCallback ( "CUSTOMER.insert" ) , "CUSTOMER.insert" , c , p . firstname , p . lastname , "Anytown" , p . state , p . phonenumber , p . dob , p . sex ) ; int accts = rand . nextInt ( 5 ) ; for ( int a = 0 ; a < accts ; a ++ ) { int acct_no = ( c * 100 ) + a ; client . callProcedure ( new BenchmarkCallback ( "ACCOUNT.insert" ) , "ACCOUNT.insert" , acct_no , c , rand . nextInt ( 10000 ) , rand . nextInt ( 10000 ) , new Date ( ) , "Y" ) ; acctList . add ( Long . valueOf ( acct_no ) ) ; stList . add ( p . state ) ; } } accounts = acctList . toArray ( new Long [ acctList . size ( ) ] ) ; acct_states = stList . toArray ( new String [ stList . size ( ) ] ) ; // generate vendor offers System . out . println ( "generating " + config . vendorcount + " vendors..." ) ; for ( int v = 0 ; v < config . vendorcount ; v ++ ) { if ( v % 10000 == 0 ) { System . out . println ( " " + v ) ; } client . callProcedure ( new BenchmarkCallback ( "VENDOR_OFFERS.insert" ) , "VENDOR_OFFERS.insert" , v , rand . nextInt ( 5 ) + 1 , 0 , rand . nextInt ( 5 ) + 1 , ( double ) rand . nextInt ( 100 ) , 0 , offers [ rand . nextInt ( offers . length ) ] ) ; } }
|
this gets run once before the benchmark begins
| 546
| 8
|
154,742
|
public static void createHierarchy ( ZooKeeper zk ) { LinkedList < ZKUtil . StringCallback > callbacks = new LinkedList < ZKUtil . StringCallback > ( ) ; for ( String node : CoreZK . ZK_HIERARCHY ) { ZKUtil . StringCallback cb = new ZKUtil . StringCallback ( ) ; callbacks . add ( cb ) ; zk . create ( node , null , Ids . OPEN_ACL_UNSAFE , CreateMode . PERSISTENT , cb , null ) ; } try { for ( ZKUtil . StringCallback cb : callbacks ) { cb . get ( ) ; } } catch ( Exception e ) { org . voltdb . VoltDB . crashLocalVoltDB ( e . getMessage ( ) , false , e ) ; } }
|
Creates the ZK directory nodes . Only the leader should do this .
| 192
| 15
|
154,743
|
public static int createRejoinNodeIndicator ( ZooKeeper zk , int hostId ) { try { zk . create ( rejoin_node_blocker , ByteBuffer . allocate ( 4 ) . putInt ( hostId ) . array ( ) , Ids . OPEN_ACL_UNSAFE , CreateMode . PERSISTENT ) ; } catch ( KeeperException e ) { if ( e . code ( ) == KeeperException . Code . NODEEXISTS ) { try { return ByteBuffer . wrap ( zk . getData ( rejoin_node_blocker , false , null ) ) . getInt ( ) ; } catch ( KeeperException e1 ) { if ( e1 . code ( ) != KeeperException . Code . NONODE ) { org . voltdb . VoltDB . crashLocalVoltDB ( "Unable to get the current rejoining node indicator" ) ; } } catch ( InterruptedException e1 ) { } } else { org . voltdb . VoltDB . crashLocalVoltDB ( "Unable to create rejoin node Indicator" , true , e ) ; } } catch ( InterruptedException e ) { org . voltdb . VoltDB . crashLocalVoltDB ( "Unable to create rejoin node Indicator" , true , e ) ; } return - 1 ; }
|
Creates a rejoin blocker for the given rejoining host . This prevents other hosts from rejoining at the same time .
| 288
| 25
|
154,744
|
public static boolean removeRejoinNodeIndicatorForHost ( ZooKeeper zk , int hostId ) { try { Stat stat = new Stat ( ) ; final int rejoiningHost = ByteBuffer . wrap ( zk . getData ( rejoin_node_blocker , false , stat ) ) . getInt ( ) ; if ( hostId == rejoiningHost ) { zk . delete ( rejoin_node_blocker , stat . getVersion ( ) ) ; return true ; } } catch ( KeeperException e ) { if ( e . code ( ) == KeeperException . Code . NONODE || e . code ( ) == KeeperException . Code . BADVERSION ) { // Okay if the rejoin blocker for the given hostId is already gone. return true ; } } catch ( InterruptedException e ) { return false ; } return false ; }
|
Removes the rejoin blocker if the current rejoin blocker contains the given host ID .
| 180
| 18
|
154,745
|
public static boolean removeJoinNodeIndicatorForHost ( ZooKeeper zk , int hostId ) { try { Stat stat = new Stat ( ) ; String path = ZKUtil . joinZKPath ( readyjoininghosts , Integer . toString ( hostId ) ) ; zk . getData ( path , false , stat ) ; zk . delete ( path , stat . getVersion ( ) ) ; return true ; } catch ( KeeperException e ) { if ( e . code ( ) == KeeperException . Code . NONODE || e . code ( ) == KeeperException . Code . BADVERSION ) { // Okay if the join indicator for the given hostId is already gone. return true ; } } catch ( InterruptedException e ) { return false ; } return false ; }
|
Removes the join indicator for the given host ID .
| 166
| 11
|
154,746
|
public static boolean isPartitionCleanupInProgress ( ZooKeeper zk ) throws KeeperException , InterruptedException { List < String > children = zk . getChildren ( VoltZK . leaders_initiators , null ) ; List < ZKUtil . ChildrenCallback > childrenCallbacks = Lists . newArrayList ( ) ; for ( String child : children ) { ZKUtil . ChildrenCallback callback = new ZKUtil . ChildrenCallback ( ) ; zk . getChildren ( ZKUtil . joinZKPath ( VoltZK . leaders_initiators , child ) , false , callback , null ) ; childrenCallbacks . add ( callback ) ; } for ( ZKUtil . ChildrenCallback callback : childrenCallbacks ) { if ( callback . get ( ) . isEmpty ( ) ) { return true ; } } return false ; }
|
Checks if the cluster suffered an aborted join or node shutdown and is still in the process of cleaning up .
| 185
| 22
|
154,747
|
public boolean isNullable ( ) { boolean isNullable = super . isNullable ( ) ; if ( isNullable ) { if ( dataType . isDomainType ( ) ) { return dataType . userTypeModifier . isNullable ( ) ; } } return isNullable ; }
|
Is column nullable .
| 63
| 5
|
154,748
|
Object getDefaultValue ( Session session ) { return defaultExpression == null ? null : defaultExpression . getValue ( session , dataType ) ; }
|
Returns default value in the session context .
| 32
| 8
|
154,749
|
Object getGeneratedValue ( Session session ) { return generatingExpression == null ? null : generatingExpression . getValue ( session , dataType ) ; }
|
Returns generated value in the session context .
| 33
| 8
|
154,750
|
public String getDefaultSQL ( ) { String ddl = null ; ddl = defaultExpression == null ? null : defaultExpression . getSQL ( ) ; return ddl ; }
|
Returns SQL for default value .
| 39
| 6
|
154,751
|
Expression getDefaultExpression ( ) { if ( defaultExpression == null ) { if ( dataType . isDomainType ( ) ) { return dataType . userTypeModifier . getDefaultClause ( ) ; } return null ; } else { return defaultExpression ; } }
|
Returns default expression for the column .
| 60
| 7
|
154,752
|
public static ZKUtil . StringCallback createSnapshotCompletionNode ( String path , String pathType , String nonce , long txnId , boolean isTruncation , String truncReqId ) { if ( ! ( txnId > 0 ) ) { VoltDB . crashGlobalVoltDB ( "Txnid must be greather than 0" , true , null ) ; } byte nodeBytes [ ] = null ; try { JSONStringer stringer = new JSONStringer ( ) ; stringer . object ( ) ; stringer . keySymbolValuePair ( "txnId" , txnId ) ; stringer . keySymbolValuePair ( "isTruncation" , isTruncation ) ; stringer . keySymbolValuePair ( "didSucceed" , true ) ; stringer . keySymbolValuePair ( "hostCount" , - 1 ) ; stringer . keySymbolValuePair ( SnapshotUtil . JSON_PATH , path ) ; stringer . keySymbolValuePair ( SnapshotUtil . JSON_PATH_TYPE , pathType ) ; stringer . keySymbolValuePair ( SnapshotUtil . JSON_NONCE , nonce ) ; stringer . keySymbolValuePair ( "truncReqId" , truncReqId ) ; stringer . key ( "exportSequenceNumbers" ) . object ( ) . endObject ( ) ; stringer . endObject ( ) ; JSONObject jsonObj = new JSONObject ( stringer . toString ( ) ) ; nodeBytes = jsonObj . toString ( 4 ) . getBytes ( Charsets . UTF_8 ) ; } catch ( Exception e ) { VoltDB . crashLocalVoltDB ( "Error serializing snapshot completion node JSON" , true , e ) ; } ZKUtil . StringCallback cb = new ZKUtil . StringCallback ( ) ; final String snapshotPath = VoltZK . completed_snapshots + "/" + txnId ; VoltDB . instance ( ) . getHostMessenger ( ) . getZK ( ) . create ( snapshotPath , nodeBytes , Ids . OPEN_ACL_UNSAFE , CreateMode . PERSISTENT , cb , null ) ; return cb ; }
|
Create the completion node for the snapshot identified by the txnId . It assumes that all hosts will race to call this so it doesn t fail if the node already exists .
| 498
| 35
|
154,753
|
public static void logParticipatingHostCount ( long txnId , int participantCount ) { ZooKeeper zk = VoltDB . instance ( ) . getHostMessenger ( ) . getZK ( ) ; final String snapshotPath = VoltZK . completed_snapshots + "/" + txnId ; boolean success = false ; while ( ! success ) { Stat stat = new Stat ( ) ; byte data [ ] = null ; try { data = zk . getData ( snapshotPath , false , stat ) ; } catch ( KeeperException e ) { if ( e . code ( ) == KeeperException . Code . NONODE ) { // If snapshot creation failed for some reason, the node won't exist. ignore return ; } VoltDB . crashLocalVoltDB ( "Failed to get snapshot completion node" , true , e ) ; } catch ( InterruptedException e ) { VoltDB . crashLocalVoltDB ( "Interrupted getting snapshot completion node" , true , e ) ; } if ( data == null ) { VoltDB . crashLocalVoltDB ( "Data should not be null if the node exists" , false , null ) ; } try { JSONObject jsonObj = new JSONObject ( new String ( data , Charsets . UTF_8 ) ) ; if ( jsonObj . getLong ( "txnId" ) != txnId ) { VoltDB . crashLocalVoltDB ( "TxnId should match" , false , null ) ; } int hostCount = jsonObj . getInt ( "hostCount" ) ; // +1 because hostCount was initialized to -1 jsonObj . put ( "hostCount" , hostCount + participantCount + 1 ) ; zk . setData ( snapshotPath , jsonObj . toString ( 4 ) . getBytes ( Charsets . UTF_8 ) , stat . getVersion ( ) ) ; } catch ( KeeperException . BadVersionException e ) { continue ; } catch ( Exception e ) { VoltDB . crashLocalVoltDB ( "This ZK call should never fail" , true , e ) ; } success = true ; } }
|
Once participating host count is set SnapshotCompletionMonitor can check this ZK node to determine whether the snapshot has finished or not .
| 450
| 27
|
154,754
|
public synchronized void close ( ) { this . isPoolClosed = true ; while ( this . connectionsInactive . size ( ) > 0 ) { PooledConnection connection = dequeueFirstIfAny ( ) ; if ( connection != null ) { closePhysically ( connection , "closing inactive connection when connection pool was closed." ) ; } } }
|
Closes this connection pool . No further connections can be obtained from it after this . All inactive connections are physically closed before the call returns . Active connections are not closed . There may still be active connections in use after this method returns . When these connections are closed and returned to the pool they will be physically closed .
| 73
| 63
|
154,755
|
public synchronized void closeImmediatedly ( ) { close ( ) ; Iterator iterator = this . connectionsInUse . iterator ( ) ; while ( iterator . hasNext ( ) ) { PooledConnection connection = ( PooledConnection ) iterator . next ( ) ; SessionConnectionWrapper sessionWrapper = ( SessionConnectionWrapper ) this . sessionConnectionWrappers . get ( connection ) ; closeSessionWrapper ( sessionWrapper , "Error closing session wrapper. Connection pool was shutdown immediatedly." ) ; } }
|
Closes this connection pool immediately . The pool is closed and all in - use connections are also closed via their session wrappers .
| 107
| 4
|
154,756
|
public void setDriverClassName ( String driverClassName ) { if ( driverClassName . equals ( JDBCConnectionPoolDataSource . driver ) ) { return ; } /** @todo: Use a HSQLDB RuntimeException subclass */ throw new RuntimeException ( "This class only supports JDBC driver '" + JDBCConnectionPoolDataSource . driver + "'" ) ; }
|
For compatibility .
| 79
| 3
|
154,757
|
@ Override public void toJSONString ( JSONStringer stringer ) throws JSONException { super . toJSONString ( stringer ) ; stringer . key ( "AGGREGATE_COLUMNS" ) . array ( ) ; for ( int ii = 0 ; ii < m_aggregateTypes . size ( ) ; ii ++ ) { stringer . object ( ) ; stringer . keySymbolValuePair ( Members . AGGREGATE_TYPE . name ( ) , m_aggregateTypes . get ( ii ) . name ( ) ) ; stringer . keySymbolValuePair ( Members . AGGREGATE_OUTPUT_COLUMN . name ( ) , m_aggregateOutputColumns . get ( ii ) ) ; AbstractExpression . toJSONArray ( stringer , Members . AGGREGATE_EXPRESSIONS . name ( ) , m_aggregateExpressions . get ( ii ) ) ; stringer . endObject ( ) ; } stringer . endArray ( ) ; AbstractExpression . toJSONArray ( stringer , Members . PARTITIONBY_EXPRESSIONS . name ( ) , m_partitionByExpressions ) ; AbstractExpression . toJSONArrayFromSortList ( stringer , m_orderByExpressions , null ) ; }
|
Serialize to JSON . We only serialize the expressions and not the directions . We won't need them in the executor . The directions will be in the order by plan node in any case .
| 277
| 40
|
154,758
|
@ Override public void loadFromJSONObject ( JSONObject jobj , Database db ) throws JSONException { helpLoadFromJSONObject ( jobj , db ) ; JSONArray jarray = jobj . getJSONArray ( Members . AGGREGATE_COLUMNS . name ( ) ) ; int size = jarray . length ( ) ; for ( int i = 0 ; i < size ; i ++ ) { // We only expect one of these for now. assert ( i == 0 ) ; JSONObject tempObj = jarray . getJSONObject ( i ) ; m_aggregateTypes . add ( ExpressionType . get ( tempObj . getString ( Members . AGGREGATE_TYPE . name ( ) ) ) ) ; m_aggregateOutputColumns . add ( tempObj . getInt ( Members . AGGREGATE_OUTPUT_COLUMN . name ( ) ) ) ; m_aggregateExpressions . add ( AbstractExpression . loadFromJSONArrayChild ( null , tempObj , Members . AGGREGATE_EXPRESSIONS . name ( ) , null ) ) ; } m_partitionByExpressions = AbstractExpression . loadFromJSONArrayChild ( null , jobj , Members . PARTITIONBY_EXPRESSIONS . name ( ) , null ) ; m_orderByExpressions = new ArrayList <> ( ) ; AbstractExpression . loadSortListFromJSONArray ( m_orderByExpressions , null , jobj ) ; }
|
Deserialize a PartitionByPlanNode from JSON . Since we don't need the sort directions and we don't serialize them in toJSONString then we can't in general get them here .
| 315
| 41
|
154,759
|
@ Override public void run ( ) { Thread . currentThread ( ) . setName ( "Latency Watchdog" ) ; LOG . info ( String . format ( "Latency Watchdog enabled -- threshold:%d(ms) " + "wakeup_interval:%d(ms) min_log_interval:%d(ms)\n" , WATCHDOG_THRESHOLD , WAKEUP_INTERVAL , MIN_LOG_INTERVAL ) ) ; while ( true ) { for ( Entry < Thread , AtomicLong > entry : sLatencyMap . entrySet ( ) ) { Thread t = entry . getKey ( ) ; long timestamp = entry . getValue ( ) . get ( ) ; long now = System . currentTimeMillis ( ) ; if ( ( now - timestamp > WATCHDOG_THRESHOLD ) && t . getState ( ) != Thread . State . TERMINATED ) { StringBuilder sb = new StringBuilder ( ) ; String format = t . getName ( ) + " has been delayed for more than " + WATCHDOG_THRESHOLD + " milliseconds\n %s" ; for ( StackTraceElement ste : t . getStackTrace ( ) ) { sb . append ( ste ) ; sb . append ( "\n" ) ; } RateLimitedLogger . tryLogForMessage ( now , MIN_LOG_INTERVAL , TimeUnit . MILLISECONDS , LOG , Level . INFO , format , sb . toString ( ) ) ; } } try { Thread . sleep ( WAKEUP_INTERVAL ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; } } }
|
The watchdog thread will be invoked every WAKEUP_INTERVAL time to check if any thread that be monitored has not updated its time stamp more than WATCHDOG_THRESHOLD millisecond . Same stack trace messages are rate limited by MIN_LOG_INTERVAL .
| 363
| 56
|
154,760
|
public static ProcedurePartitionData fromPartitionInfoString ( String partitionInfoString ) { if ( partitionInfoString == null || partitionInfoString . trim ( ) . isEmpty ( ) ) { return new ProcedurePartitionData ( ) ; } String [ ] partitionInfoParts = new String [ 0 ] ; partitionInfoParts = partitionInfoString . split ( "," ) ; assert ( partitionInfoParts . length <= 2 ) ; if ( partitionInfoParts . length == 2 ) { ProcedurePartitionData partitionInfo = fromPartitionInfoString ( partitionInfoParts [ 0 ] ) ; ProcedurePartitionData partitionInfo2 = fromPartitionInfoString ( partitionInfoParts [ 1 ] ) ; partitionInfo . addSecondPartitionInfo ( partitionInfo2 ) ; return partitionInfo ; } String subClause = partitionInfoParts [ 0 ] ; // split on the colon String [ ] parts = subClause . split ( ":" ) ; assert ( parts . length == 2 ) ; // relabel the parts for code readability String columnInfo = parts [ 0 ] . trim ( ) ; String paramIndex = parts [ 1 ] . trim ( ) ; // split the columninfo parts = columnInfo . split ( "\\." ) ; assert ( parts . length == 2 ) ; // relabel the parts for code readability String tableName = parts [ 0 ] . trim ( ) ; String columnName = parts [ 1 ] . trim ( ) ; return new ProcedurePartitionData ( tableName , columnName , paramIndex ) ; }
|
For Testing usage ONLY . From a partition information string to a ProcedurePartitionData object .
| 316
| 11
|
154,761
|
@ Override public void runDDL ( String ddl ) { String modifiedDdl = transformDDL ( ddl ) ; printTransformedSql ( ddl , modifiedDdl ) ; super . runDDL ( modifiedDdl , false ) ; }
|
Modifies DDL statements in such a way that PostGIS results will match VoltDB results and then passes the remaining work to the base class version .
| 55
| 31
|
154,762
|
public void allHostNTProcedureCallback ( ClientResponse clientResponse ) { synchronized ( m_allHostCallbackLock ) { int hostId = Integer . parseInt ( clientResponse . getAppStatusString ( ) ) ; boolean removed = m_outstandingAllHostProcedureHostIds . remove ( hostId ) ; // log this for now... I don't expect it to ever happen, but will be interesting to see... if ( ! removed ) { tmLog . error ( String . format ( "ProcedureRunnerNT.allHostNTProcedureCallback for procedure %s received late or unexepected response from hostID %d." , m_procedureName , hostId ) ) ; return ; } m_allHostResponses . put ( hostId , clientResponse ) ; if ( m_outstandingAllHostProcedureHostIds . size ( ) == 0 ) { m_outstandingAllHostProc . set ( false ) ; m_allHostFut . complete ( m_allHostResponses ) ; } } }
|
This is called when an all - host proc responds from a particular node . It completes the future when all of the outstanding host responses have been received .
| 227
| 23
|
154,763
|
protected CompletableFuture < Map < Integer , ClientResponse > > callAllNodeNTProcedure ( String procName , Object ... params ) { // only one of these at a time if ( ! m_outstandingAllHostProc . compareAndSet ( false , true ) ) { throw new VoltAbortException ( new IllegalStateException ( "Only one AllNodeNTProcedure operation can be running at a time." ) ) ; } StoredProcedureInvocation invocation = new StoredProcedureInvocation ( ) ; invocation . setProcName ( procName ) ; invocation . setParams ( params ) ; invocation . setClientHandle ( m_id ) ; final Iv2InitiateTaskMessage workRequest = new Iv2InitiateTaskMessage ( m_mailbox . getHSId ( ) , m_mailbox . getHSId ( ) , TransactionInfoBaseMessage . UNUSED_TRUNC_HANDLE , m_id , m_id , true , false , invocation , m_id , ClientInterface . NT_REMOTE_PROC_CID , false ) ; m_allHostFut = new CompletableFuture <> ( ) ; m_allHostResponses = new HashMap <> ( ) ; // hold this lock while getting the count of live nodes // also held when long [ ] hsids ; synchronized ( m_allHostCallbackLock ) { // collect the set of live client interface mailbox ids m_outstandingAllHostProcedureHostIds = VoltDB . instance ( ) . getHostMessenger ( ) . getLiveHostIds ( ) ; // convert host ids to hsids hsids = m_outstandingAllHostProcedureHostIds . stream ( ) . mapToLong ( hostId -> CoreUtils . getHSIdFromHostAndSite ( hostId , HostMessenger . CLIENT_INTERFACE_SITE_ID ) ) . toArray ( ) ; } // send the invocation to all live nodes // n.b. can't combine this step with above because sometimes the callbacks comeback so fast // you get a concurrent modification exception for ( long hsid : hsids ) { m_mailbox . send ( hsid , workRequest ) ; } return m_allHostFut ; }
|
Send an invocation directly to each host's CI mailbox . This ONLY works for NT procedures . Track responses and complete the returned future when they're all accounted for .
| 494
| 32
|
154,764
|
private void completeCall ( ClientResponseImpl response ) { // if we're keeping track, calculate result size if ( m_perCallStats . samplingProcedure ( ) ) { m_perCallStats . setResultSize ( response . getResults ( ) ) ; } m_statsCollector . endProcedure ( response . getStatus ( ) == ClientResponse . USER_ABORT , ( response . getStatus ( ) != ClientResponse . USER_ABORT ) && ( response . getStatus ( ) != ClientResponse . SUCCESS ) , m_perCallStats ) ; // allow the GC to collect per-call stats if this proc isn't called for a while m_perCallStats = null ; // send the response to the caller // must be done as IRM to CI mailbox for backpressure accounting response . setClientHandle ( m_clientHandle ) ; InitiateResponseMessage irm = InitiateResponseMessage . messageForNTProcResponse ( m_ciHandle , m_ccxn . connectionId ( ) , response ) ; m_mailbox . deliver ( irm ) ; m_ntProcService . handleNTProcEnd ( ProcedureRunnerNT . this ) ; }
|
Send a response back to the proc caller . Refactored out of coreCall for both regular and exceptional paths .
| 254
| 23
|
154,765
|
public void processAnyCallbacksFromFailedHosts ( Set < Integer > failedHosts ) { synchronized ( m_allHostCallbackLock ) { failedHosts . stream ( ) . forEach ( i -> { if ( m_outstandingAllHostProcedureHostIds . contains ( i ) ) { ClientResponseImpl cri = new ClientResponseImpl ( ClientResponse . CONNECTION_LOST , new VoltTable [ 0 ] , "Host " + i + " failed, connection lost" ) ; // embed the hostid as a string in app status string // because the recipient expects this hack cri . setAppStatusString ( String . valueOf ( i ) ) ; allHostNTProcedureCallback ( cri ) ; } } ) ; } }
|
For all - host NT procedures use site failures to call callbacks for hosts that will obviously never respond .
| 162
| 21
|
154,766
|
public final static void log ( int logger , int level , String statement ) { if ( logger < loggers . length ) { switch ( level ) { case trace : loggers [ logger ] . trace ( statement ) ; break ; case debug : loggers [ logger ] . debug ( statement ) ; break ; case error : loggers [ logger ] . error ( statement ) ; break ; case fatal : loggers [ logger ] . fatal ( statement ) ; break ; case info : loggers [ logger ] . info ( statement ) ; break ; case warn : loggers [ logger ] . warn ( statement ) ; break ; default : throw new RuntimeException ( "Unhandled log level " + level ) ; } } else { throw new RuntimeException ( "Attempted to log to logger index " + logger + " which doesn't exist" ) ; } }
|
All EE loggers will call this static method from C and specify the logger and level they want to log to . The level will be checked again in Java .
| 175
| 32
|
154,767
|
public static void restoreFile ( String sourceName , String destName ) throws IOException { RandomAccessFile source = new RandomAccessFile ( sourceName , "r" ) ; RandomAccessFile dest = new RandomAccessFile ( destName , "rw" ) ; while ( source . getFilePointer ( ) != source . length ( ) ) { int size = source . readInt ( ) ; long position = source . readLong ( ) ; byte [ ] buffer = new byte [ size ] ; source . read ( buffer ) ; dest . seek ( position ) ; dest . write ( buffer ) ; } dest . seek ( DataFileCache . LONG_FREE_POS_POS ) ; long length = dest . readLong ( ) ; JavaSystem . setRAFileLength ( dest , length ) ; source . close ( ) ; dest . close ( ) ; }
|
buggy database files had size == position == 0 at the end
| 178
| 13
|
154,768
|
String getHTMLForAdminPage ( Map < String , String > params ) { try { String template = m_htmlTemplates . get ( "admintemplate.html" ) ; for ( Entry < String , String > e : params . entrySet ( ) ) { String key = e . getKey ( ) . toUpperCase ( ) ; String value = e . getValue ( ) ; if ( key == null ) continue ; if ( value == null ) value = "NULL" ; template = template . replace ( "#" + key + "#" , value ) ; } return template ; } catch ( Exception e ) { e . printStackTrace ( ) ; } return "<html><body>An unrecoverable error was encountered while generating this page.</body></html>" ; }
|
Load a template for the admin page fill it out and return the value .
| 166
| 15
|
154,769
|
public void start ( ) throws InterruptedException , ExecutionException { Future < ? > task = es . submit ( handlePartitionChange ) ; task . get ( ) ; }
|
Start monitoring the leaders . This is a blocking operation .
| 36
| 11
|
154,770
|
public static Long update ( final long now ) { final long estNow = EstTime . m_now ; if ( estNow == now ) { return null ; } EstTime . m_now = now ; /* * Check if updating the estimated time was especially tardy. * I am concerned that the thread responsible for updating the estimated * time might be blocking on something and want to be able to log if * that happens */ if ( now - estNow > ESTIMATED_TIME_WARN_INTERVAL ) { /* * Only report the error every 60 seconds to cut down on log spam */ if ( lastErrorReport > now ) { //Time moves backwards on occasion, check and reset lastErrorReport = now ; } if ( now - lastErrorReport > maxErrorReportInterval ) { lastErrorReport = now ; return now - estNow ; } } return null ; }
|
Don't call this unless you have paused the updater and intend to update yourself
| 181
| 16
|
154,771
|
StatementDMQL compileMigrateStatement ( RangeVariable [ ] outerRanges ) { final Expression condition ; assert token . tokenType == Tokens . MIGRATE ; read ( ) ; readThis ( Tokens . FROM ) ; RangeVariable [ ] rangeVariables = { readSimpleRangeVariable ( StatementTypes . MIGRATE_WHERE ) } ; Table table = rangeVariables [ 0 ] . getTable ( ) ; if ( token . tokenType == Tokens . WHERE ) { read ( ) ; condition = XreadBooleanValueExpression ( ) ; HsqlList unresolved = condition . resolveColumnReferences ( outerRanges , null ) ; unresolved = Expression . resolveColumnSet ( rangeVariables , unresolved , null ) ; ExpressionColumn . checkColumnsResolved ( unresolved ) ; condition . resolveTypes ( session , null ) ; if ( condition . isParam ( ) ) { condition . dataType = Type . SQL_BOOLEAN ; } if ( condition . getDataType ( ) != Type . SQL_BOOLEAN ) { throw Error . error ( ErrorCode . X_42568 ) ; } } else { throw Error . error ( ErrorCode . X_47000 ) ; } // check WHERE condition RangeVariableResolver resolver = new RangeVariableResolver ( rangeVariables , condition , compileContext ) ; resolver . processConditions ( ) ; rangeVariables = resolver . rangeVariables ; return new StatementDML ( session , table , rangeVariables , compileContext ) ; }
|
Creates a MIGRATE - type statement from this parser context ( i . e . MIGRATE FROM tbl WHERE ...
| 316
| 28
|
154,772
|
StatementDMQL compileDeleteStatement ( RangeVariable [ ] outerRanges ) { Expression condition = null ; boolean truncate = false ; boolean restartIdentity = false ; switch ( token . tokenType ) { case Tokens . TRUNCATE : { read ( ) ; readThis ( Tokens . TABLE ) ; truncate = true ; break ; } case Tokens . DELETE : { read ( ) ; readThis ( Tokens . FROM ) ; break ; } } RangeVariable [ ] rangeVariables = { readSimpleRangeVariable ( StatementTypes . DELETE_WHERE ) } ; Table table = rangeVariables [ 0 ] . getTable ( ) ; Table baseTable = table . getBaseTable ( ) ; /* A VoltDB Extension. * Views from Streams are now updatable. * Comment out this guard and check if it is a view * from Stream or PersistentTable in planner. if (!table.isUpdatable()) { throw Error.error(ErrorCode.X_42000); } A VoltDB Extension */ if ( truncate ) { switch ( token . tokenType ) { case Tokens . CONTINUE : { read ( ) ; readThis ( Tokens . IDENTITY ) ; break ; } case Tokens . RESTART : { read ( ) ; readThis ( Tokens . IDENTITY ) ; restartIdentity = true ; break ; } } for ( int i = 0 ; i < table . constraintList . length ; i ++ ) { if ( table . constraintList [ i ] . getConstraintType ( ) == Constraint . MAIN ) { throw Error . error ( ErrorCode . X_23501 ) ; } } } if ( truncate && table != baseTable ) { throw Error . error ( ErrorCode . X_42000 ) ; } if ( ! truncate && token . tokenType == Tokens . WHERE ) { read ( ) ; condition = XreadBooleanValueExpression ( ) ; HsqlList unresolved = condition . resolveColumnReferences ( outerRanges , null ) ; unresolved = Expression . resolveColumnSet ( rangeVariables , unresolved , null ) ; ExpressionColumn . checkColumnsResolved ( unresolved ) ; condition . resolveTypes ( session , null ) ; if ( condition . isParam ( ) ) { condition . dataType = Type . SQL_BOOLEAN ; } if ( condition . getDataType ( ) != Type . SQL_BOOLEAN ) { throw Error . error ( ErrorCode . X_42568 ) ; } } // VoltDB Extension: // baseTable could be null for stream views. 
if ( baseTable != null && table != baseTable ) { QuerySpecification select = ( ( TableDerived ) table ) . getQueryExpression ( ) . getMainSelect ( ) ; if ( condition != null ) { condition = condition . replaceColumnReferences ( rangeVariables [ 0 ] , select . exprColumns ) ; } rangeVariables [ 0 ] = new RangeVariable ( select . rangeVariables [ 0 ] ) ; condition = ExpressionLogical . andExpressions ( select . queryCondition , condition ) ; } if ( condition != null ) { RangeVariableResolver resolver = new RangeVariableResolver ( rangeVariables , condition , compileContext ) ; resolver . processConditions ( ) ; rangeVariables = resolver . rangeVariables ; } // VoltDB Extension: // This needs to be done before building the compiled statement // so that parameters in LIMIT or OFFSET are retrieved from // the compileContext SortAndSlice sas = voltGetSortAndSliceForDelete ( rangeVariables ) ; StatementDMQL cs = new StatementDML ( session , table , rangeVariables , compileContext , restartIdentity ) ; // VoltDB Extension: voltAppendDeleteSortAndSlice ( ( StatementDML ) cs , sas ) ; return cs ; }
|
Creates a DELETE - type Statement from this parse context .
| 808
| 14
|
154,773
|
StatementDMQL compileUpdateStatement ( RangeVariable [ ] outerRanges ) { read ( ) ; Expression [ ] updateExpressions ; int [ ] columnMap ; boolean [ ] columnCheckList ; OrderedHashSet colNames = new OrderedHashSet ( ) ; HsqlArrayList exprList = new HsqlArrayList ( ) ; RangeVariable [ ] rangeVariables = { readSimpleRangeVariable ( StatementTypes . UPDATE_WHERE ) } ; Table table = rangeVariables [ 0 ] . rangeTable ; Table baseTable = table . getBaseTable ( ) ; readThis ( Tokens . SET ) ; readSetClauseList ( rangeVariables , colNames , exprList ) ; columnMap = table . getColumnIndexes ( colNames ) ; columnCheckList = table . getColumnCheckList ( columnMap ) ; updateExpressions = new Expression [ exprList . size ( ) ] ; exprList . toArray ( updateExpressions ) ; Expression condition = null ; if ( token . tokenType == Tokens . WHERE ) { read ( ) ; condition = XreadBooleanValueExpression ( ) ; HsqlList unresolved = condition . resolveColumnReferences ( outerRanges , null ) ; unresolved = Expression . resolveColumnSet ( rangeVariables , unresolved , null ) ; ExpressionColumn . checkColumnsResolved ( unresolved ) ; condition . resolveTypes ( session , null ) ; if ( condition . isParam ( ) ) { condition . dataType = Type . SQL_BOOLEAN ; } else if ( condition . getDataType ( ) != Type . SQL_BOOLEAN ) { throw Error . error ( ErrorCode . X_42568 ) ; } } resolveUpdateExpressions ( table , rangeVariables , columnMap , updateExpressions , outerRanges ) ; if ( baseTable != null && table != baseTable ) { QuerySpecification select = ( ( TableDerived ) table ) . getQueryExpression ( ) . getMainSelect ( ) ; if ( condition != null ) { condition = condition . replaceColumnReferences ( rangeVariables [ 0 ] , select . exprColumns ) ; } rangeVariables [ 0 ] = new RangeVariable ( select . rangeVariables [ 0 ] ) ; condition = ExpressionLogical . andExpressions ( select . 
queryCondition , condition ) ; } if ( condition != null ) { RangeVariableResolver resolver = new RangeVariableResolver ( rangeVariables , condition , compileContext ) ; resolver . processConditions ( ) ; rangeVariables = resolver . rangeVariables ; } if ( baseTable != null && table != baseTable ) { int [ ] baseColumnMap = table . getBaseTableColumnMap ( ) ; int [ ] newColumnMap = new int [ columnMap . length ] ; ArrayUtil . projectRow ( baseColumnMap , columnMap , newColumnMap ) ; columnMap = newColumnMap ; } StatementDMQL cs = new StatementDML ( session , table , rangeVariables , columnMap , updateExpressions , columnCheckList , compileContext ) ; return cs ; }
|
Creates an UPDATE - type Statement from this parse context .
| 644
| 12
|
154,774
|
private void readMergeWhen ( OrderedHashSet insertColumnNames , OrderedHashSet updateColumnNames , HsqlArrayList insertExpressions , HsqlArrayList updateExpressions , RangeVariable [ ] targetRangeVars , RangeVariable sourceRangeVar ) { Table table = targetRangeVars [ 0 ] . rangeTable ; int columnCount = table . getColumnCount ( ) ; readThis ( Tokens . WHEN ) ; if ( token . tokenType == Tokens . MATCHED ) { if ( updateExpressions . size ( ) != 0 ) { throw Error . error ( ErrorCode . X_42547 ) ; } read ( ) ; readThis ( Tokens . THEN ) ; readThis ( Tokens . UPDATE ) ; readThis ( Tokens . SET ) ; readSetClauseList ( targetRangeVars , updateColumnNames , updateExpressions ) ; } else if ( token . tokenType == Tokens . NOT ) { if ( insertExpressions . size ( ) != 0 ) { throw Error . error ( ErrorCode . X_42548 ) ; } read ( ) ; readThis ( Tokens . MATCHED ) ; readThis ( Tokens . THEN ) ; readThis ( Tokens . INSERT ) ; // parse INSERT statement // optional column list int brackets = readOpenBrackets ( ) ; if ( brackets == 1 ) { readSimpleColumnNames ( insertColumnNames , targetRangeVars [ 0 ] ) ; readThis ( Tokens . CLOSEBRACKET ) ; brackets = 0 ; } readThis ( Tokens . VALUES ) ; Expression e = XreadContextuallyTypedTable ( columnCount ) ; if ( e . nodes . length != 1 ) { throw Error . error ( ErrorCode . X_21000 ) ; } insertExpressions . add ( e ) ; } else { throw unexpectedToken ( ) ; } if ( token . tokenType == Tokens . WHEN ) { readMergeWhen ( insertColumnNames , updateColumnNames , insertExpressions , updateExpressions , targetRangeVars , sourceRangeVar ) ; } }
|
Parses a WHEN clause from a MERGE statement . This can be either a WHEN MATCHED or WHEN NOT MATCHED clause or both and the appropriate values will be updated .
| 427
| 38
|
154,775
|
/**
 * Compiles a CALL statement. If the token names a stored procedure, the
 * argument list is parsed and resolved against the matching specific
 * routine. Otherwise (and only when {@code isStrictlyProcedure} is false)
 * the remainder is compiled as a value expression to evaluate.
 *
 * TODO: complete call argument name and type resolution.
 *
 * @param outerRanges        range variables of any enclosing statement
 * @param isStrictlyProcedure when true, a non-procedure target is an error
 * @return the compiled CALL statement
 */
StatementDMQL compileCallStatement(RangeVariable[] outerRanges,
                                   boolean isStrictlyProcedure) {

    read();

    if (isIdentifier()) {
        checkValidCatalogName(token.namePrePrefix);

        RoutineSchema routineSchema =
            (RoutineSchema) database.schemaManager.findSchemaObject(
                token.tokenString,
                session.getSchemaName(token.namePrefix),
                SchemaObject.PROCEDURE);

        if (routineSchema != null) {
            read();

            HsqlArrayList list = new HsqlArrayList();

            readThis(Tokens.OPENBRACKET);

            if (token.tokenType == Tokens.CLOSEBRACKET) {
                read();
            } else {
                // comma-separated argument expressions
                while (true) {
                    Expression e = XreadValueExpression();

                    list.add(e);

                    if (token.tokenType == Tokens.COMMA) {
                        read();
                    } else {
                        readThis(Tokens.CLOSEBRACKET);
                        break;
                    }
                }
            }

            Expression[] arguments = new Expression[list.size()];

            list.toArray(arguments);

            // pick the overload matching the argument count
            Routine routine = routineSchema.getSpecificRoutine(arguments.length);
            HsqlList unresolved = null;

            for (int i = 0; i < arguments.length; i++) {
                Expression e = arguments[i];

                if (e.isParam()) {
                    // dynamic parameter inherits the routine parameter's
                    // attributes (including writability for OUT/INOUT)
                    e.setAttributesAsColumn(routine.getParameter(i),
                                            routine.getParameter(i).isWriteable());
                } else {
                    int paramMode = routine.getParameter(i).getParameterMode();

                    unresolved = arguments[i].resolveColumnReferences(outerRanges,
                            unresolved);

                    if (paramMode != SchemaObject.ParameterModes.PARAM_IN) {
                        // OUT/INOUT arguments must be variables
                        if (e.getType() != OpTypes.VARIABLE) {
                            throw Error.error(ErrorCode.X_42603);
                        }
                    }
                }
            }

            ExpressionColumn.checkColumnsResolved(unresolved);

            for (int i = 0; i < arguments.length; i++) {
                arguments[i].resolveTypes(session, null);
            }

            StatementDMQL cs = new StatementProcedure(session, routine,
                    arguments, compileContext);

            return cs;
        }
    }

    if (isStrictlyProcedure) {
        throw Error.error(ErrorCode.X_42501, token.tokenString);
    }

    // not a procedure: compile the rest as a value expression
    Expression expression = this.XreadValueExpression();
    HsqlList unresolved = expression.resolveColumnReferences(outerRanges, null);

    ExpressionColumn.checkColumnsResolved(unresolved);
    expression.resolveTypes(session, null);

    // expression.paramMode = PARAM_OUT;
    StatementDMQL cs = new StatementProcedure(session, expression,
            compileContext);

    return cs;
}
|
to do call argument name and type resolution
| 619
| 8
|
154,776
|
private SortAndSlice voltGetSortAndSliceForDelete ( RangeVariable [ ] rangeVariables ) { SortAndSlice sas = XreadOrderByExpression ( ) ; if ( sas == null || sas == SortAndSlice . noSort ) return SortAndSlice . noSort ; // Resolve columns in the ORDER BY clause. This code modified // from how compileDelete resolves columns in its WHERE clause for ( int i = 0 ; i < sas . exprList . size ( ) ; ++ i ) { Expression e = ( Expression ) sas . exprList . get ( i ) ; HsqlList unresolved = e . resolveColumnReferences ( RangeVariable . emptyArray , null ) ; unresolved = Expression . resolveColumnSet ( rangeVariables , unresolved , null ) ; ExpressionColumn . checkColumnsResolved ( unresolved ) ; e . resolveTypes ( session , null ) ; } return sas ; }
|
This is a Volt extension to allow DELETE FROM tab ORDER BY c LIMIT 1. Adds a SortAndSlice object to the statement if the next tokens are ORDER BY, LIMIT, or OFFSET.
| 195
| 42
|
154,777
|
public ClassNameMatchStatus addPattern ( String classNamePattern ) { boolean matchFound = false ; if ( m_classList == null ) { m_classList = getClasspathClassFileNames ( ) ; } String preppedName = classNamePattern . trim ( ) ; // include only full classes // for nested classes, include the parent pattern int indexOfDollarSign = classNamePattern . indexOf ( ' ' ) ; if ( indexOfDollarSign >= 0 ) { classNamePattern = classNamePattern . substring ( 0 , indexOfDollarSign ) ; } // Substitution order is critical. // Keep track of whether or not this is a wildcard expression. // '.' is specifically not a wildcard. String regExPreppedName = preppedName . replace ( "." , "[.]" ) ; boolean isWildcard = regExPreppedName . contains ( "*" ) ; if ( isWildcard ) { regExPreppedName = regExPreppedName . replace ( "**" , "[\\w.\\$]+" ) ; regExPreppedName = regExPreppedName . replace ( "*" , "[\\w\\$]*" ) ; } String regex = "^" + // (line start) regExPreppedName + "$" ; // (line end) Pattern pattern = Pattern . compile ( regex , Pattern . MULTILINE ) ; Matcher matcher = pattern . matcher ( m_classList ) ; while ( matcher . find ( ) ) { String match = matcher . group ( ) ; // skip nested classes; the base class will include them if ( match . contains ( "$" ) ) { continue ; } matchFound = true ; m_classNameMatches . add ( match ) ; } if ( matchFound ) { return ClassNameMatchStatus . MATCH_FOUND ; } else { if ( isWildcard ) { return ClassNameMatchStatus . NO_WILDCARD_MATCH ; } else { return ClassNameMatchStatus . NO_EXACT_MATCH ; } } }
|
Add a pattern that matches classes from the classpath and add any matching classnames to m_classNameMatches .
| 442
| 24
|
154,778
|
private static void processPathPart ( String path , Set < String > classes ) { File rootFile = new File ( path ) ; if ( rootFile . isDirectory ( ) == false ) { return ; } File [ ] files = rootFile . listFiles ( ) ; for ( File f : files ) { // classes in the anonymous package if ( f . getName ( ) . endsWith ( ".class" ) ) { String className = f . getName ( ) ; // trim the trailing .class from the end className = className . substring ( 0 , className . length ( ) - ".class" . length ( ) ) ; classes . add ( className ) ; } if ( f . isDirectory ( ) ) { Package p = new Package ( null , f ) ; p . process ( classes ) ; } } }
|
For a given classpath root scan it for packages and classes adding all found classnames to the given classes param .
| 175
| 23
|
154,779
|
static String getClasspathClassFileNames ( ) { String classpath = System . getProperty ( "java.class.path" ) ; String [ ] pathParts = classpath . split ( File . pathSeparator ) ; Set < String > classes = new TreeSet < String > ( ) ; for ( String part : pathParts ) { processPathPart ( part , classes ) ; } StringBuilder sb = new StringBuilder ( ) ; for ( String className : classes ) { sb . append ( className ) . append ( ' ' ) ; } return sb . toString ( ) ; }
|
Get a single string that contains all of the non - jar classfiles in the current classpath separated by newlines . Classfiles are represented by their Java dot names not filenames .
| 128
| 38
|
154,780
|
double getMemoryLimitSize ( String sizeStr ) { if ( sizeStr == null || sizeStr . length ( ) == 0 ) { return 0 ; } try { if ( sizeStr . charAt ( sizeStr . length ( ) - 1 ) == ' ' ) { // size as a percentage of total available memory int perc = Integer . parseInt ( sizeStr . substring ( 0 , sizeStr . length ( ) - 1 ) ) ; if ( perc < 0 || perc > 99 ) { throw new IllegalArgumentException ( "Invalid memory limit percentage: " + sizeStr ) ; } return PlatformProperties . getPlatformProperties ( ) . ramInMegabytes * 1048576L * perc / 100.0 ; } else { // size in GB double size = Double . parseDouble ( sizeStr ) * 1073741824L ; if ( size < 0 ) { throw new IllegalArgumentException ( "Invalid memory limit value: " + sizeStr ) ; } return size ; } } catch ( NumberFormatException e ) { throw new IllegalArgumentException ( "Invalid memory limit value " + sizeStr + ". Memory limit must be configued as a percentage of total available memory or as GB value" ) ; } }
|
package - private for junit
| 260
| 6
|
154,781
|
public static void validatePath ( String path ) throws IllegalArgumentException { if ( path == null ) { throw new IllegalArgumentException ( "Path cannot be null" ) ; } if ( path . length ( ) == 0 ) { throw new IllegalArgumentException ( "Path length must be > 0" ) ; } if ( path . charAt ( 0 ) != ' ' ) { throw new IllegalArgumentException ( "Path \"" + path + "\" must start with / character" ) ; } if ( path . length ( ) == 1 ) { // done checking - it's the root return ; } if ( path . charAt ( path . length ( ) - 1 ) == ' ' ) { throw new IllegalArgumentException ( "Path must not end with / character" ) ; } String reason = null ; char lastc = ' ' ; char chars [ ] = path . toCharArray ( ) ; char c ; for ( int i = 1 ; i < chars . length ; lastc = chars [ i ] , i ++ ) { c = chars [ i ] ; if ( c == 0 ) { reason = "null character not allowed @" + i ; break ; } else if ( c == ' ' && lastc == ' ' ) { reason = "empty node name specified @" + i ; break ; } else if ( c == ' ' && lastc == ' ' ) { if ( chars [ i - 2 ] == ' ' && ( ( i + 1 == chars . length ) || chars [ i + 1 ] == ' ' ) ) { reason = "relative paths not allowed @" + i ; break ; } } else if ( c == ' ' ) { if ( chars [ i - 1 ] == ' ' && ( ( i + 1 == chars . length ) || chars [ i + 1 ] == ' ' ) ) { reason = "relative paths not allowed @" + i ; break ; } } else if ( c > ' ' && c < ' ' || c > ' ' && c < ' ' || c > ' ' && c < ' ' || c > ' ' && c < ' ' ) { reason = "invalid charater @" + i ; break ; } } if ( reason != null ) { throw new IllegalArgumentException ( "Invalid path string \"" + path + "\" caused by " + reason ) ; } }
|
Validate the provided znode path string
| 499
| 8
|
154,782
|
protected void produceCopyForTransformation ( AbstractPlanNode copy ) { copy . m_outputSchema = m_outputSchema ; copy . m_hasSignificantOutputSchema = m_hasSignificantOutputSchema ; copy . m_outputColumnHints = m_outputColumnHints ; copy . m_estimatedOutputTupleCount = m_estimatedOutputTupleCount ; copy . m_estimatedProcessedTupleCount = m_estimatedProcessedTupleCount ; // clone is not yet implemented for every node. assert ( m_inlineNodes . size ( ) == 0 ) ; assert ( m_isInline == false ) ; // the api requires the copy is not (yet) connected assert ( copy . m_parents . size ( ) == 0 ) ; assert ( copy . m_children . size ( ) == 0 ) ; }
|
Create a PlanNode that clones the configuration information but is not inserted in the plan graph and has a unique plan node id .
| 185
| 25
|
154,783
|
public void generateOutputSchema ( Database db ) { // default behavior: just copy the input schema // to the output schema assert ( m_children . size ( ) == 1 ) ; AbstractPlanNode childNode = m_children . get ( 0 ) ; childNode . generateOutputSchema ( db ) ; // Replace the expressions in our children's columns with TVEs. When // we resolve the indexes in these TVEs they will point back at the // correct input column, which we are assuming that the child node // has filled in with whatever expression was here before the replacement. // Output schemas defined using this standard algorithm // are just cached "fillers" that satisfy the legacy // resolveColumnIndexes/generateOutputSchema/getOutputSchema protocol // until it can be fixed up -- see the FIXME comment on generateOutputSchema. m_hasSignificantOutputSchema = false ; m_outputSchema = childNode . getOutputSchema ( ) . copyAndReplaceWithTVE ( ) ; }
|
Generate the output schema for this node based on the output schemas of its children . The generated schema consists of the complete set of columns but is not yet ordered .
| 214
| 34
|
154,784
|
public void getTablesAndIndexes ( Map < String , StmtTargetTableScan > tablesRead , Collection < String > indexes ) { for ( AbstractPlanNode node : m_inlineNodes . values ( ) ) { node . getTablesAndIndexes ( tablesRead , indexes ) ; } for ( AbstractPlanNode node : m_children ) { node . getTablesAndIndexes ( tablesRead , indexes ) ; } getTablesAndIndexesFromSubqueries ( tablesRead , indexes ) ; }
|
Recursively build sets of tables read and index names used .
| 109
| 13
|
154,785
|
protected void getTablesAndIndexesFromSubqueries ( Map < String , StmtTargetTableScan > tablesRead , Collection < String > indexes ) { for ( AbstractExpression expr : findAllSubquerySubexpressions ( ) ) { assert ( expr instanceof AbstractSubqueryExpression ) ; AbstractSubqueryExpression subquery = ( AbstractSubqueryExpression ) expr ; AbstractPlanNode subqueryNode = subquery . getSubqueryNode ( ) ; assert ( subqueryNode != null ) ; subqueryNode . getTablesAndIndexes ( tablesRead , indexes ) ; } }
|
Collect read tables read and index names used in the current node subquery expressions .
| 125
| 16
|
154,786
|
public boolean isOutputOrdered ( List < AbstractExpression > sortExpressions , List < SortDirectionType > sortDirections ) { assert ( sortExpressions . size ( ) == sortDirections . size ( ) ) ; if ( m_children . size ( ) == 1 ) { return m_children . get ( 0 ) . isOutputOrdered ( sortExpressions , sortDirections ) ; } return false ; }
|
Does the plan guarantee a result sorted according to the required sort order . The default implementation delegates the question to its child if there is only one child .
| 90
| 30
|
154,787
|
/**
 * Find the true output schema: the first significant schema found by
 * walking down the chain of first children, mirroring the search order the
 * EE uses when constructing a plan node. Optionally walks back up the
 * chain, copying the found schema onto every node that lacked one.
 *
 * @param resetBack when true, propagate the found schema back up to this
 *                  node (including inlined aggregate nodes under joins)
 * @return the true output schema (never null)
 * @throws PlanningErrorException if no node in the chain has a schema
 */
public final NodeSchema getTrueOutputSchema(boolean resetBack)
        throws PlanningErrorException {
    AbstractPlanNode child;
    NodeSchema answer = null;

    //
    // Note: This code is translated from the C++ code in
    //       AbstractPlanNode::getOutputSchema. It's considerably
    //       different there, but I think this has the corner
    //       cases covered correctly.
    // Walk down first children until a node owns a significant schema,
    // or an inline projection (or its inline insert) supplies one.
    for (child = this; child != null;
            child = (child.getChildCount() == 0) ? null : child.getChild(0)) {
        NodeSchema childSchema;

        if (child.m_hasSignificantOutputSchema) {
            childSchema = child.getOutputSchema();

            assert(childSchema != null);

            answer = childSchema;
            break;
        }

        AbstractPlanNode childProj = child.getInlinePlanNode(PlanNodeType.PROJECTION);

        if (childProj != null) {
            // an inline INSERT nested in the projection takes precedence
            AbstractPlanNode schemaSrc = null;
            AbstractPlanNode inlineInsertNode =
                childProj.getInlinePlanNode(PlanNodeType.INSERT);

            if (inlineInsertNode != null) {
                schemaSrc = inlineInsertNode;
            } else {
                schemaSrc = childProj;
            }

            childSchema = schemaSrc.getOutputSchema();

            if (childSchema != null) {
                answer = childSchema;
                break;
            }
        }
    }

    if (child == null) {
        // We've gone to the end of the plan. This is a
        // failure in the EE.
        assert(false);
        throw new PlanningErrorException(
            "AbstractPlanNode with no true output schema. Please notify VoltDB Support.");
    }

    // Trace back the chain of parents and reset the
    // output schemas of the parent. These will all be
    // exactly the same. Note that the source of the
    // schema may be an inline plan node. So we need
    // to set the child's output schema to be the answer.
    // If the schema source is the child node itself, this will
    // set the the output schema to itself, so no harm
    // will be done.
    if (resetBack) {
        do {
            if (child instanceof AbstractJoinPlanNode) {
                // In joins with inlined aggregation, the inlined
                // aggregate node is the one that determines the schema.
                // (However, the enclosing join node still has its
                // "m_hasSignificantOutputSchema" bit set.)
                //
                // The method resolveColumnIndexes will overwrite
                // a join node's schema if there is aggregation. In order
                // to avoid undoing the work we've done here, we must
                // also update the inlined aggregate node.
                AggregatePlanNode aggNode =
                    AggregatePlanNode.getInlineAggregationNode(child);

                if (aggNode != null) {
                    aggNode.setOutputSchema(answer);
                }
            }

            if ( ! child.m_hasSignificantOutputSchema) {
                child.setOutputSchema(answer);
            }

            child = (child.getParentCount() == 0) ? null : child.getParent(0);
        } while (child != null);
    }

    return answer;
}
|
Find the true output schema . This may be in some child node . This seems to be the search order when constructing a plan node in the EE .
| 657
| 30
|
154,788
|
public void addAndLinkChild ( AbstractPlanNode child ) { assert ( child != null ) ; m_children . add ( child ) ; child . m_parents . add ( this ) ; }
|
Add a child and link this node as the child's parent.
| 41
| 11
|
154,789
|
public void setAndLinkChild ( int index , AbstractPlanNode child ) { assert ( child != null ) ; m_children . set ( index , child ) ; child . m_parents . add ( this ) ; }
|
Used to re - link the child without changing the order .
| 46
| 12
|
154,790
|
public void unlinkChild ( AbstractPlanNode child ) { assert ( child != null ) ; m_children . remove ( child ) ; child . m_parents . remove ( this ) ; }
|
Remove child from this node .
| 40
| 6
|
154,791
|
public boolean replaceChild ( AbstractPlanNode oldChild , AbstractPlanNode newChild ) { assert ( oldChild != null ) ; assert ( newChild != null ) ; int idx = 0 ; for ( AbstractPlanNode child : m_children ) { if ( child . equals ( oldChild ) ) { oldChild . m_parents . clear ( ) ; setAndLinkChild ( idx , newChild ) ; return true ; } ++ idx ; } return false ; }
|
Replace an existing child with a new one, preserving the child's position.
| 99
| 15
|
154,792
|
public void addIntermediary ( AbstractPlanNode node ) { // transfer this node's children to node Iterator < AbstractPlanNode > it = m_children . iterator ( ) ; while ( it . hasNext ( ) ) { AbstractPlanNode child = it . next ( ) ; it . remove ( ) ; // remove this.child from m_children assert child . getParentCount ( ) == 1 ; child . clearParents ( ) ; // and reset child's parents list node . addAndLinkChild ( child ) ; // set node.child and child.parent } // and add node to this node's children assert ( m_children . size ( ) == 0 ) ; addAndLinkChild ( node ) ; }
|
Interject the provided node between this node and this node's current children.
| 150
| 14
|
154,793
|
public boolean hasInlinedIndexScanOfTable ( String tableName ) { for ( int i = 0 ; i < getChildCount ( ) ; i ++ ) { AbstractPlanNode child = getChild ( i ) ; if ( child . hasInlinedIndexScanOfTable ( tableName ) == true ) { return true ; } } return false ; }
|
Refer to the override implementation on NestLoopIndexJoin node .
| 73
| 12
|
154,794
|
protected void findAllExpressionsOfClass ( Class < ? extends AbstractExpression > aeClass , Set < AbstractExpression > collected ) { // Check the inlined plan nodes for ( AbstractPlanNode inlineNode : getInlinePlanNodes ( ) . values ( ) ) { // For inline node we MUST go recursive to its children!!!!! inlineNode . findAllExpressionsOfClass ( aeClass , collected ) ; } // and the output column expressions if there were no projection NodeSchema schema = getOutputSchema ( ) ; if ( schema != null ) { schema . addAllSubexpressionsOfClassFromNodeSchema ( collected , aeClass ) ; } }
|
Collect a unique list of expressions of a given type that this node has including its inlined nodes
| 142
| 19
|
154,795
|
public String toDOTString ( ) { StringBuilder sb = new StringBuilder ( ) ; // id [label=id: value-type <value-type-attributes>]; // id -> child_id; // id -> child_id; sb . append ( m_id ) . append ( " [label=\"" ) . append ( m_id ) . append ( ": " ) . append ( getPlanNodeType ( ) ) . append ( "\" " ) ; sb . append ( getValueTypeDotString ( ) ) ; sb . append ( "];\n" ) ; for ( AbstractPlanNode node : m_inlineNodes . values ( ) ) { sb . append ( m_id ) . append ( " -> " ) . append ( node . getPlanNodeId ( ) . intValue ( ) ) . append ( ";\n" ) ; sb . append ( node . toDOTString ( ) ) ; } for ( AbstractPlanNode node : m_children ) { sb . append ( m_id ) . append ( " -> " ) . append ( node . getPlanNodeId ( ) . intValue ( ) ) . append ( ";\n" ) ; } return sb . toString ( ) ; }
|
produce output that can be imported into Graphviz for easier visualization
| 273
| 14
|
154,796
|
private String getValueTypeDotString ( ) { PlanNodeType pnt = getPlanNodeType ( ) ; if ( isInline ( ) ) { return "fontcolor=\"white\" style=\"filled\" fillcolor=\"red\"" ; } if ( pnt == PlanNodeType . SEND || pnt == PlanNodeType . RECEIVE || pnt == PlanNodeType . MERGERECEIVE ) { return "fontcolor=\"white\" style=\"filled\" fillcolor=\"black\"" ; } return "" ; }
|
maybe not worth polluting
| 112
| 5
|
154,797
|
public void getScanNodeList_recurse ( ArrayList < AbstractScanPlanNode > collected , HashSet < AbstractPlanNode > visited ) { if ( visited . contains ( this ) ) { assert ( false ) : "do not expect loops in plangraph." ; return ; } visited . add ( this ) ; for ( AbstractPlanNode n : m_children ) { n . getScanNodeList_recurse ( collected , visited ) ; } for ( AbstractPlanNode node : m_inlineNodes . values ( ) ) { node . getScanNodeList_recurse ( collected , visited ) ; } }
|
postorder adding scan nodes
| 129
| 5
|
154,798
|
public void getPlanNodeList_recurse ( ArrayList < AbstractPlanNode > collected , HashSet < AbstractPlanNode > visited ) { if ( visited . contains ( this ) ) { assert ( false ) : "do not expect loops in plangraph." ; return ; } visited . add ( this ) ; for ( AbstractPlanNode n : m_children ) { n . getPlanNodeList_recurse ( collected , visited ) ; } collected . add ( this ) ; }
|
postorder add nodes
| 101
| 4
|
154,799
|
private static Object nullValueForType ( final Class < ? > expectedClz ) { if ( expectedClz == long . class ) { return VoltType . NULL_BIGINT ; } else if ( expectedClz == int . class ) { return VoltType . NULL_INTEGER ; } else if ( expectedClz == short . class ) { return VoltType . NULL_SMALLINT ; } else if ( expectedClz == byte . class ) { return VoltType . NULL_TINYINT ; } else if ( expectedClz == double . class ) { return VoltType . NULL_FLOAT ; } // all non-primitive types can handle null return null ; }
|
Get the appropriate and compatible null value for a given parameter type .
| 147
| 13
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.