| idx (int64, 0–165k) | question (string, length 73–4.15k) | target (string, length 5–918) | len_question (int64, 21–890) | len_target (int64, 3–255) |
|---|---|---|---|---|
155,300
|
private static String getActionString ( int action ) { switch ( action ) { case Constraint . RESTRICT : return Tokens . T_RESTRICT ; case Constraint . CASCADE : return Tokens . T_CASCADE ; case Constraint . SET_DEFAULT : return Tokens . T_SET + ' ' + Tokens . T_DEFAULT ; case Constraint . SET_NULL : return Tokens . T_SET + ' ' + Tokens . T_NULL ; default : return Tokens . T_NO + ' ' + Tokens . T_ACTION ; } }
|
Returns the foreign key action rule .
| 124
| 7
|
155,301
|
boolean isUniqueWithColumns ( int [ ] cols ) { if ( constType != UNIQUE || core . mainCols . length != cols . length ) { return false ; } return ArrayUtil . haveEqualSets ( core . mainCols , cols , cols . length ) ; }
|
Compares this with another constraint column set . This is used only for UNIQUE constraints .
| 69
| 19
|
155,302
|
boolean isEquivalent ( Table mainTable , int [ ] mainCols , Table refTable , int [ ] refCols ) { if ( constType != Constraint . MAIN && constType != Constraint . FOREIGN_KEY ) { return false ; } if ( mainTable != core . mainTable || refTable != core . refTable ) { return false ; } return ArrayUtil . areEqualSets ( core . mainCols , mainCols ) && ArrayUtil . areEqualSets ( core . refCols , refCols ) ; }
|
Compares this with another constraint column set . This implementation only checks FOREIGN KEY constraints .
| 126
| 19
|
155,303
|
void updateTable ( Session session , Table oldTable , Table newTable , int colIndex , int adjust ) { if ( oldTable == core . mainTable ) { core . mainTable = newTable ; if ( core . mainIndex != null ) { core . mainIndex = core . mainTable . getIndex ( core . mainIndex . getName ( ) . name ) ; core . mainCols = ArrayUtil . toAdjustedColumnArray ( core . mainCols , colIndex , adjust ) ; } } if ( oldTable == core . refTable ) { core . refTable = newTable ; if ( core . refIndex != null ) { core . refIndex = core . refTable . getIndex ( core . refIndex . getName ( ) . name ) ; core . refCols = ArrayUtil . toAdjustedColumnArray ( core . refCols , colIndex , adjust ) ; } } // CHECK if ( constType == CHECK ) { recompile ( session , newTable ) ; } }
|
Used to update constraints to reflect structural changes in a table . Prior checks must ensure that this method does not throw .
| 216
| 25
|
155,304
|
void checkInsert ( Session session , Table table , Object [ ] row ) { switch ( constType ) { case CHECK : if ( ! isNotNull ) { checkCheckConstraint ( session , table , row ) ; } return ; case FOREIGN_KEY : PersistentStore store = session . sessionData . getRowStore ( core . mainTable ) ; if ( ArrayUtil . hasNull ( row , core . refCols ) ) { if ( core . matchType == OpTypes . MATCH_SIMPLE ) { return ; } if ( core . refCols . length == 1 ) { return ; } if ( ArrayUtil . hasAllNull ( row , core . refCols ) ) { return ; } // core.matchType == OpTypes.MATCH_FULL } else if ( core . mainIndex . exists ( session , store , row , core . refCols ) ) { return ; } else if ( core . mainTable == core . refTable ) { // special case: self referencing table and self referencing row int compare = core . mainIndex . compareRowNonUnique ( row , core . refCols , row ) ; if ( compare == 0 ) { return ; } } String [ ] info = new String [ ] { core . refName . name , core . mainTable . getName ( ) . name } ; throw Error . error ( ErrorCode . X_23502 , ErrorCode . CONSTRAINT , info ) ; } }
|
Checks for foreign key or check constraint violation when inserting a row into the child table .
| 313
| 18
|
155,305
|
boolean checkHasMainRef ( Session session , Object [ ] row ) { if ( ArrayUtil . hasNull ( row , core . refCols ) ) { return false ; } PersistentStore store = session . sessionData . getRowStore ( core . mainTable ) ; boolean exists = core . mainIndex . exists ( session , store , row , core . refCols ) ; if ( ! exists ) { String [ ] info = new String [ ] { core . refName . name , core . mainTable . getName ( ) . name } ; throw Error . error ( ErrorCode . X_23502 , ErrorCode . CONSTRAINT , info ) ; } return exists ; }
|
For the candidate table row , finds any referring node in the main table . This is used to check referential integrity when updating a node . We have to make sure that the main table still holds a valid main record . Returns true if a valid row is found , false if there are nulls in the data ; otherwise an INTEGRITY VIOLATION exception gets thrown .
| 147
| 73
|
155,306
|
void checkReferencedRows ( Session session , Table table , int [ ] rowColArray ) { Index mainIndex = getMainIndex ( ) ; PersistentStore store = session . sessionData . getRowStore ( table ) ; RowIterator it = table . rowIterator ( session ) ; while ( true ) { Row row = it . getNextRow ( ) ; if ( row == null ) { break ; } Object [ ] rowData = row . getData ( ) ; if ( ArrayUtil . hasNull ( rowData , rowColArray ) ) { if ( core . matchType == OpTypes . MATCH_SIMPLE ) { continue ; } } else if ( mainIndex . exists ( session , store , rowData , rowColArray ) ) { continue ; } if ( ArrayUtil . hasAllNull ( rowData , rowColArray ) ) { continue ; } String colValues = "" ; for ( int i = 0 ; i < rowColArray . length ; i ++ ) { Object o = rowData [ rowColArray [ i ] ] ; colValues += table . getColumnTypes ( ) [ i ] . convertToString ( o ) ; colValues += "," ; } String [ ] info = new String [ ] { getName ( ) . name , getMain ( ) . getName ( ) . name } ; throw Error . error ( ErrorCode . X_23502 , ErrorCode . CONSTRAINT , info ) ; } }
|
Check used before creating a new foreign key constraint ; this method checks all rows of a table to ensure they all have a corresponding row in the main table .
| 308
| 32
|
155,307
|
@ VisibleForTesting static int chooseTableSize ( int setSize ) { if ( setSize == 1 ) { return 2 ; } // Correct the size for open addressing to match desired load factor. // Round up to the next highest power of 2. int tableSize = Integer . highestOneBit ( setSize - 1 ) << 1 ; while ( tableSize * DESIRED_LOAD_FACTOR < setSize ) { tableSize <<= 1 ; } return tableSize ; }
|
Returns an array size suitable for the backing array of a hash table that uses open addressing with linear probing in its implementation . The returned size is the smallest power of two that can hold setSize elements with the desired load factor .
| 100
| 45
|
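The sizing rule above is compact enough to trace by hand. Below is a minimal, self-contained sketch of the same computation, with the load factor passed in as a parameter because the value of DESIRED_LOAD_FACTOR is not shown in this row (the 0.7 used in the sample calls is an assumption):

```java
public class TableSizeDemo {
    // Same rule as chooseTableSize above, but with the load factor made explicit.
    static int chooseTableSize(int setSize, double loadFactor) {
        if (setSize == 1) {
            return 2;
        }
        // Smallest power of two >= setSize.
        int tableSize = Integer.highestOneBit(setSize - 1) << 1;
        // Keep doubling until setSize fits under the desired load factor.
        while (tableSize * loadFactor < setSize) {
            tableSize <<= 1;
        }
        return tableSize;
    }

    public static void main(String[] args) {
        System.out.println(chooseTableSize(5, 0.7)); // 8:  8 * 0.7 = 5.6 >= 5
        System.out.println(chooseTableSize(6, 0.7)); // 16: 8 * 0.7 = 5.6 < 6, so double
    }
}
```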
155,308
|
@ Override public String [ ] decode ( long generation , String tableName , List < VoltType > types , List < String > names , String [ ] to , Object [ ] fields ) throws RuntimeException { Preconditions . checkArgument ( fields != null && fields . length > m_firstFieldOffset , "null or inapropriately sized export row array" ) ; /* * Builds a list of string formatters that reflects the row * column types. */ StringFieldDecoder [ ] fieldDecoders ; if ( ! m_fieldDecoders . containsKey ( generation ) ) { int fieldCount = 0 ; Map < String , DecodeType > typeMap = getTypeMap ( generation , types , names ) ; ImmutableList . Builder < StringFieldDecoder > lb = ImmutableList . builder ( ) ; for ( org . voltdb . exportclient . decode . DecodeType dt : typeMap . values ( ) ) { lb . add ( dt . accept ( decodingVisitor , fieldCount ++ , null ) ) ; } fieldDecoders = lb . build ( ) . toArray ( new StringFieldDecoder [ 0 ] ) ; m_fieldDecoders . put ( generation , fieldDecoders ) ; } else { fieldDecoders = m_fieldDecoders . get ( generation ) ; } if ( to == null || to . length < fieldDecoders . length ) { to = new String [ fieldDecoders . length ] ; } for ( int i = m_firstFieldOffset , j = 0 ; i < fields . length && j < fieldDecoders . length ; ++ i , ++ j ) { fieldDecoders [ j ] . decode ( to , fields [ i ] ) ; } return to ; }
|
Converts an object array containing exported row values into an array of their string representations
| 378
| 17
|
155,309
|
Iv2InFlight findHandle ( long ciHandle ) { assert ( ! shouldCheckThreadIdAssertion ( ) || m_expectedThreadId == Thread . currentThread ( ) . getId ( ) ) ; /* * Check the partition specific queue of handles */ int partitionId = getPartIdFromHandle ( ciHandle ) ; PartitionInFlightTracker partitionStuff = m_trackerMap . get ( partitionId ) ; if ( partitionStuff == null ) { // whoa, bad tmLog . error ( "Unable to find handle list for partition: " + partitionId + ", client interface handle: " + ciHandle ) ; return null ; } Iv2InFlight inFlight = partitionStuff . m_inFlights . remove ( ciHandle ) ; if ( inFlight != null ) { m_acg . reduceBackpressure ( inFlight . m_messageSize ) ; m_outstandingTxns -- ; return inFlight ; } return null ; }
|
Retrieve the client information for the specified handle
| 209
| 9
|
155,310
|
void freeOutstandingTxns ( ) { assert ( ! shouldCheckThreadIdAssertion ( ) || m_expectedThreadId == Thread . currentThread ( ) . getId ( ) ) ; for ( PartitionInFlightTracker tracker : m_trackerMap . values ( ) ) { for ( Iv2InFlight inflight : tracker . m_inFlights . values ( ) ) { m_outstandingTxns -- ; m_acg . reduceBackpressure ( inflight . m_messageSize ) ; } } }
|
When a connection goes away , free all resources held by that connection . This opens a small window of opportunity for mischief , in that work may still be outstanding in the cluster , but once the client goes away so does the mapping to the resources allocated to it .
| 112
| 49
|
155,311
|
void loadSchema ( Reader reader , Database db , DdlProceduresToLoad whichProcs ) throws VoltCompiler . VoltCompilerException { int currLineNo = 1 ; DDLStatement stmt = getNextStatement ( reader , m_compiler , currLineNo ) ; while ( stmt != null ) { // Some statements are processed by VoltDB and the rest are handled by HSQL. processVoltDBStatements ( db , whichProcs , stmt ) ; stmt = getNextStatement ( reader , m_compiler , stmt . endLineNo ) ; } try { reader . close ( ) ; } catch ( IOException e ) { throw m_compiler . new VoltCompilerException ( "Error closing schema file" ) ; } // process extra classes m_tracker . addExtraClasses ( m_classMatcher . getMatchedClassList ( ) ) ; // possibly save some memory m_classMatcher . clear ( ) ; }
|
Compile a DDL schema from an abstract reader
| 210
| 10
|
155,312
|
private String generateDDLForDRConflictsTable ( Database currentDB , Database previousDBIfAny , boolean isCurrentXDCR ) { StringBuilder sb = new StringBuilder ( ) ; if ( isCurrentXDCR ) { createDRConflictTables ( sb , previousDBIfAny ) ; } else { dropDRConflictTablesIfNeeded ( sb ) ; } return sb . toString ( ) ; }
|
Generate DDL to create or drop the DR conflict table
| 93
| 12
|
155,313
|
private void processCreateStreamStatement ( DDLStatement stmt , Database db , DdlProceduresToLoad whichProcs ) throws VoltCompilerException { String statement = stmt . statement ; Matcher statementMatcher = SQLParser . matchCreateStream ( statement ) ; if ( statementMatcher . matches ( ) ) { // check the table portion String tableName = checkIdentifierStart ( statementMatcher . group ( 1 ) , statement ) ; String targetName = null ; String columnName = null ; // Parse the EXPORT and PARTITION clauses. if ( ( statementMatcher . groupCount ( ) > 1 ) && ( statementMatcher . group ( 2 ) != null ) && ( ! statementMatcher . group ( 2 ) . isEmpty ( ) ) ) { String clauses = statementMatcher . group ( 2 ) ; Matcher matcher = SQLParser . matchAnyCreateStreamStatementClause ( clauses ) ; int start = 0 ; while ( matcher . find ( start ) ) { start = matcher . end ( ) ; if ( matcher . group ( 1 ) != null ) { // Add target info if it's an Export clause. Only one is allowed if ( targetName != null ) { throw m_compiler . new VoltCompilerException ( "Only one Export clause is allowed for CREATE STREAM." ) ; } targetName = matcher . group ( 1 ) ; } else { // Add partition info if it's a PARTITION clause. Only one is allowed. if ( columnName != null ) { throw m_compiler . new VoltCompilerException ( "Only one PARTITION clause is allowed for CREATE STREAM." ) ; } columnName = matcher . group ( 2 ) ; } } } VoltXMLElement tableXML = m_schema . findChild ( "table" , tableName . toUpperCase ( ) ) ; if ( tableXML != null ) { tableXML . attributes . put ( "stream" , "true" ) ; } else { throw m_compiler . new VoltCompilerException ( String . format ( "Invalid STREAM statement: table %s does not exist" , tableName ) ) ; } // process partition if specified if ( columnName != null ) { tableXML . attributes . put ( "partitioncolumn" , columnName . toUpperCase ( ) ) ; // Column validity check done by VoltCompiler in post-processing // mark the table as dirty for the purposes of caching sql statements m_compiler . markTableAsDirty ( tableName ) ; } // process export targetName = ( targetName != null ) ? checkIdentifierStart ( targetName , statement ) : Constants . DEFAULT_EXPORT_CONNECTOR_NAME ; if ( tableXML . attributes . containsKey ( "drTable" ) && "ENABLE" . equals ( tableXML . attributes . get ( "drTable" ) ) ) { throw m_compiler . new VoltCompilerException ( String . format ( "Invalid CREATE STREAM statement: table %s is a DR table." , tableName ) ) ; } else { tableXML . attributes . put ( "export" , targetName ) ; } } else { throw m_compiler . new VoltCompilerException ( String . format ( "Invalid CREATE STREAM statement: \"%s\", " + "expected syntax: CREATE STREAM <table> [PARTITION ON COLUMN <column-name>] [EXPORT TO TARGET <target>] (column datatype, ...); " , statement . substring ( 0 , statement . length ( ) - 1 ) ) ) ; } }
|
Process a VoltDB - specific create stream DDL statement
| 773
| 11
|
155,314
|
private void fillTrackerFromXML ( ) { for ( VoltXMLElement e : m_schema . children ) { if ( e . name . equals ( "table" ) ) { String tableName = e . attributes . get ( "name" ) ; String partitionCol = e . attributes . get ( "partitioncolumn" ) ; String export = e . attributes . get ( "export" ) ; String drTable = e . attributes . get ( "drTable" ) ; String migrateTarget = e . attributes . get ( "migrateExport" ) ; export = StringUtil . isEmpty ( export ) ? migrateTarget : export ; final boolean isStream = ( e . attributes . get ( "stream" ) != null ) ; if ( partitionCol != null ) { m_tracker . addPartition ( tableName , partitionCol ) ; } else { m_tracker . removePartition ( tableName ) ; } if ( ! StringUtil . isEmpty ( export ) ) { m_tracker . addExportedTable ( tableName , export , isStream ) ; } else { m_tracker . removeExportedTable ( tableName , isStream ) ; } if ( drTable != null ) { m_tracker . addDRedTable ( tableName , drTable ) ; } } } }
|
requested from the compiler
| 280
| 5
|
155,315
|
private static boolean indexesAreDups ( Index idx1 , Index idx2 ) { // same attributes? if ( idx1 . getType ( ) != idx2 . getType ( ) ) { return false ; } if ( idx1 . getCountable ( ) != idx2 . getCountable ( ) ) { return false ; } if ( idx1 . getUnique ( ) != idx2 . getUnique ( ) ) { return false ; } if ( idx1 . getAssumeunique ( ) != idx2 . getAssumeunique ( ) ) { return false ; } // same column count? if ( idx1 . getColumns ( ) . size ( ) != idx2 . getColumns ( ) . size ( ) ) { return false ; } //TODO: For index types like HASH that support only random access vs. scanned ranges, indexes on different // permutations of the same list of columns/expressions could be considered dupes. This code skips that edge // case optimization in favor of using a simpler more exact permutation-sensitive algorithm for all indexes. if ( ! ( idx1 . getExpressionsjson ( ) . equals ( idx2 . getExpressionsjson ( ) ) ) ) { return false ; } // Simple column indexes have identical empty expression strings so need to be distinguished other ways. // More complex expression indexes that have the same expression strings always have the same set of (base) // columns referenced in the same order, but we fall through and check them, anyway. // sort in index order the columns of idx1, each identified by its index in the base table int [ ] idx1baseTableOrder = new int [ idx1 . getColumns ( ) . size ( ) ] ; for ( ColumnRef cref : idx1 . getColumns ( ) ) { int index = cref . getIndex ( ) ; int baseTableIndex = cref . getColumn ( ) . getIndex ( ) ; idx1baseTableOrder [ index ] = baseTableIndex ; } // sort in index order the columns of idx2, each identified by its index in the base table int [ ] idx2baseTableOrder = new int [ idx2 . getColumns ( ) . size ( ) ] ; for ( ColumnRef cref : idx2 . getColumns ( ) ) { int index = cref . getIndex ( ) ; int baseTableIndex = cref . getColumn ( ) . getIndex ( ) ; idx2baseTableOrder [ index ] = baseTableIndex ; } // Duplicate indexes have identical columns in identical order. if ( ! Arrays . equals ( idx1baseTableOrder , idx2baseTableOrder ) ) { return false ; } // Check the predicates if ( idx1 . getPredicatejson ( ) . length ( ) > 0 ) { return idx1 . getPredicatejson ( ) . equals ( idx2 . getPredicatejson ( ) ) ; } if ( idx2 . getPredicatejson ( ) . length ( ) > 0 ) { return idx2 . getPredicatejson ( ) . equals ( idx1 . getPredicatejson ( ) ) ; } return true ; }
|
Return true if the two indexes are identical with a different name .
| 691
| 13
|
155,316
|
private void addConstraintToCatalog ( Table table , VoltXMLElement node , Map < String , String > indexReplacementMap , Map < String , Index > indexMap ) throws VoltCompilerException { assert node . name . equals ( "constraint" ) ; String name = node . attributes . get ( "name" ) ; String typeName = node . attributes . get ( "constrainttype" ) ; ConstraintType type = ConstraintType . valueOf ( typeName ) ; String tableName = table . getTypeName ( ) ; if ( type == ConstraintType . LIMIT ) { int tupleLimit = Integer . parseInt ( node . attributes . get ( "rowslimit" ) ) ; if ( tupleLimit < 0 ) { throw m_compiler . new VoltCompilerException ( "Invalid constraint limit number '" + tupleLimit + "'" ) ; } if ( tableLimitConstraintCounter . contains ( tableName ) ) { throw m_compiler . new VoltCompilerException ( "Too many table limit constraints for table " + tableName ) ; } else { tableLimitConstraintCounter . add ( tableName ) ; } table . setTuplelimit ( tupleLimit ) ; String deleteStmt = node . attributes . get ( "rowslimitdeletestmt" ) ; if ( deleteStmt != null ) { Statement catStmt = table . getTuplelimitdeletestmt ( ) . add ( "limit_delete" ) ; catStmt . setSqltext ( deleteStmt ) ; validateTupleLimitDeleteStmt ( catStmt ) ; } return ; } if ( type == ConstraintType . CHECK ) { String msg = "VoltDB does not enforce check constraints. " ; msg += "Constraint on table " + tableName + " will be ignored." ; m_compiler . addWarn ( msg ) ; return ; } else if ( type == ConstraintType . FOREIGN_KEY ) { String msg = "VoltDB does not enforce foreign key references and constraints. " ; msg += "Constraint on table " + tableName + " will be ignored." ; m_compiler . addWarn ( msg ) ; return ; } else if ( type == ConstraintType . MAIN ) { // should never see these assert ( false ) ; } else if ( type == ConstraintType . NOT_NULL ) { // these get handled by table metadata inspection return ; } else if ( type != ConstraintType . PRIMARY_KEY && type != ConstraintType . UNIQUE ) { throw m_compiler . new VoltCompilerException ( "Invalid constraint type '" + typeName + "'" ) ; } // else, create the unique index below // primary key code is in other places as well // The constraint is backed by an index, therefore we need to create it // TODO: We need to be able to use indexes for foreign keys. I am purposely // leaving those out right now because HSQLDB just makes too many of them. Constraint catalog_const = table . getConstraints ( ) . add ( name ) ; String indexName = node . attributes . get ( "index" ) ; assert ( indexName != null ) ; // handle replacements from duplicate index pruning if ( indexReplacementMap . containsKey ( indexName ) ) { indexName = indexReplacementMap . get ( indexName ) ; } Index catalog_index = indexMap . get ( indexName ) ; // Attach the index to the catalog constraint (catalog_const). if ( catalog_index != null ) { catalog_const . setIndex ( catalog_index ) ; // This may be redundant. catalog_index . setUnique ( true ) ; boolean assumeUnique = Boolean . parseBoolean ( node . attributes . get ( "assumeunique" ) ) ; catalog_index . setAssumeunique ( assumeUnique ) ; } catalog_const . setType ( type . getValue ( ) ) ; }
|
Add a constraint on a given table to the catalog
| 857
| 10
|
155,317
|
private static AbstractExpression buildPartialIndexPredicate ( AbstractParsedStmt dummy , String indexName , VoltXMLElement predicateXML , Table table , VoltCompiler compiler ) throws VoltCompilerException { // Make sure all column expressions refer to the same index table // before we can parse the XML to avoid the AbstractParsedStmt // exception/assertion String tableName = table . getTypeName ( ) ; assert ( tableName != null ) ; StringBuffer msg = new StringBuffer ( "Partial index \"" + indexName + "\" " ) ; // Make sure all column expressions refer the index table List < VoltXMLElement > columnRefs = predicateXML . findChildrenRecursively ( "columnref" ) ; for ( VoltXMLElement columnRef : columnRefs ) { String columnRefTableName = columnRef . attributes . get ( "table" ) ; if ( columnRefTableName != null && ! tableName . equals ( columnRefTableName ) ) { msg . append ( "with expression(s) involving other tables is not supported." ) ; throw compiler . new VoltCompilerException ( msg . toString ( ) ) ; } } // Now it safe to parse the expression tree AbstractExpression predicate = dummy . parseExpressionTree ( predicateXML ) ; if ( ! predicate . isValidExprForIndexesAndMVs ( msg , false ) ) { throw compiler . new VoltCompilerException ( msg . toString ( ) ) ; } return predicate ; }
|
Build the abstract expression representing the partial index predicate . Verify it satisfies the rules . Throw error messages otherwise .
| 321
| 21
|
155,318
|
public Result getLob ( Session session , long lobID , long offset , long length ) { throw Error . runtimeError ( ErrorCode . U_S0500 , "LobManager" ) ; }
|
Used for SUBSTRING
| 43
| 5
|
155,319
|
@ Override public void close ( ) throws SQLException { try { isClosed = true ; JDBC4ClientConnectionPool . dispose ( NativeConnection ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
|
Releases this Connection object s database and JDBC resources immediately instead of waiting for them to be automatically released .
| 55
| 22
|
155,320
|
@ Override public Array createArrayOf ( String typeName , Object [ ] elements ) throws SQLException { checkClosed ( ) ; throw SQLError . noSupport ( ) ; }
|
Factory method for creating Array objects .
| 42
| 7
|
155,321
|
@ Override public Statement createStatement ( ) throws SQLException { checkClosed ( ) ; try { return new JDBC4Statement ( this ) ; } catch ( Exception x ) { throw SQLError . get ( x ) ; } }
|
Creates a Statement object for sending SQL statements to the database .
| 53
| 13
|
155,322
|
@ Override public Struct createStruct ( String typeName , Object [ ] attributes ) throws SQLException { checkClosed ( ) ; throw SQLError . noSupport ( ) ; }
|
Factory method for creating Struct objects .
| 41
| 7
|
155,323
|
@ Override public PreparedStatement prepareStatement ( String sql , int [ ] columnIndexes ) throws SQLException { checkClosed ( ) ; throw SQLError . noSupport ( ) ; }
|
Creates a default PreparedStatement object capable of returning the auto - generated keys designated by the given array .
| 44
| 22
|
155,324
|
@ Override public PreparedStatement prepareStatement ( String sql , int resultSetType , int resultSetConcurrency ) throws SQLException { if ( ( resultSetType == ResultSet . TYPE_SCROLL_INSENSITIVE || resultSetType == ResultSet . TYPE_FORWARD_ONLY ) && resultSetConcurrency == ResultSet . CONCUR_READ_ONLY ) { return prepareStatement ( sql ) ; } checkClosed ( ) ; throw SQLError . noSupport ( ) ; }
|
Creates a PreparedStatement object that will generate ResultSet objects with the given type and concurrency .
| 110
| 21
|
155,325
|
@ Override public PreparedStatement prepareStatement ( String sql , int resultSetType , int resultSetConcurrency , int resultSetHoldability ) throws SQLException { checkClosed ( ) ; throw SQLError . noSupport ( ) ; }
|
Creates a PreparedStatement object that will generate ResultSet objects with the given type concurrency and holdability .
| 54
| 23
|
155,326
|
@ Override public void rollback ( ) throws SQLException { checkClosed ( ) ; if ( props . getProperty ( ROLLBACK_THROW_EXCEPTION , "true" ) . equalsIgnoreCase ( "true" ) ) { throw SQLError . noSupport ( ) ; } }
|
Undoes all changes made in the current transaction and releases any database locks currently held by this Connection object .
| 69
| 21
|
155,327
|
@ Override public void setAutoCommit ( boolean autoCommit ) throws SQLException { checkClosed ( ) ; // Always true - error out only if the client is trying to set something else if ( ! autoCommit && ( props . getProperty ( COMMIT_THROW_EXCEPTION , "true" ) . equalsIgnoreCase ( "true" ) ) ) { throw SQLError . noSupport ( ) ; } else { this . autoCommit = autoCommit ; } }
|
Sets this connection s auto - commit mode to the given state .
| 112
| 14
|
155,328
|
@ Override public void setReadOnly ( boolean readOnly ) throws SQLException { checkClosed ( ) ; if ( ! Boolean . parseBoolean ( props . getProperty ( "enableSetReadOnly" , "false" ) ) ) { throw SQLError . noSupport ( ) ; } }
|
Puts this connection in read - only mode as a hint to the driver to enable database optimizations .
| 66
| 20
|
155,329
|
@ Override public void setTypeMap ( Map < String , Class < ? > > map ) throws SQLException { checkClosed ( ) ; throw SQLError . noSupport ( ) ; }
|
Installs the given TypeMap object as the type map for this Connection object .
| 44
| 16
|
155,330
|
@ Override public void saveStatistics ( ClientStats stats , String file ) throws IOException { this . NativeConnection . saveStatistics ( stats , file ) ; }
|
Save statistics to a file
| 33
| 5
|
155,331
|
private static boolean trimProcQuotas ( ZooKeeper zk , String path ) throws KeeperException , IOException , InterruptedException { if ( Quotas . quotaZookeeper . equals ( path ) ) { return true ; } List < String > children = zk . getChildren ( path , false ) ; if ( children . size ( ) == 0 ) { zk . delete ( path , - 1 ) ; String parent = path . substring ( 0 , path . lastIndexOf ( '/' ) ) ; return trimProcQuotas ( zk , parent ) ; } else { return true ; } }
|
trim the quota tree to recover unwanted tree elements in the quota s tree
| 134
| 15
|
155,332
|
public static boolean delQuota ( ZooKeeper zk , String path , boolean bytes , boolean numNodes ) throws KeeperException , IOException , InterruptedException { String parentPath = Quotas . quotaZookeeper + path ; String quotaPath = Quotas . quotaZookeeper + path + "/" + Quotas . limitNode ; if ( zk . exists ( quotaPath , false ) == null ) { System . out . println ( "Quota does not exist for " + path ) ; return true ; } byte [ ] data = null ; try { data = zk . getData ( quotaPath , false , new Stat ( ) ) ; } catch ( KeeperException . NoNodeException ne ) { System . err . println ( "quota does not exist for " + path ) ; return true ; } StatsTrack strack = new StatsTrack ( new String ( data ) ) ; if ( bytes && ! numNodes ) { strack . setBytes ( - 1L ) ; zk . setData ( quotaPath , strack . toString ( ) . getBytes ( ) , - 1 ) ; } else if ( ! bytes && numNodes ) { strack . setCount ( - 1 ) ; zk . setData ( quotaPath , strack . toString ( ) . getBytes ( ) , - 1 ) ; } else if ( bytes && numNodes ) { // delete till you can find a node with more than // one child List < String > children = zk . getChildren ( parentPath , false ) ; // / delete the direct children first for ( String child : children ) { zk . delete ( parentPath + "/" + child , - 1 ) ; } // cut the tree till their is more than one child trimProcQuotas ( zk , parentPath ) ; } return true ; }
|
this method deletes quota for a node .
| 394
| 9
|
155,333
|
private static int generateCrudPKeyWhereClause ( Column partitioncolumn , Constraint pkey , StringBuilder sb ) { // Sort the catalog index columns by index column order. ArrayList < ColumnRef > indexColumns = new ArrayList < ColumnRef > ( pkey . getIndex ( ) . getColumns ( ) . size ( ) ) ; for ( ColumnRef c : pkey . getIndex ( ) . getColumns ( ) ) { indexColumns . add ( c ) ; } Collections . sort ( indexColumns , new ColumnRefComparator ( ) ) ; boolean first = true ; int partitionOffset = - 1 ; sb . append ( " WHERE " ) ; for ( ColumnRef pkc : indexColumns ) { if ( ! first ) sb . append ( " AND " ) ; first = false ; sb . append ( "(" + pkc . getColumn ( ) . getName ( ) + " = ?" + ")" ) ; if ( pkc . getColumn ( ) == partitioncolumn ) { partitionOffset = pkc . getIndex ( ) ; } } return partitionOffset ; }
|
Helper to generate a WHERE pkey_col1 = ? pkey_col2 = ? ... ; clause .
| 243
| 23
|
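For concreteness, here is a short sketch of the string the loop above produces, using hypothetical key columns C1 and C2 (the column names are illustrative, not from the source):

```java
public class PKeyWhereClauseDemo {
    public static void main(String[] args) {
        StringBuilder sb = new StringBuilder();
        String[] pkeyCols = { "C1", "C2" }; // hypothetical primary-key column names
        boolean first = true;
        sb.append(" WHERE ");
        for (String col : pkeyCols) {
            if (!first) {
                sb.append(" AND ");
            }
            first = false;
            sb.append("(" + col + " = ?)");
        }
        System.out.println(sb); // " WHERE (C1 = ?) AND (C2 = ?)"
    }
}
```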
155,334
|
private static void generateCrudExpressionColumns ( Table table , StringBuilder sb ) { boolean first = true ; // Sort the catalog table columns by column order. ArrayList < Column > tableColumns = new ArrayList < Column > ( table . getColumns ( ) . size ( ) ) ; for ( Column c : table . getColumns ( ) ) { tableColumns . add ( c ) ; } Collections . sort ( tableColumns , new TableColumnComparator ( ) ) ; for ( Column c : tableColumns ) { if ( ! first ) sb . append ( ", " ) ; first = false ; sb . append ( c . getName ( ) + " = ?" ) ; } }
|
Helper to generate a full col1 = ? col2 = ? ... clause .
| 153
| 16
|
155,335
|
public InProcessVoltDBServer start ( ) { DeploymentBuilder depBuilder = new DeploymentBuilder ( sitesPerHost , 1 , 0 ) ; depBuilder . setEnableCommandLogging ( false ) ; depBuilder . setUseDDLSchema ( true ) ; depBuilder . setHTTPDPort ( 8080 ) ; depBuilder . setJSONAPIEnabled ( true ) ; VoltDB . Configuration config = new VoltDB . Configuration ( ) ; if ( pathToLicense != null ) { config . m_pathToLicense = pathToLicense ; } else { config . m_pathToLicense = "./license.xml" ; } File tempDeployment = null ; try { tempDeployment = File . createTempFile ( "volt_deployment_" , ".xml" ) ; } catch ( IOException e ) { e . printStackTrace ( ) ; System . exit ( - 1 ) ; } depBuilder . writeXML ( tempDeployment . getAbsolutePath ( ) ) ; config . m_pathToDeployment = tempDeployment . getAbsolutePath ( ) ; server = new ServerThread ( config ) ; server . start ( ) ; server . waitForInitialization ( ) ; return this ; }
|
Starts the in - process server and blocks until it is ready to accept connections .
| 261
| 17
|
155,336
|
public void compile ( Session session ) { if ( ! database . schemaManager . schemaExists ( compileTimeSchema . name ) ) { compileTimeSchema = session . getSchemaHsqlName ( null ) ; } session . setSchema ( compileTimeSchema . name ) ; ParserDQL p = new ParserDQL ( session , new Scanner ( statement ) ) ; p . read ( ) ; viewSubQuery = p . XreadViewSubquery ( this ) ; queryExpression = viewSubQuery . queryExpression ; if ( getColumnCount ( ) == 0 ) { if ( columnNames == null ) { columnNames = viewSubQuery . queryExpression . getResultColumnNames ( ) ; } if ( columnNames . length != viewSubQuery . queryExpression . getColumnCount ( ) ) { throw Error . error ( ErrorCode . X_42593 , tableName . statementName ) ; } TableUtil . setColumnsInSchemaTable ( this , columnNames , queryExpression . getColumnTypes ( ) ) ; } // viewSubqueries = p . compileContext . getSubqueries ( ) ; for ( int i = 0 ; i < viewSubqueries . length ; i ++ ) { if ( viewSubqueries [ i ] . parentView == null ) { viewSubqueries [ i ] . parentView = this ; } } // viewSubQuery . getTable ( ) . view = this ; viewSubQuery . getTable ( ) . columnList = columnList ; schemaObjectNames = p . compileContext . getSchemaObjectNames ( ) ; baseTable = queryExpression . getBaseTable ( ) ; if ( baseTable == null ) { return ; } switch ( check ) { case SchemaObject . ViewCheckModes . CHECK_NONE : break ; case SchemaObject . ViewCheckModes . CHECK_LOCAL : checkExpression = queryExpression . getCheckCondition ( ) ; break ; case SchemaObject . ViewCheckModes . CHECK_CASCADE : break ; default : throw Error . runtimeError ( ErrorCode . U_S0500 , "View" ) ; } }
|
Compiles the query expression and sets up the columns .
| 463
| 11
|
155,337
|
public static Pair < InMemoryJarfile , String > loadAndUpgradeCatalogFromJar ( byte [ ] catalogBytes , boolean isXDCR ) throws IOException { // Throws IOException on load failure. InMemoryJarfile jarfile = loadInMemoryJarFile ( catalogBytes ) ; return loadAndUpgradeCatalogFromJar ( jarfile , isXDCR ) ; }
|
Load a catalog from the jar bytes .
| 78
| 8
|
155,338
|
public static Pair < InMemoryJarfile , String > loadAndUpgradeCatalogFromJar ( InMemoryJarfile jarfile , boolean isXDCR ) throws IOException { // Let VoltCompiler do a version check and upgrade the catalog on the fly. // I.e. jarfile may be modified. VoltCompiler compiler = new VoltCompiler ( isXDCR ) ; String upgradedFromVersion = compiler . upgradeCatalogAsNeeded ( jarfile ) ; return new Pair <> ( jarfile , upgradedFromVersion ) ; }
|
Load a catalog from the InMemoryJarfile .
| 111
| 10
|
155,339
|
public static String getSerializedCatalogStringFromJar ( InMemoryJarfile jarfile ) { byte [ ] serializedCatalogBytes = jarfile . get ( CatalogUtil . CATALOG_FILENAME ) ; String serializedCatalog = new String ( serializedCatalogBytes , Constants . UTF8ENCODING ) ; return serializedCatalog ; }
|
Convenience method to extract the catalog commands from an InMemoryJarfile as a string
| 74
| 18
|
155,340
|
public static String [ ] getBuildInfoFromJar ( InMemoryJarfile jarfile ) throws IOException { // Read the raw build info bytes. byte [ ] buildInfoBytes = jarfile . get ( CATALOG_BUILDINFO_FILENAME ) ; if ( buildInfoBytes == null ) { throw new IOException ( "Catalog build information not found - please build your application using the current version of VoltDB." ) ; } // Convert the bytes to a string and split by lines. String buildInfo ; buildInfo = new String ( buildInfoBytes , Constants . UTF8ENCODING ) ; String [ ] buildInfoLines = buildInfo . split ( "\n" ) ; // Sanity check the number of lines and the version string. if ( buildInfoLines . length < 1 ) { throw new IOException ( "Catalog build info has no version string." ) ; } String versionFromCatalog = buildInfoLines [ 0 ] . trim ( ) ; if ( ! CatalogUtil . isCatalogVersionValid ( versionFromCatalog ) ) { throw new IOException ( String . format ( "Catalog build info version (%s) is bad." , versionFromCatalog ) ) ; } // Trim leading/trailing whitespace. for ( int i = 0 ; i < buildInfoLines . length ; ++ i ) { buildInfoLines [ i ] = buildInfoLines [ i ] . trim ( ) ; } return buildInfoLines ; }
|
Get the catalog build info from the jar bytes . Performs sanity checks on the build info and version strings .
| 308
| 22
|
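A minimal sketch of the split-and-trim step above, on a hypothetical build-info payload (the payload contents are invented; only the "version string on line 0" convention comes from the row):

```java
public class BuildInfoParseDemo {
    public static void main(String[] args) {
        String buildInfo = "9.2.1\nbuild: abc123"; // hypothetical payload
        // Split by lines; the version string is expected on the first line.
        String[] buildInfoLines = buildInfo.split("\n");
        String versionFromCatalog = buildInfoLines[0].trim();
        System.out.println(versionFromCatalog); // "9.2.1"
    }
}
```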
155,341
|
public static String getAutoGenDDLFromJar ( InMemoryJarfile jarfile ) throws IOException { // Read the raw auto generated ddl bytes. byte [ ] ddlBytes = jarfile . get ( VoltCompiler . AUTOGEN_DDL_FILE_NAME ) ; if ( ddlBytes == null ) { throw new IOException ( "Auto generated schema DDL not found - please make sure the database is initialized with valid schema." ) ; } String ddl = new String ( ddlBytes , StandardCharsets . UTF_8 ) ; return ddl . trim ( ) ; }
|
Get the auto generated DDL from the catalog jar .
| 127
| 11
|
155,342
|
public static InMemoryJarfile getCatalogJarWithoutDefaultArtifacts ( final InMemoryJarfile jarfile ) { InMemoryJarfile cloneJar = jarfile . deepCopy ( ) ; for ( String entry : CATALOG_DEFAULT_ARTIFACTS ) { cloneJar . remove ( entry ) ; } return cloneJar ; }
|
Removes the default voltdb artifact files from catalog and returns the resultant jar file . This will contain dependency files needed for generated stored procs
| 70
| 32
|
155,343
|
public static InMemoryJarfile loadInMemoryJarFile ( byte [ ] catalogBytes ) throws IOException { assert ( catalogBytes != null ) ; InMemoryJarfile jarfile = new InMemoryJarfile ( catalogBytes ) ; if ( ! jarfile . containsKey ( CATALOG_FILENAME ) ) { throw new IOException ( "Database catalog not found - please build your application using the current version of VoltDB." ) ; } return jarfile ; }
|
Load an in - memory catalog jar file from jar bytes .
| 97
| 12
|
155,344
|
public static boolean isSnapshotablePersistentTableView ( Database db , Table table ) { Table materializer = table . getMaterializer ( ) ; if ( materializer == null ) { // Return false if it is not a materialized view. return false ; } if ( CatalogUtil . isTableExportOnly ( db , materializer ) ) { // The view source table should not be a streamed table. return false ; } if ( ! table . getIsreplicated ( ) && table . getPartitioncolumn ( ) == null ) { // If the view table is implicitly partitioned (maybe was not in snapshot), // its maintenance is not turned off during the snapshot restore process. // Let it take care of its own data by itself. // Do not attempt to restore data for it. return false ; } return true ; }
|
Test if a table is a persistent table view and should be included in the snapshot .
| 172
| 17
|
155,345
|
public static boolean isSnapshotableStreamedTableView ( Database db , Table table ) { Table materializer = table . getMaterializer ( ) ; if ( materializer == null ) { // Return false if it is not a materialized view. return false ; } if ( ! CatalogUtil . isTableExportOnly ( db , materializer ) ) { // Test if the view source table is a streamed table. return false ; } // Non-partitioned export table are not allowed so it should not get here. Column sourcePartitionColumn = materializer . getPartitioncolumn ( ) ; if ( sourcePartitionColumn == null ) { return false ; } // Make sure the partition column is present in the view. // Export table views are special, we use column names to match.. Column pc = table . getColumns ( ) . get ( sourcePartitionColumn . getName ( ) ) ; if ( pc == null ) { return false ; } return true ; }
|
Test if a table is a streamed table view and should be included in the snapshot .
| 203
| 17
|
155,346
|
public static long getUniqueIdForFragment ( PlanFragment frag ) { long retval = 0 ; CatalogType parent = frag . getParent ( ) ; retval = ( ( long ) parent . getParent ( ) . getRelativeIndex ( ) ) << 32 ; retval += ( ( long ) parent . getRelativeIndex ( ) ) << 16 ; retval += frag . getRelativeIndex ( ) ; return retval ; }
|
Get a unique id for a plan fragment by munging the indices of it s parents and grandparents in the catalog .
| 93
| 24
|
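The bit layout implied by the shifts above (grandparent index in the high 32 bits, parent index in bits 16–31, fragment index in the low 16 bits) can be checked with a tiny standalone computation; the index values below are made up:

```java
public class FragmentIdDemo {
    // Mirrors the shifts in getUniqueIdForFragment above.
    static long pack(long grandparent, long parent, long fragment) {
        return (grandparent << 32) + (parent << 16) + fragment;
    }

    public static void main(String[] args) {
        long id = pack(3, 2, 1);
        System.out.println(Long.toHexString(id)); // 300020001
    }
}
```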
155,347
|
public static < T extends CatalogType > List < T > getSortedCatalogItems ( CatalogMap < T > items , String sortFieldName ) { assert ( items != null ) ; assert ( sortFieldName != null ) ; // build a treemap based on the field value TreeMap < Object , T > map = new TreeMap <> ( ) ; boolean hasField = false ; for ( T item : items ) { // check the first time through for the field if ( hasField == false ) { hasField = ArrayUtils . contains ( item . getFields ( ) , sortFieldName ) ; } assert ( hasField == true ) ; map . put ( item . getField ( sortFieldName ) , item ) ; } // create a sorted list from the map ArrayList < T > retval = new ArrayList <> ( ) ; for ( T item : map . values ( ) ) { retval . add ( item ) ; } return retval ; }
|
Given a set of catalog items , return a list of them sorted by the value of a specified field . The field is specified by name . If the field doesn t exist , trip an assertion . This is primarily used to sort a table s columns or a procedure s parameters .
| 205
| 54
|
155,348
|
public static < T extends CatalogType > void getSortedCatalogItems ( CatalogMap < T > items , String sortFieldName , List < T > result ) { result . addAll ( getSortedCatalogItems ( items , sortFieldName ) ) ; }
|
A getSortedCatalogItems variant with the result list filled in - place
| 54
| 15
|
155,349
|
public static Index getPrimaryKeyIndex ( Table catalogTable ) throws Exception { // We first need to find the pkey constraint Constraint catalog_constraint = null ; for ( Constraint c : catalogTable . getConstraints ( ) ) { if ( c . getType ( ) == ConstraintType . PRIMARY_KEY . getValue ( ) ) { catalog_constraint = c ; break ; } } if ( catalog_constraint == null ) { throw new Exception ( "ERROR: Table '" + catalogTable . getTypeName ( ) + "' does not have a PRIMARY KEY constraint" ) ; } // And then grab the index that it is using return ( catalog_constraint . getIndex ( ) ) ; }
|
For a given Table catalog object return the PrimaryKey Index catalog object
| 162
| 13
|
155,350
|
public static Collection < Column > getPrimaryKeyColumns ( Table catalogTable ) { Collection < Column > columns = new ArrayList <> ( ) ; Index catalog_idx = null ; try { catalog_idx = CatalogUtil . getPrimaryKeyIndex ( catalogTable ) ; } catch ( Exception ex ) { // IGNORE return ( columns ) ; } assert ( catalog_idx != null ) ; for ( ColumnRef catalog_col_ref : getSortedCatalogItems ( catalog_idx . getColumns ( ) , "index" ) ) { columns . add ( catalog_col_ref . getColumn ( ) ) ; } return ( columns ) ; }
|
Return all of the primary key columns for a particular table . If the table does not have a primary key then the returned list will be empty .
| 141
| 28
|
155,351
|
public static boolean isTableExportOnly ( org . voltdb . catalog . Database database , org . voltdb . catalog . Table table ) { int type = table . getTabletype ( ) ; if ( TableType . isInvalidType ( type ) ) { // This implementation uses connectors instead of just looking at the tableType // because snapshots or catalogs from pre-9.0 versions (DR) will not have this new tableType field. for ( Connector connector : database . getConnectors ( ) ) { // iterate the connector tableinfo list looking for tableIndex // tableInfo has a reference to a table - can compare the reference // to the desired table by looking at the relative index. ick. for ( ConnectorTableInfo tableInfo : connector . getTableinfo ( ) ) { if ( tableInfo . getTable ( ) . getRelativeIndex ( ) == table . getRelativeIndex ( ) ) { return true ; } } } // Found no connectors return false ; } else { return TableType . isStream ( type ) ; } }
|
Return true if a table is a stream This function is duplicated in CatalogUtil . h
| 222
| 19
|
155,352
|
public static boolean isTableMaterializeViewSource ( org . voltdb . catalog . Database database , org . voltdb . catalog . Table table ) { CatalogMap < Table > tables = database . getTables ( ) ; for ( Table t : tables ) { Table matsrc = t . getMaterializer ( ) ; if ( ( matsrc != null ) && ( matsrc . getRelativeIndex ( ) == table . getRelativeIndex ( ) ) ) { return true ; } } return false ; }
|
Return true if a table is the source table for a materialized view .
| 108
| 15
|
155,353
|
public static List < Table > getMaterializeViews ( org . voltdb . catalog . Database database , org . voltdb . catalog . Table table ) { ArrayList < Table > tlist = new ArrayList <> ( ) ; CatalogMap < Table > tables = database . getTables ( ) ; for ( Table t : tables ) { Table matsrc = t . getMaterializer ( ) ; if ( ( matsrc != null ) && ( matsrc . getRelativeIndex ( ) == table . getRelativeIndex ( ) ) ) { tlist . add ( t ) ; } } return tlist ; }
|
Return list of materialized views for table .
| 132
| 9
|
155,354
|
public static boolean isCatalogCompatible ( String catalogVersionStr ) { if ( catalogVersionStr == null || catalogVersionStr . isEmpty ( ) ) { return false ; } //Check that it is a properly formed version string Object [ ] catalogVersion = MiscUtils . parseVersionString ( catalogVersionStr ) ; if ( catalogVersion == null ) { throw new IllegalArgumentException ( "Invalid version string " + catalogVersionStr ) ; } if ( ! catalogVersionStr . equals ( VoltDB . instance ( ) . getVersionString ( ) ) ) { return false ; } return true ; }
|
Check if a catalog compiled with the given version of VoltDB is compatible with the current version of VoltDB .
| 126
| 22
|
155,355
|
public static boolean isCatalogVersionValid ( String catalogVersionStr ) { // Do we have a version string? if ( catalogVersionStr == null || catalogVersionStr . isEmpty ( ) ) { return false ; } //Check that it is a properly formed version string Object [ ] catalogVersion = MiscUtils . parseVersionString ( catalogVersionStr ) ; if ( catalogVersion == null ) { return false ; } // It's valid. return true ; }
|
Check if a catalog version string is valid .
| 94
| 9
|
155,356
|
public static String compileDeployment ( Catalog catalog , DeploymentType deployment , boolean isPlaceHolderCatalog ) { String errmsg = null ; try { validateDeployment ( catalog , deployment ) ; // add our hacky Deployment to the catalog if ( catalog . getClusters ( ) . get ( "cluster" ) . getDeployment ( ) . get ( "deployment" ) == null ) { catalog . getClusters ( ) . get ( "cluster" ) . getDeployment ( ) . add ( "deployment" ) ; } // set the cluster info setClusterInfo ( catalog , deployment ) ; //Set the snapshot schedule setSnapshotInfo ( catalog , deployment . getSnapshot ( ) ) ; //Set enable security setSecurityEnabled ( catalog , deployment . getSecurity ( ) ) ; // set the users info // We'll skip this when building the dummy catalog on startup // so that we don't spew misleading user/role warnings if ( ! isPlaceHolderCatalog ) { setUsersInfo ( catalog , deployment . getUsers ( ) ) ; } // set the HTTPD info setHTTPDInfo ( catalog , deployment . getHttpd ( ) , deployment . getSsl ( ) ) ; setDrInfo ( catalog , deployment . getDr ( ) , deployment . getCluster ( ) , isPlaceHolderCatalog ) ; if ( ! isPlaceHolderCatalog ) { setExportInfo ( catalog , deployment . getExport ( ) ) ; setImportInfo ( catalog , deployment . getImport ( ) ) ; setSnmpInfo ( deployment . getSnmp ( ) ) ; } setCommandLogInfo ( catalog , deployment . getCommandlog ( ) ) ; //This is here so we can update our local list of paths. //I would not have needed this if validateResourceMonitorInfo didnt exist here. VoltDB . instance ( ) . loadLegacyPathProperties ( deployment ) ; setupPaths ( deployment . getPaths ( ) ) ; validateResourceMonitorInfo ( deployment ) ; } catch ( Exception e ) { // Anything that goes wrong anywhere in trying to handle the deployment file // should return an error, and let the caller decide what to do (crash or not, for // example) errmsg = "Error validating deployment configuration: " + e . getMessage ( ) ; hostLog . error ( errmsg ) ; return errmsg ; } return null ; }
|
Parse the deployment . xml file and add its data into the catalog .
| 504
| 15
|
155,357
|
public static DeploymentType parseDeployment ( String deploymentURL ) { // get the URL/path for the deployment and prep an InputStream InputStream deployIS = null ; try { URL deployURL = new URL ( deploymentURL ) ; deployIS = deployURL . openStream ( ) ; } catch ( MalformedURLException ex ) { // Invalid URL. Try as a file. try { deployIS = new FileInputStream ( deploymentURL ) ; } catch ( FileNotFoundException e ) { deployIS = null ; } } catch ( IOException ioex ) { deployIS = null ; } // make sure the file exists if ( deployIS == null ) { hostLog . error ( "Could not locate deployment info at given URL: " + deploymentURL ) ; return null ; } else { hostLog . info ( "URL of deployment info: " + deploymentURL ) ; } return getDeployment ( deployIS ) ; }
|
Parses the deployment XML file .
| 194
| 8
|
155,358
|
public static DeploymentType parseDeploymentFromString ( String deploymentString ) { ByteArrayInputStream byteIS ; byteIS = new ByteArrayInputStream ( deploymentString . getBytes ( Constants . UTF8ENCODING ) ) ; // get deployment info from xml file return getDeployment ( byteIS ) ; }
|
Parses the deployment XML string .
| 66
| 8
|
155,359
|
public static String getDeployment ( DeploymentType deployment , boolean indent ) throws IOException { try { if ( m_jc == null || m_schema == null ) { throw new RuntimeException ( "Error schema validation." ) ; } Marshaller marshaller = m_jc . createMarshaller ( ) ; marshaller . setSchema ( m_schema ) ; marshaller . setProperty ( Marshaller . JAXB_FORMATTED_OUTPUT , Boolean . valueOf ( indent ) ) ; StringWriter sw = new StringWriter ( ) ; marshaller . marshal ( new JAXBElement <> ( new QName ( "" , "deployment" ) , DeploymentType . class , deployment ) , sw ) ; return sw . toString ( ) ; } catch ( JAXBException e ) { // Convert some linked exceptions to more friendly errors. if ( e . getLinkedException ( ) instanceof java . io . FileNotFoundException ) { hostLog . error ( e . getLinkedException ( ) . getMessage ( ) ) ; return null ; } else if ( e . getLinkedException ( ) instanceof org . xml . sax . SAXParseException ) { hostLog . error ( "Error schema validating deployment.xml file. " + e . getLinkedException ( ) . getMessage ( ) ) ; return null ; } else { throw new RuntimeException ( e ) ; } } }
|
Given the deployment object generate the XML
| 311
| 7
|
155,360
|
private static void validateDeployment ( Catalog catalog , DeploymentType deployment ) { if ( deployment . getSecurity ( ) != null && deployment . getSecurity ( ) . isEnabled ( ) ) { if ( deployment . getUsers ( ) == null ) { String msg = "Cannot enable security without defining at least one user in the built-in ADMINISTRATOR role in the deployment file." ; throw new RuntimeException ( msg ) ; } boolean foundAdminUser = false ; for ( UsersType . User user : deployment . getUsers ( ) . getUser ( ) ) { if ( user . getRoles ( ) == null ) { continue ; } for ( String role : extractUserRoles ( user ) ) { if ( role . equalsIgnoreCase ( ADMIN ) ) { foundAdminUser = true ; break ; } } } if ( ! foundAdminUser ) { String msg = "Cannot enable security without defining at least one user in the built-in ADMINISTRATOR role in the deployment file." ; throw new RuntimeException ( msg ) ; } } }
|
Validate the contents of the deployment . xml file . This is for validating VoltDB requirements , not XML schema correctness
| 224
| 23
|
155,361
|
private static void setClusterInfo ( Catalog catalog , DeploymentType deployment ) { ClusterType cluster = deployment . getCluster ( ) ; int kFactor = cluster . getKfactor ( ) ; Cluster catCluster = catalog . getClusters ( ) . get ( "cluster" ) ; // copy the deployment info that is currently not recorded anywhere else Deployment catDeploy = catCluster . getDeployment ( ) . get ( "deployment" ) ; catDeploy . setKfactor ( kFactor ) ; if ( deployment . getPartitionDetection ( ) . isEnabled ( ) ) { catCluster . setNetworkpartition ( true ) ; } else { catCluster . setNetworkpartition ( false ) ; } setSystemSettings ( deployment , catDeploy ) ; catCluster . setHeartbeattimeout ( deployment . getHeartbeat ( ) . getTimeout ( ) ) ; // copy schema modification behavior from xml to catalog if ( cluster . getSchema ( ) != null ) { catCluster . setUseddlschema ( cluster . getSchema ( ) == SchemaType . DDL ) ; } else { // Don't think we can get here, deployment schema guarantees a default value hostLog . warn ( "Schema modification setting not found. " + "Forcing default behavior of UpdateCatalog to modify database schema." ) ; catCluster . setUseddlschema ( false ) ; } }
|
Set cluster info in the catalog .
| 302
| 7
|
155,362
|
private static void setImportInfo ( Catalog catalog , ImportType importType ) { if ( importType == null ) { return ; } List < String > streamList = new ArrayList <> ( ) ; List < ImportConfigurationType > kafkaConfigs = new ArrayList <> ( ) ; for ( ImportConfigurationType importConfiguration : importType . getConfiguration ( ) ) { boolean connectorEnabled = importConfiguration . isEnabled ( ) ; if ( ! connectorEnabled ) { continue ; } if ( importConfiguration . getType ( ) . equals ( ServerImportEnum . KAFKA ) ) { kafkaConfigs . add ( importConfiguration ) ; } if ( ! streamList . contains ( importConfiguration . getModule ( ) ) ) { streamList . add ( importConfiguration . getModule ( ) ) ; } buildImportProcessorConfiguration ( importConfiguration , true ) ; } validateKafkaConfig ( kafkaConfigs ) ; }
|
Set deployment time settings for import
| 196
| 6
|
155,363
|
private static void validateKafkaConfig ( List < ImportConfigurationType > configs ) { if ( configs . isEmpty ( ) ) { return ; } // We associate each group id with the set of topics that belong to it HashMap < String , HashSet < String > > groupidToTopics = new HashMap <> ( ) ; for ( ImportConfigurationType config : configs ) { String groupid = "" ; HashSet < String > topics = new HashSet <> ( ) ; // Fetch topics and group id from each configuration for ( PropertyType pt : config . getProperty ( ) ) { if ( pt . getName ( ) . equals ( "topics" ) ) { topics . addAll ( Arrays . asList ( pt . getValue ( ) . split ( "\\s*,\\s*" ) ) ) ; } else if ( pt . getName ( ) . equals ( "groupid" ) ) { groupid = pt . getValue ( ) ; } } if ( groupidToTopics . containsKey ( groupid ) ) { // Under this group id, we first union the set of already-stored topics with the set of newly-seen topics. HashSet < String > union = new HashSet <> ( groupidToTopics . get ( groupid ) ) ; union . addAll ( topics ) ; if ( union . size ( ) == ( topics . size ( ) + groupidToTopics . get ( groupid ) . size ( ) ) ) { groupidToTopics . put ( groupid , union ) ; } else { // If the size of the union doesn't equal to the sum of sizes of newly-seen topic set and // already-stored topic set, those two sets must overlap with each other, which means that // there must be two configurations having the same group id and overlapping sets of topics. // Thus, we throw the RuntimeException. throw new RuntimeException ( "Invalid import configuration. Two Kafka entries have the same groupid and topic." ) ; } } else { groupidToTopics . put ( groupid , topics ) ; } } }
|
Check whether two Kafka configurations have both the same topic and group id . If two configurations have the same group id and overlapping sets of topics a RuntimeException will be thrown .
| 443
| 34
|
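The overlap test at the heart of the method above relies on set cardinality: two topic sets under the same group id conflict exactly when |A ∪ B| < |A| + |B|. A minimal sketch with invented topic names:

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class TopicOverlapDemo {
    public static void main(String[] args) {
        Set<String> stored = new HashSet<>(Arrays.asList("t1", "t2"));   // already-stored topics
        Set<String> incoming = new HashSet<>(Arrays.asList("t2", "t3")); // newly-seen topics
        Set<String> union = new HashSet<>(stored);
        union.addAll(incoming);
        // 3 != 2 + 2, so the sets overlap (on "t2") and the config would be rejected.
        System.out.println(union.size() != stored.size() + incoming.size()); // true
    }
}
```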
155,364
|
private static void setSnmpInfo ( SnmpType snmpType ) { if ( snmpType == null || ! snmpType . isEnabled ( ) ) { return ; } //Validate Snmp Configuration. if ( snmpType . getTarget ( ) == null || snmpType . getTarget ( ) . trim ( ) . length ( ) == 0 ) { throw new IllegalArgumentException ( "Target must be specified for SNMP configuration." ) ; } if ( snmpType . getAuthkey ( ) != null && snmpType . getAuthkey ( ) . length ( ) < 8 ) { throw new IllegalArgumentException ( "SNMP Authkey must be > 8 characters." ) ; } if ( snmpType . getPrivacykey ( ) != null && snmpType . getPrivacykey ( ) . length ( ) < 8 ) { throw new IllegalArgumentException ( "SNMP Privacy Key must be > 8 characters." ) ; } }
|
Validate Snmp Configuration .
| 202
| 6
|
155,365
|
private static void mergeKafka10ImportConfigurations ( Map < String , ImportConfiguration > processorConfig ) { if ( processorConfig . isEmpty ( ) ) { return ; } Map < String , ImportConfiguration > kafka10ProcessorConfigs = new HashMap <> ( ) ; Iterator < Map . Entry < String , ImportConfiguration > > iter = processorConfig . entrySet ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { String configName = iter . next ( ) . getKey ( ) ; ImportConfiguration importConfig = processorConfig . get ( configName ) ; Properties properties = importConfig . getmoduleProperties ( ) ; String importBundleJar = properties . getProperty ( ImportDataProcessor . IMPORT_MODULE ) ; Preconditions . checkNotNull ( importBundleJar , "Import source is undefined or custom import plugin class missing." ) ; //handle special cases for kafka 10 and maybe late versions String [ ] bundleJar = importBundleJar . split ( "kafkastream" ) ; if ( bundleJar . length > 1 ) { String version = bundleJar [ 1 ] . substring ( 0 , bundleJar [ 1 ] . indexOf ( ".jar" ) ) ; if ( ! version . isEmpty ( ) ) { int versionNumber = Integer . parseInt ( version ) ; if ( versionNumber == 10 ) { kafka10ProcessorConfigs . put ( configName , importConfig ) ; iter . remove ( ) ; } } } } if ( kafka10ProcessorConfigs . isEmpty ( ) ) { return ; } Map < String , ImportConfiguration > mergedConfigs = new HashMap <> ( ) ; iter = kafka10ProcessorConfigs . entrySet ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { ImportConfiguration importConfig = iter . next ( ) . getValue ( ) ; Properties props = importConfig . getmoduleProperties ( ) ; //organize the kafka10 importer by the broker list and group id //All importers must be configured by either broker list or zookeeper in the same group //otherwise, these importers can not be correctly merged. String brokers = props . getProperty ( "brokers" ) ; String groupid = props . getProperty ( "groupid" , "voltdb" ) ; if ( brokers == null ) { brokers = props . getProperty ( "zookeeper" ) ; } String brokersGroup = brokers + "_" + groupid ; ImportConfiguration config = mergedConfigs . get ( brokersGroup ) ; if ( config == null ) { mergedConfigs . put ( brokersGroup , importConfig ) ; } else { config . mergeProperties ( props ) ; } } processorConfig . putAll ( mergedConfigs ) ; }
|
Aggregate Kafka10 importer configurations : one importer per broker list and kafka group . Formatters and stored procedures can vary by topics .
| 598
| 29
|
155,366
|
private static void setSecurityEnabled ( Catalog catalog , SecurityType security ) { Cluster cluster = catalog . getClusters ( ) . get ( "cluster" ) ; Database database = cluster . getDatabases ( ) . get ( "database" ) ; cluster . setSecurityenabled ( security . isEnabled ( ) ) ; database . setSecurityprovider ( security . getProvider ( ) . value ( ) ) ; }
|
Set the security setting in the catalog from the deployment file
| 85
| 11
|
155,367
|
private static void setSnapshotInfo ( Catalog catalog , SnapshotType snapshotSettings ) { Database db = catalog . getClusters ( ) . get ( "cluster" ) . getDatabases ( ) . get ( "database" ) ; SnapshotSchedule schedule = db . getSnapshotschedule ( ) . get ( "default" ) ; if ( schedule == null ) { schedule = db . getSnapshotschedule ( ) . add ( "default" ) ; } schedule . setEnabled ( snapshotSettings . isEnabled ( ) ) ; String frequency = snapshotSettings . getFrequency ( ) ; if ( ! frequency . endsWith ( "s" ) && ! frequency . endsWith ( "m" ) && ! frequency . endsWith ( "h" ) ) { hostLog . error ( "Snapshot frequency " + frequency + " needs to end with time unit specified" + " that is one of [s, m, h] (seconds, minutes, hours)" + " Defaulting snapshot frequency to 10m." ) ; frequency = "10m" ; } int frequencyInt = 0 ; String frequencySubstring = frequency . substring ( 0 , frequency . length ( ) - 1 ) ; try { frequencyInt = Integer . parseInt ( frequencySubstring ) ; } catch ( Exception e ) { hostLog . error ( "Frequency " + frequencySubstring + " is not an integer. Defaulting frequency to 10m." ) ; frequency = "10m" ; frequencyInt = 10 ; } String prefix = snapshotSettings . getPrefix ( ) ; if ( prefix == null || prefix . isEmpty ( ) ) { hostLog . error ( "Snapshot prefix " + prefix + " is not a valid prefix. Using prefix of 'SNAPSHOTNONCE' " ) ; prefix = "SNAPSHOTNONCE" ; } if ( prefix . contains ( "-" ) || prefix . contains ( "," ) ) { String oldprefix = prefix ; prefix = prefix . replaceAll ( "-" , "_" ) ; prefix = prefix . replaceAll ( "," , "_" ) ; hostLog . error ( "Snapshot prefix " + oldprefix + " cannot include , or -." + " Using the prefix: " + prefix + " instead." ) ; } int retain = snapshotSettings . getRetain ( ) ; if ( retain < 1 ) { hostLog . error ( "Snapshot retain value " + retain + " is not a valid value. Must be 1 or greater." + " Defaulting snapshot retain to 1." ) ; retain = 1 ; } schedule . setFrequencyunit ( frequency . substring ( frequency . length ( ) - 1 , frequency . length ( ) ) ) ; schedule . setFrequencyvalue ( frequencyInt ) ; schedule . setPrefix ( prefix ) ; schedule . setRetain ( retain ) ; }
|
Set the auto-snapshot settings in the catalog from the deployment file.
| 598
| 13
|
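The frequency handling above boils down to a small parser. A standalone sketch (the default values are copied from the code; everything else is an assumption for illustration):

```java
// Parses a value like "10m" into an integer and a trailing unit from
// {s, m, h}, falling back to 10 minutes on malformed input.
final class SnapshotFrequency {
    final int value;
    final char unit;
    private SnapshotFrequency(int value, char unit) { this.value = value; this.unit = unit; }

    static SnapshotFrequency parse(String frequency) {
        if (frequency == null || frequency.length() < 2
                || "smh".indexOf(frequency.charAt(frequency.length() - 1)) < 0) {
            return new SnapshotFrequency(10, 'm'); // default: 10 minutes
        }
        try {
            int v = Integer.parseInt(frequency.substring(0, frequency.length() - 1));
            return new SnapshotFrequency(v, frequency.charAt(frequency.length() - 1));
        } catch (NumberFormatException e) {
            return new SnapshotFrequency(10, 'm');
        }
    }
}
```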
155,368
|
private static void setupPaths ( PathsType paths ) { File voltDbRoot ; // Handles default voltdbroot (and completely missing "paths" element). voltDbRoot = getVoltDbRoot ( paths ) ; //Snapshot setupSnapshotPaths ( paths . getSnapshots ( ) , voltDbRoot ) ; //export overflow setupExportOverflow ( paths . getExportoverflow ( ) , voltDbRoot ) ; // only use these directories in the enterprise version setupCommandLog ( paths . getCommandlog ( ) , voltDbRoot ) ; setupCommandLogSnapshot ( paths . getCommandlogsnapshot ( ) , voltDbRoot ) ; setupDROverflow ( paths . getDroverflow ( ) , voltDbRoot ) ; setupLargeQuerySwap ( paths . getLargequeryswap ( ) , voltDbRoot ) ; }
|
Set the voltdbroot path and the path overrides for snapshots, export overflow, command logs, DR overflow, etc.
| 182
| 16
|
155,369
|
public static File getVoltDbRoot ( PathsType paths ) { File voltDbRoot ; if ( paths == null || paths . getVoltdbroot ( ) == null || VoltDB . instance ( ) . getVoltDBRootPath ( paths . getVoltdbroot ( ) ) == null ) { voltDbRoot = new VoltFile ( VoltDB . DBROOT ) ; if ( ! voltDbRoot . exists ( ) ) { hostLog . info ( "Creating voltdbroot directory: " + voltDbRoot . getAbsolutePath ( ) ) ; if ( ! voltDbRoot . mkdirs ( ) ) { hostLog . fatal ( "Failed to create voltdbroot directory \"" + voltDbRoot . getAbsolutePath ( ) + "\"" ) ; } } } else { voltDbRoot = new VoltFile ( VoltDB . instance ( ) . getVoltDBRootPath ( paths . getVoltdbroot ( ) ) ) ; if ( ! voltDbRoot . exists ( ) ) { hostLog . info ( "Creating voltdbroot directory: " + voltDbRoot . getAbsolutePath ( ) ) ; if ( ! voltDbRoot . mkdirs ( ) ) { hostLog . fatal ( "Failed to create voltdbroot directory \"" + voltDbRoot . getAbsolutePath ( ) + "\"" ) ; } } } validateDirectory ( "volt root" , voltDbRoot ) ; return voltDbRoot ; }
|
Get a File object representing voltdbroot. Create the directory if missing. If paths is non-null, use it to override the default location.
| 321
| 27
|
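Both branches above run an identical create-if-missing sequence; here is that pattern extracted on its own, with java.io.File and a plain exception standing in for the VoltFile and hostLog.fatal internals:

```java
import java.io.File;

final class DirEnsure {
    // Create the directory (and any parents) if absent, failing loudly otherwise.
    static File ensureDirectory(String path) {
        File dir = new File(path);
        if (!dir.exists()) {
            System.err.println("Creating directory: " + dir.getAbsolutePath());
            if (!dir.mkdirs()) {
                throw new IllegalStateException(
                        "Failed to create directory \"" + dir.getAbsolutePath() + "\"");
            }
        }
        return dir;
    }
}
```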
155,370
|
private static void setUsersInfo ( Catalog catalog , UsersType users ) throws RuntimeException { if ( users == null ) { return ; } // The database name is not available in deployment.xml (it is defined // in project.xml). However, it must always be named "database", so // I've temporarily hardcoded it here until a more robust solution is // available. Database db = catalog . getClusters ( ) . get ( "cluster" ) . getDatabases ( ) . get ( "database" ) ; SecureRandom sr = new SecureRandom ( ) ; for ( UsersType . User user : users . getUser ( ) ) { Set < String > roles = extractUserRoles ( user ) ; String sha1hex = user . getPassword ( ) ; String sha256hex = user . getPassword ( ) ; if ( user . isPlaintext ( ) ) { sha1hex = extractPassword ( user . getPassword ( ) , ClientAuthScheme . HASH_SHA1 ) ; sha256hex = extractPassword ( user . getPassword ( ) , ClientAuthScheme . HASH_SHA256 ) ; } else if ( user . getPassword ( ) . length ( ) == 104 ) { int sha1len = ClientAuthScheme . getHexencodedDigestLength ( ClientAuthScheme . HASH_SHA1 ) ; sha1hex = sha1hex . substring ( 0 , sha1len ) ; sha256hex = sha256hex . substring ( sha1len ) ; } else { // if one user has invalid password, give a warn. hostLog . warn ( "User \"" + user . getName ( ) + "\" has invalid masked password in deployment file." ) ; // throw exception disable user with invalid masked password throw new RuntimeException ( "User \"" + user . getName ( ) + "\" has invalid masked password in deployment file" ) ; } org . voltdb . catalog . User catUser = db . getUsers ( ) . get ( user . getName ( ) ) ; if ( catUser == null ) { catUser = db . getUsers ( ) . add ( user . getName ( ) ) ; } // generate salt only once for sha1 and sha256 String saltGen = BCrypt . gensalt ( BCrypt . GENSALT_DEFAULT_LOG2_ROUNDS , sr ) ; String hashedPW = BCrypt . hashpw ( sha1hex , saltGen ) ; String hashedPW256 = BCrypt . hashpw ( sha256hex , saltGen ) ; catUser . setShadowpassword ( hashedPW ) ; catUser . setSha256shadowpassword ( hashedPW256 ) ; //use fixed seed for comparison catUser . setPassword ( BCrypt . hashpw ( sha256hex , "$2a$10$pWO/a/OQkFyQWQDpchZdEe" ) ) ; // process the @groups and @roles comma separated list for ( final String role : roles ) { final Group catalogGroup = db . getGroups ( ) . get ( role ) ; // if the role doesn't exist, ignore it. if ( catalogGroup != null ) { GroupRef groupRef = catUser . getGroups ( ) . get ( role ) ; if ( groupRef == null ) { groupRef = catUser . getGroups ( ) . add ( role ) ; } groupRef . setGroup ( catalogGroup ) ; } else { hostLog . warn ( "User \"" + user . getName ( ) + "\" is assigned to non-existent role \"" + role + "\" " + "and may not have the expected database permissions." ) ; } } } }
|
Set user info in the catalog.
| 813
| 7
|
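The salt-sharing detail above is easy to miss: one salt is generated per user and reused for both the SHA-1- and SHA-256-derived inputs, so the two shadow passwords stay consistent. A sketch assuming a jBCrypt-style API (VoltDB bundles its own BCrypt copy):

```java
import java.security.SecureRandom;
import org.mindrot.jbcrypt.BCrypt; // assumption: jBCrypt-compatible API

final class ShadowPasswords {
    // Hash both digests with the same salt so the pair can be verified together.
    static String[] hashBoth(String sha1hex, String sha256hex, SecureRandom sr) {
        String salt = BCrypt.gensalt(10, sr); // 10 = jBCrypt's default log rounds
        return new String[] {
            BCrypt.hashpw(sha1hex, salt),
            BCrypt.hashpw(sha256hex, salt)
        };
    }
}
```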
155,371
|
private static Set < String > extractUserRoles ( final UsersType . User user ) { Set < String > roles = new TreeSet <> ( ) ; if ( user == null ) { return roles ; } if ( user . getRoles ( ) != null && ! user . getRoles ( ) . trim ( ) . isEmpty ( ) ) { String [ ] rolelist = user . getRoles ( ) . trim ( ) . split ( "," ) ; for ( String role : rolelist ) { if ( role == null || role . trim ( ) . isEmpty ( ) ) { continue ; } roles . add ( role . trim ( ) . toLowerCase ( ) ) ; } } return roles ; }
|
Takes the comma-separated list of roles specified in the user's roles attribute and returns it as a set.
| 152
| 22
|
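The same normalization in isolation, runnable as-is:

```java
import java.util.*;

final class RoleParser {
    // Trim, lowercase, drop empties; TreeSet dedupes and sorts.
    static Set<String> parse(String roles) {
        Set<String> out = new TreeSet<>();
        if (roles == null) return out;
        for (String r : roles.split(",")) {
            if (!r.trim().isEmpty()) out.add(r.trim().toLowerCase());
        }
        return out;
    }

    public static void main(String[] args) {
        System.out.println(parse(" Admin, DBA ,,ops ")); // [admin, dba, ops]
    }
}
```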
155,372
|
public static byte [ ] makeDeploymentHash ( byte [ ] inbytes ) { MessageDigest md = null ; try { md = MessageDigest . getInstance ( "SHA-1" ) ; } catch ( NoSuchAlgorithmException e ) { VoltDB . crashLocalVoltDB ( "Bad JVM has no SHA-1 hash." , true , e ) ; } md . update ( inbytes ) ; byte [ ] hash = md . digest ( ) ; assert ( hash . length == 20 ) ; // sha-1 length return hash ; }
|
This code appeared repeatedly, so it was extracted into a method: take the bytes of the catalog or deployment file, perform the obligatory NoSuchAlgorithmException crash check, feed the bytes in, and return the SHA-1 hash.
| 118
| 34
|
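A usage sketch that also hex-encodes the 20-byte digest for logging, with a plain throw in place of crashLocalVoltDB:

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

final class DeploymentHash {
    static String hashHex(byte[] inbytes) {
        try {
            byte[] hash = MessageDigest.getInstance("SHA-1").digest(inbytes);
            StringBuilder sb = new StringBuilder(40);
            for (byte b : hash) sb.append(String.format("%02x", b));
            return sb.toString();
        } catch (NoSuchAlgorithmException e) {
            throw new AssertionError("JVM lacks SHA-1", e); // should never happen
        }
    }

    public static void main(String[] args) {
        System.out.println(hashHex("deployment.xml".getBytes(StandardCharsets.UTF_8)));
    }
}
```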
155,373
|
public static Pair < Set < String > , Set < String > > getSnapshotableTableNamesFromInMemoryJar ( InMemoryJarfile jarfile ) { Set < String > fullTableNames = new HashSet <> ( ) ; Set < String > optionalTableNames = new HashSet <> ( ) ; Catalog catalog = new Catalog ( ) ; catalog . execute ( getSerializedCatalogStringFromJar ( jarfile ) ) ; Database db = catalog . getClusters ( ) . get ( "cluster" ) . getDatabases ( ) . get ( "database" ) ; Pair < List < Table > , Set < String > > ret ; ret = getSnapshotableTables ( db , true ) ; ret . getFirst ( ) . forEach ( table -> fullTableNames . add ( table . getTypeName ( ) ) ) ; optionalTableNames . addAll ( ret . getSecond ( ) ) ; ret = getSnapshotableTables ( db , false ) ; ret . getFirst ( ) . forEach ( table -> fullTableNames . add ( table . getTypeName ( ) ) ) ; optionalTableNames . addAll ( ret . getSecond ( ) ) ; return new Pair < Set < String > , Set < String > > ( fullTableNames , optionalTableNames ) ; }
|
Get all snapshot-able table names from an in-memory catalog jar file. A snapshot-able table is one that is neither an export table nor an implicitly partitioned view.
| 275
| 36
|
155,374
|
public static Pair < List < Table > , Set < String > > getSnapshotableTables ( Database catalog , boolean isReplicated ) { List < Table > tables = new ArrayList <> ( ) ; Set < String > optionalTableNames = new HashSet <> ( ) ; for ( Table table : catalog . getTables ( ) ) { if ( table . getIsreplicated ( ) != isReplicated ) { // We handle replicated tables and partitioned tables separately. continue ; } if ( isTableExportOnly ( catalog , table ) ) { // Streamed tables are not considered as "normal" tables here. continue ; } if ( table . getMaterializer ( ) != null ) { if ( isSnapshotablePersistentTableView ( catalog , table ) ) { // Some persistent table views are added to the snapshot starting from // V8.2, they are since then considered as "normal" tables, too. // But their presence in the snapshot is not compulsory for backward // compatibility reasons. optionalTableNames . add ( table . getTypeName ( ) ) ; } else if ( ! isSnapshotableStreamedTableView ( catalog , table ) ) { continue ; } } tables . add ( table ) ; } return new Pair < List < Table > , Set < String > > ( tables , optionalTableNames ) ; }
|
Get all snapshot-able tables from the catalog. A snapshot-able table is one that is neither an export table nor an implicitly partitioned view.
| 281
| 30
|
155,375
|
public static List < Table > getNormalTables ( Database catalog , boolean isReplicated ) { List < Table > tables = new ArrayList <> ( ) ; for ( Table table : catalog . getTables ( ) ) { if ( ( table . getIsreplicated ( ) == isReplicated ) && table . getMaterializer ( ) == null && ! CatalogUtil . isTableExportOnly ( catalog , table ) ) { tables . add ( table ) ; continue ; } //Handle views which are on STREAM only partitioned STREAM allow view and must have partition //column as part of view. if ( ( table . getMaterializer ( ) != null ) && ! isReplicated && ( CatalogUtil . isTableExportOnly ( catalog , table . getMaterializer ( ) ) ) ) { //Non partitioned export table are not allowed so it should not get here. Column bpc = table . getMaterializer ( ) . getPartitioncolumn ( ) ; if ( bpc != null ) { String bPartName = bpc . getName ( ) ; Column pc = table . getColumns ( ) . get ( bPartName ) ; if ( pc != null ) { tables . add ( table ) ; } } } } return tables ; }
|
Get all normal tables from the catalog. A normal table is one that is neither a materialized view nor an export table. For lack of a better name, it is called normal.
| 265
| 37
|
155,376
|
public static boolean isDurableProc ( String procName ) { SystemProcedureCatalog . Config sysProc = SystemProcedureCatalog . listing . get ( procName ) ; return sysProc == null || sysProc . isDurable ( ) ; }
|
Return whether the given procedure is durable. For system procedures, SystemProcedureCatalog is consulted; all non-system procedures are durable.
| 57
| 28
|
155,377
|
public static File createTemporaryEmptyCatalogJarFile ( boolean isXDCR ) throws IOException { File emptyJarFile = File . createTempFile ( "catalog-empty" , ".jar" ) ; emptyJarFile . deleteOnExit ( ) ; VoltCompiler compiler = new VoltCompiler ( isXDCR ) ; if ( ! compiler . compileEmptyCatalog ( emptyJarFile . getAbsolutePath ( ) ) ) { return null ; } return emptyJarFile ; }
|
Build an empty catalog jar file .
| 102
| 7
|
155,378
|
public static String getSignatureForTable ( String name , SortedMap < Integer , VoltType > schema ) { StringBuilder sb = new StringBuilder ( ) ; sb . append ( name ) . append ( SIGNATURE_TABLE_NAME_SEPARATOR ) ; for ( VoltType t : schema . values ( ) ) { sb . append ( t . getSignatureChar ( ) ) ; } return sb . toString ( ) ; }
|
Get a string signature for the table represented by the args
| 96
| 11
|
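The signature format is simply the name, a separator, then one character per column type in column order. A sketch with plain chars standing in for VoltType.getSignatureChar() and an assumed separator value:

```java
import java.util.*;

final class TableSignature {
    static final char SEPARATOR = '|'; // assumption; the real constant lives in CatalogUtil

    static String signature(String name, SortedMap<Integer, Character> schema) {
        StringBuilder sb = new StringBuilder(name).append(SEPARATOR);
        for (char c : schema.values()) sb.append(c);
        return sb.toString();
    }

    public static void main(String[] args) {
        SortedMap<Integer, Character> schema = new TreeMap<>();
        schema.put(0, 'i'); // e.g. integer column
        schema.put(1, 'v'); // e.g. varchar column
        System.out.println(signature("CUSTOMERS", schema)); // CUSTOMERS|iv
    }
}
```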
155,379
|
public static Pair < Long , String > calculateDrTableSignatureAndCrc ( Database catalog ) { SortedSet < Table > tables = Sets . newTreeSet ( ) ; tables . addAll ( getSnapshotableTables ( catalog , true ) . getFirst ( ) ) ; tables . addAll ( getSnapshotableTables ( catalog , false ) . getFirst ( ) ) ; final PureJavaCrc32 crc = new PureJavaCrc32 ( ) ; final StringBuilder sb = new StringBuilder ( ) ; String delimiter = "" ; for ( Table t : tables ) { if ( t . getIsdred ( ) ) { crc . update ( t . getSignature ( ) . getBytes ( Charsets . UTF_8 ) ) ; sb . append ( delimiter ) . append ( t . getSignature ( ) ) ; delimiter = SIGNATURE_DELIMITER ; } } return Pair . of ( crc . getValue ( ) , sb . toString ( ) ) ; }
|
Deterministically serializes all DR table signatures into a string and calculates the CRC checksum .
| 222
| 20
|
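The determinism comes from iterating a SortedSet, so every node feeds signatures to the CRC in the same order. A sketch with java.util.zip.CRC32 standing in for PureJavaCrc32:

```java
import java.nio.charset.StandardCharsets;
import java.util.SortedSet;
import java.util.zip.CRC32;

final class DrSignatureCrc {
    static long crcOf(SortedSet<String> signatures) {
        CRC32 crc = new CRC32();
        for (String sig : signatures) {
            crc.update(sig.getBytes(StandardCharsets.UTF_8)); // sorted => deterministic
        }
        return crc.getValue();
    }
}
```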
155,380
|
public static Map < String , String > deserializeCatalogSignature ( String signature ) { Map < String , String > tableSignatures = Maps . newHashMap ( ) ; for ( String oneSig : signature . split ( Pattern . quote ( SIGNATURE_DELIMITER ) ) ) { if ( ! oneSig . isEmpty ( ) ) { final String [ ] parts = oneSig . split ( Pattern . quote ( SIGNATURE_TABLE_NAME_SEPARATOR ) , 2 ) ; tableSignatures . put ( parts [ 0 ] , parts [ 1 ] ) ; } } return tableSignatures ; }
|
Deserializes a catalog DR table signature string into a map of table signatures .
| 133
| 16
|
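A round-trip sketch of the signature string format, with assumed delimiter values (the real constants live in CatalogUtil):

```java
import java.util.*;
import java.util.regex.Pattern;

final class SignatureCodec {
    static final String DELIMITER = ";";  // assumption
    static final String NAME_SEP = "|";   // assumption

    // Each entry is "tableName<NAME_SEP>typeChars"; entries are joined by DELIMITER.
    static Map<String, String> deserialize(String signature) {
        Map<String, String> out = new HashMap<>();
        for (String one : signature.split(Pattern.quote(DELIMITER))) {
            if (!one.isEmpty()) {
                String[] parts = one.split(Pattern.quote(NAME_SEP), 2);
                out.put(parts[0], parts[1]);
            }
        }
        return out;
    }

    public static void main(String[] args) {
        System.out.println(deserialize("A|iv;B|t")); // {A=iv, B=t} (order may vary)
    }
}
```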
155,381
|
public static String getLimitPartitionRowsDeleteStmt ( Table table ) { CatalogMap < Statement > map = table . getTuplelimitdeletestmt ( ) ; if ( map . isEmpty ( ) ) { return null ; } assert ( map . size ( ) == 1 ) ; return map . iterator ( ) . next ( ) . getSqltext ( ) ; }
|
Given a table, return the DELETE statement that can be executed by a LIMIT PARTITION ROWS constraint, or null if there isn't one.
| 81
| 30
|
155,382
|
public static ExportType addExportConfigToDRConflictsTable ( ExportType export ) { if ( export == null ) { export = new ExportType ( ) ; } boolean userDefineStream = false ; for ( ExportConfigurationType exportConfiguration : export . getConfiguration ( ) ) { if ( exportConfiguration . getTarget ( ) . equals ( DR_CONFLICTS_TABLE_EXPORT_GROUP ) ) { userDefineStream = true ; } } if ( ! userDefineStream ) { ExportConfigurationType defaultConfiguration = new ExportConfigurationType ( ) ; defaultConfiguration . setEnabled ( true ) ; defaultConfiguration . setTarget ( DR_CONFLICTS_TABLE_EXPORT_GROUP ) ; defaultConfiguration . setType ( ServerExportEnum . FILE ) ; // type PropertyType type = new PropertyType ( ) ; type . setName ( "type" ) ; type . setValue ( DEFAULT_DR_CONFLICTS_EXPORT_TYPE ) ; defaultConfiguration . getProperty ( ) . add ( type ) ; // nonce PropertyType nonce = new PropertyType ( ) ; nonce . setName ( "nonce" ) ; nonce . setValue ( DEFAULT_DR_CONFLICTS_NONCE ) ; defaultConfiguration . getProperty ( ) . add ( nonce ) ; // outdir PropertyType outdir = new PropertyType ( ) ; outdir . setName ( "outdir" ) ; outdir . setValue ( DEFAULT_DR_CONFLICTS_DIR ) ; defaultConfiguration . getProperty ( ) . add ( outdir ) ; // k-safe file export PropertyType ksafe = new PropertyType ( ) ; ksafe . setName ( "replicated" ) ; ksafe . setValue ( "true" ) ; defaultConfiguration . getProperty ( ) . add ( ksafe ) ; // skip internal export columns PropertyType skipinternal = new PropertyType ( ) ; skipinternal . setName ( "skipinternals" ) ; skipinternal . setValue ( "true" ) ; defaultConfiguration . getProperty ( ) . add ( skipinternal ) ; export . getConfiguration ( ) . add ( defaultConfiguration ) ; } return export ; }
|
Add a default configuration for the DR conflicts export target if the deployment file doesn't have one.
| 463
| 16
|
155,383
|
public synchronized void printResults ( ) throws Exception { ClientStats stats = fullStatsContext . fetch ( ) . getStats ( ) ; String display = "\nA total of %d login requests were received...\n" ; System . out . printf ( display , stats . getInvocationsCompleted ( ) ) ; System . out . printf ( "Average throughput: %,9d txns/sec\n" , stats . getTxnThroughput ( ) ) ; System . out . printf ( "Average latency: %,9.2f ms\n" , stats . getAverageLatency ( ) ) ; System . out . printf ( "10th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .1 ) ) ; System . out . printf ( "25th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .25 ) ) ; System . out . printf ( "50th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .5 ) ) ; System . out . printf ( "75th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .75 ) ) ; System . out . printf ( "90th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .9 ) ) ; System . out . printf ( "95th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .95 ) ) ; System . out . printf ( "99th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .99 ) ) ; System . out . printf ( "99.5th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .995 ) ) ; System . out . printf ( "99.9th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .999 ) ) ; System . out . println ( "\n\n" + stats . latencyHistoReport ( ) ) ; }
|
Prints the results and statistics of the data load .
| 498
| 11
|
155,384
|
private void doLogin ( LoginGenerator . LoginRecord login ) { // Synchronously call the "Login" procedure passing in a json string containing // login-specific structure/data. try { ClientResponse response = client . callProcedure ( "Login" , login . username , login . password , login . json ) ; long resultCode = response . getResults ( ) [ 0 ] . asScalarLong ( ) ; if ( resultCode == LOGIN_SUCCESSFUL ) acceptedLogins . incrementAndGet ( ) ; else badLogins . incrementAndGet ( ) ; } catch ( Exception e ) { badLogins . incrementAndGet ( ) ; e . printStackTrace ( ) ; } }
|
Invoke the Login stored procedure to add a login record to the database. If Login is called multiple times for the same username, the last-accessed time for that login is updated. Thus this sample client can be run repeatedly without having to cycle the database.
| 152
| 51
|
155,385
|
public void loadDatabase ( ) throws Exception { // create/start the requested number of threads int thread_count = 10 ; Thread [ ] loginThreads = new Thread [ thread_count ] ; for ( int i = 0 ; i < thread_count ; ++ i ) { loginThreads [ i ] = new Thread ( new LoginThread ( ) ) ; loginThreads [ i ] . start ( ) ; } // Initialize the statistics fullStatsContext . fetchAndResetBaseline ( ) ; // Run the data loading for 10 seconds System . out . println ( "\nLoading database..." ) ; Thread . sleep ( 1000l * 10 ) ; // stop the threads loadComplete . set ( true ) ; // block until all outstanding txns return client . drain ( ) ; // join on the threads for ( Thread t : loginThreads ) { t . join ( ) ; } // print the summary statistics of the data load printResults ( ) ; // Create entries that we can query on. createUniqueData ( ) ; }
|
Load the database with as much data as possible within the fixed load window (10 seconds).
| 213
| 15
|
155,386
|
public static void main ( String [ ] args ) throws Exception { JSONClient app = new JSONClient ( ) ; // Initialize connections app . initialize ( ) ; // load data, measuring the throughput. app . loadDatabase ( ) ; // run sample JSON queries app . runQueries ( ) ; // Disconnect app . shutdown ( ) ; }
|
Main routine: creates a client instance, loads the database, then executes example queries against the data.
| 71
| 17
|
155,387
|
@ Override public synchronized void reportForeignHostFailed ( int hostId ) { long initiatorSiteId = CoreUtils . getHSIdFromHostAndSite ( hostId , AGREEMENT_SITE_ID ) ; m_agreementSite . reportFault ( initiatorSiteId ) ; if ( ! m_shuttingDown ) { // should be the single console message a user sees when another node fails networkLog . warn ( String . format ( "Host %d failed. Cluster remains operational." , hostId ) ) ; } }
|
Synchronization protects m_knownFailedHosts and ensures that every failed host is only reported once
| 115
| 21
|
155,388
|
public InstanceId getInstanceId ( ) { if ( m_instanceId == null ) { try { byte [ ] data = m_zk . getData ( CoreZK . instance_id , false , null ) ; JSONObject idJSON = new JSONObject ( new String ( data , "UTF-8" ) ) ; m_instanceId = new InstanceId ( idJSON . getInt ( "coord" ) , idJSON . getLong ( "timestamp" ) ) ; } catch ( Exception e ) { String msg = "Unable to get instance ID info from " + CoreZK . instance_id ; hostLog . error ( msg ) ; throw new RuntimeException ( msg , e ) ; } } return m_instanceId ; }
|
Get a unique ID for this cluster
| 160
| 7
|
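Only the parsing step, isolated. Assumes an org.json-style JSONObject, as the method above uses:

```java
import java.nio.charset.StandardCharsets;
import org.json.JSONObject; // assumption: org.json-compatible API

final class InstanceIdParser {
    // The ZK blob is a small JSON object: {"coord": <int>, "timestamp": <long>}.
    static long[] parse(byte[] zkData) {
        JSONObject id = new JSONObject(new String(zkData, StandardCharsets.UTF_8));
        return new long[] { id.getInt("coord"), id.getLong("timestamp") };
    }
}
```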
155,389
|
@ Override public void requestJoin ( SocketChannel socket , SSLEngine sslEngine , MessagingChannel messagingChannel , InetSocketAddress listeningAddress , JSONObject jo ) throws Exception { /* * Generate the host id via creating an ephemeral sequential node */ Integer hostId = selectNewHostId ( socket . socket ( ) . getInetAddress ( ) . getHostAddress ( ) ) ; prepSocketChannel ( socket ) ; ForeignHost fhost = null ; try { try { JoinAcceptor . PleaDecision decision = m_acceptor . considerMeshPlea ( m_zk , hostId , jo ) ; /* * Write the response that advertises the cluster topology */ writeRequestJoinResponse ( hostId , decision , socket , messagingChannel ) ; if ( ! decision . accepted ) { socket . close ( ) ; return ; } /* * Wait for the a response from the joining node saying that it connected * to all the nodes we just advertised. Use a timeout so that the cluster can't be stuck * on failed joins. */ ByteBuffer finishedJoining = ByteBuffer . allocate ( 1 ) ; socket . configureBlocking ( false ) ; long start = System . currentTimeMillis ( ) ; while ( finishedJoining . hasRemaining ( ) && System . currentTimeMillis ( ) - start < 120000 ) { // This is just one byte to indicate that it finished joining. // No need to encrypt because the value of it doesn't matter int read = socket . read ( finishedJoining ) ; if ( read == - 1 ) { networkLog . info ( "New connection was unable to establish mesh" ) ; socket . close ( ) ; return ; } else if ( read < 1 ) { Thread . sleep ( 5 ) ; } } /* * Now add the host to the mailbox system */ PicoNetwork picoNetwork = createPicoNetwork ( sslEngine , socket , false ) ; fhost = new ForeignHost ( this , hostId , socket , m_config . deadHostTimeout , listeningAddress , picoNetwork ) ; putForeignHost ( hostId , fhost ) ; fhost . enableRead ( VERBOTEN_THREADS ) ; m_acceptor . accrue ( hostId , jo ) ; } catch ( Exception e ) { networkLog . error ( "Error joining new node" , e ) ; addFailedHost ( hostId ) ; synchronized ( HostMessenger . this ) { removeForeignHost ( hostId ) ; } m_acceptor . detract ( m_zk , hostId ) ; socket . close ( ) ; return ; } /* * And the last step is to wait for the new node to join ZooKeeper. * This node is the one to create the txn that will add the new host to the list of hosts * with agreement sites across the cluster. */ long hsId = CoreUtils . getHSIdFromHostAndSite ( hostId , AGREEMENT_SITE_ID ) ; if ( ! m_agreementSite . requestJoin ( hsId ) . await ( 60 , TimeUnit . SECONDS ) ) { reportForeignHostFailed ( hostId ) ; } } catch ( Throwable e ) { org . voltdb . VoltDB . crashLocalVoltDB ( "" , true , e ) ; } }
|
Any node can serve a request to join . The coordination of generating a new host id is done via ZK
| 702
| 22
|
155,390
|
@ Override public void notifyOfConnection ( int hostId , SocketChannel socket , SSLEngine sslEngine , InetSocketAddress listeningAddress ) throws Exception { networkLog . info ( "Host " + getHostId ( ) + " receives a new connection from host " + hostId ) ; prepSocketChannel ( socket ) ; // Auxiliary connection never time out ForeignHost fhost = new ForeignHost ( this , hostId , socket , Integer . MAX_VALUE , listeningAddress , createPicoNetwork ( sslEngine , socket , true ) ) ; putForeignHost ( hostId , fhost ) ; fhost . enableRead ( VERBOTEN_THREADS ) ; // Do all peers have enough secondary connections? for ( int hId : m_peers ) { if ( m_foreignHosts . get ( hId ) . size ( ) != ( m_secondaryConnections + 1 ) ) { return ; } } // Now it's time to use secondary pico network, see comments in presend() to know why we can't // do this earlier. m_hasAllSecondaryConnectionCreated = true ; }
|
SocketJoiner receives a request to create a new connection from the given host id; create a new ForeignHost for this connection.
| 239
| 24
|
155,391
|
public Map < Integer , HostInfo > waitForGroupJoin ( int expectedHosts ) { Map < Integer , HostInfo > hostInfos = Maps . newTreeMap ( ) ; try { while ( true ) { ZKUtil . FutureWatcher fw = new ZKUtil . FutureWatcher ( ) ; final List < String > children = m_zk . getChildren ( CoreZK . hosts , fw ) ; final int numChildren = children . size ( ) ; for ( String child : children ) { final HostInfo info = HostInfo . fromBytes ( m_zk . getData ( ZKUtil . joinZKPath ( CoreZK . hosts , child ) , false , null ) ) ; hostInfos . put ( parseHostId ( child ) , info ) ; } /* * If the target number of hosts has been reached * break out */ if ( numChildren == expectedHosts ) { break ; } /* * If there are extra hosts that means too many Volt procs were started. * Kill this node based on the assumption that we are the extra one. In most * cases this is correct and fine and in the worst case the cluster will hang coming up * because two or more hosts killed themselves */ if ( numChildren > expectedHosts ) { org . voltdb . VoltDB . crashLocalVoltDB ( "Expected to find " + expectedHosts + " hosts in cluster at startup but found " + numChildren + ". Terminating this host." , false , null ) ; } fw . get ( ) ; } } catch ( Exception e ) { org . voltdb . VoltDB . crashLocalVoltDB ( "Error waiting for hosts to be ready" , false , e ) ; } assert hostInfos . size ( ) == expectedHosts ; return hostInfos ; }
|
Wait until all the nodes have built a mesh .
| 389
| 10
|
155,392
|
@ Override public String getHostnameForHostID ( int hostId ) { if ( hostId == m_localHostId ) { return CoreUtils . getHostnameOrAddress ( ) ; } Iterator < ForeignHost > it = m_foreignHosts . get ( hostId ) . iterator ( ) ; if ( it . hasNext ( ) ) { ForeignHost fh = it . next ( ) ; return fh . hostname ( ) ; } return m_knownFailedHosts . get ( hostId ) != null ? m_knownFailedHosts . get ( hostId ) : "UNKNOWN" ; }
|
Given a host id, return the hostname for it.
| 135
| 10
|
155,393
|
public void removeMailbox ( long hsId ) { synchronized ( m_mapLock ) { ImmutableMap . Builder < Long , Mailbox > b = ImmutableMap . builder ( ) ; for ( Map . Entry < Long , Mailbox > e : m_siteMailboxes . entrySet ( ) ) { if ( e . getKey ( ) . equals ( hsId ) ) { continue ; } b . put ( e . getKey ( ) , e . getValue ( ) ) ; } m_siteMailboxes = b . build ( ) ; } }
|
Discard a mailbox
| 120
| 4
|
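The removal is copy-on-write: readers see an immutable snapshot with no locking, while writers rebuild the map under a lock minus the departing key. The pattern generalized, using Guava as the code above does:

```java
import java.util.Map;
import com.google.common.collect.ImmutableMap; // Guava, as in the code above

final class CopyOnWriteMap<K, V> {
    private final Object lock = new Object();
    private volatile ImmutableMap<K, V> map = ImmutableMap.of();

    V get(K key) { return map.get(key); } // lock-free read

    void remove(K key) {
        synchronized (lock) {
            ImmutableMap.Builder<K, V> b = ImmutableMap.builder();
            for (Map.Entry<K, V> e : map.entrySet()) {
                if (!e.getKey().equals(key)) {
                    b.put(e.getKey(), e.getValue());
                }
            }
            map = b.build(); // publish the new snapshot atomically
        }
    }
}
```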
155,394
|
public void waitForAllHostsToBeReady ( int expectedHosts ) { try { m_zk . create ( CoreZK . readyhosts_host , null , Ids . OPEN_ACL_UNSAFE , CreateMode . EPHEMERAL_SEQUENTIAL ) ; while ( true ) { ZKUtil . FutureWatcher fw = new ZKUtil . FutureWatcher ( ) ; int readyHosts = m_zk . getChildren ( CoreZK . readyhosts , fw ) . size ( ) ; if ( readyHosts == expectedHosts ) { break ; } fw . get ( ) ; } } catch ( KeeperException | InterruptedException e ) { org . voltdb . VoltDB . crashLocalVoltDB ( "Error waiting for hosts to be ready" , false , e ) ; } }
|
Block on this call until the number of ready hosts is equal to the number of expected hosts .
| 187
| 19
|
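The barrier pattern on its own, against the raw ZooKeeper client (paths are illustrative; VoltDB keeps the real ones in CoreZK):

```java
import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

final class ReadyBarrier {
    // Register an ephemeral-sequential child, then block until the child
    // count reaches the expected host count, re-checking on each watch event.
    static void await(ZooKeeper zk, String barrierPath, int expectedHosts)
            throws KeeperException, InterruptedException {
        zk.create(barrierPath + "/host", null,
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
        final Object mutex = new Object();
        while (true) {
            List<String> ready = zk.getChildren(barrierPath, event -> {
                synchronized (mutex) { mutex.notifyAll(); }
            });
            if (ready.size() >= expectedHosts) return;
            synchronized (mutex) { mutex.wait(1000); } // woken by watch or timeout
        }
    }
}
```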
155,395
|
public void waitForJoiningHostsToBeReady ( int expectedHosts , int localHostId ) { try { //register this host as joining. The host registration will be deleted after joining is completed. m_zk . create ( ZKUtil . joinZKPath ( CoreZK . readyjoininghosts , Integer . toString ( localHostId ) ) , null , Ids . OPEN_ACL_UNSAFE , CreateMode . PERSISTENT ) ; while ( true ) { ZKUtil . FutureWatcher fw = new ZKUtil . FutureWatcher ( ) ; int readyHosts = m_zk . getChildren ( CoreZK . readyjoininghosts , fw ) . size ( ) ; if ( readyHosts == expectedHosts ) { break ; } fw . get ( ) ; } } catch ( KeeperException | InterruptedException e ) { org . voltdb . VoltDB . crashLocalVoltDB ( "Error waiting for hosts to be ready" , false , e ) ; } }
|
For elastic join . Block on this call until the number of ready hosts is equal to the number of expected joining hosts .
| 227
| 24
|
155,396
|
public int countForeignHosts ( ) { int retval = 0 ; for ( ForeignHost host : m_foreignHosts . values ( ) ) { if ( ( host != null ) && ( host . isUp ( ) ) ) { retval ++ ; } } return retval ; }
|
Get the number of up foreign hosts . Used for test purposes .
| 61
| 13
|
155,397
|
public void closeForeignHostSocket ( int hostId ) { Iterator < ForeignHost > it = m_foreignHosts . get ( hostId ) . iterator ( ) ; while ( it . hasNext ( ) ) { ForeignHost fh = it . next ( ) ; if ( fh . isUp ( ) ) { fh . killSocket ( ) ; } } reportForeignHostFailed ( hostId ) ; }
|
Kill a foreign host socket by id .
| 89
| 8
|
155,398
|
public void cutLink ( int hostIdA , int hostIdB ) { if ( m_localHostId == hostIdA ) { Iterator < ForeignHost > it = m_foreignHosts . get ( hostIdB ) . iterator ( ) ; while ( it . hasNext ( ) ) { ForeignHost fh = it . next ( ) ; fh . cutLink ( ) ; } } if ( m_localHostId == hostIdB ) { Iterator < ForeignHost > it = m_foreignHosts . get ( hostIdA ) . iterator ( ) ; while ( it . hasNext ( ) ) { ForeignHost fh = it . next ( ) ; fh . cutLink ( ) ; } } }
|
Cut the network connection between two host ids immediately. Useful for simulating network partitions.
| 155
| 15
|
155,399
|
void execute ( ) { String sCmd = null ; if ( 4096 <= ifHuge . length ( ) ) { sCmd = ifHuge ; } else { sCmd = txtCommand . getText ( ) ; } if ( sCmd . startsWith ( "-->>>TEST<<<--" ) ) { testPerformance ( ) ; return ; } String [ ] g = new String [ 1 ] ; lTime = System . currentTimeMillis ( ) ; try { if ( sStatement == null ) { return ; } sStatement . execute ( sCmd ) ; lTime = System . currentTimeMillis ( ) - lTime ; int r = sStatement . getUpdateCount ( ) ; if ( r == - 1 ) { formatResultSet ( sStatement . getResultSet ( ) ) ; } else { g [ 0 ] = "update count" ; gResult . setHead ( g ) ; g [ 0 ] = String . valueOf ( r ) ; gResult . addRow ( g ) ; } addToRecent ( txtCommand . getText ( ) ) ; } catch ( SQLException e ) { lTime = System . currentTimeMillis ( ) - lTime ; g [ 0 ] = "SQL Error" ; gResult . setHead ( g ) ; String s = e . getMessage ( ) ; s += " / Error Code: " + e . getErrorCode ( ) ; s += " / State: " + e . getSQLState ( ) ; g [ 0 ] = s ; gResult . addRow ( g ) ; } updateResult ( ) ; System . gc ( ) ; }
|
Adjust this method for large strings ... i.e. multi-megabyte ones.
| 345
| 13
|