| idx (int64, 0–165k) | question (string, length 73–4.15k) | target (string, length 5–918) | len_question (int64, 21–890) | len_target (int64, 3–255) |
|---|---|---|---|---|
| 154,200 | VoltTable executePrecompiledSQL(Statement catStmt, Object[] params, boolean replicated) throws VoltAbortException { // Create a SQLStmt instance on the fly // This is unusual to do, as they are typically required to be final instance variables. // This only works because the SQL text and plan is identical from the borrowed procedure. SQLStmt stmt = new SQLStmt(catStmt.getSqltext()); if (replicated) { stmt.setInCatalog(false); } m_runner.initSQLStmt(stmt, catStmt); voltQueueSQL(stmt, params); return voltExecuteSQL()[0]; } | Execute a pre-compiled adHoc SQL statement, throw exception if not. | 151 | 16 |
| 154,201 | protected static void printLogStatic(String className, String msg, Object... args) { if (args != null) { msg = String.format(msg, args); } String header = String.format("%s [%s] ", ZonedDateTime.now().format(TIME_FORMAT), className); System.out.println(String.format("%s%s", header, msg.replaceAll("\n", "\n" + header))); } | The static method to print a log message to the console. | 111 | 12 |
| 154,202 | StatementSimple compileSetStatement(RangeVariable rangeVars[]) { read(); OrderedHashSet colNames = new OrderedHashSet(); HsqlArrayList exprList = new HsqlArrayList(); readSetClauseList(rangeVars, colNames, exprList); if (exprList.size() > 1) { throw Error.error(ErrorCode.X_42602); } Expression expression = (Expression) exprList.get(0); if (expression.getDegree() != colNames.size()) { // throw Error.error(ErrorCode.X_42546); } int[] indexes = new int[colNames.size()]; ColumnSchema[] variables = new ColumnSchema[colNames.size()]; setVariables(rangeVars, colNames, indexes, variables); HsqlList unresolved = expression.resolveColumnReferences(rangeVars, rangeVars.length, null, false); unresolved = Expression.resolveColumnSet(rangeVars, unresolved, null); ExpressionColumn.checkColumnsResolved(unresolved); expression.resolveTypes(session, null); StatementSimple cs = new StatementSimple(StatementTypes.ASSIGNMENT, variables, expression, indexes); return cs; } | Creates SET Statement for PSM from this parse context. | 284 | 12 |
| 154,203 | StatementSchema compileCreateProcedureOrFunction() { int routineType = token.tokenType == Tokens.PROCEDURE ? SchemaObject.PROCEDURE : SchemaObject.FUNCTION; HsqlName name; read(); name = readNewSchemaObjectNameNoCheck(routineType); Routine routine = new Routine(routineType); routine.setName(name); readThis(Tokens.OPENBRACKET); if (token.tokenType == Tokens.CLOSEBRACKET) { read(); } else { while (true) { ColumnSchema newcolumn = readRoutineParameter(routine); routine.addParameter(newcolumn); if (token.tokenType == Tokens.COMMA) { read(); } else { readThis(Tokens.CLOSEBRACKET); break; } } } if (routineType != SchemaObject.PROCEDURE) { readThis(Tokens.RETURNS); if (token.tokenType == Tokens.TABLE) { read(); TableDerived table = new TableDerived(database, name, TableBase.FUNCTION_TABLE); readThis(Tokens.OPENBRACKET); if (token.tokenType == Tokens.CLOSEBRACKET) { read(); } else { while (true) { ColumnSchema newcolumn = readRoutineParameter(routine); table.addColumn(newcolumn); if (token.tokenType == Tokens.COMMA) { read(); } else { readThis(Tokens.CLOSEBRACKET); break; } } } routine.setReturnTable(table); } else { Type type = readTypeDefinition(true); routine.setReturnType(type); } } readRoutineCharacteristics(routine); if (token.tokenType == Tokens.EXTERNAL) { if (routine.getLanguage() != Routine.LANGUAGE_JAVA) { throw unexpectedToken(); } read(); readThis(Tokens.NAME); checkIsValue(Types.SQL_CHAR); routine.setMethodURL((String) token.tokenValue); read(); if (token.tokenType == Tokens.PARAMETER) { read(); readThis(Tokens.STYLE); readThis(Tokens.JAVA); } } else { startRecording(); Statement statement = readSQLProcedureStatementOrNull(routine, null); Token[] tokenList = getRecordedStatement(); String sql = Token.getSQL(tokenList); statement.setSQL(sql); routine.setProcedure(statement); } Object[] args = new Object[]{ routine }; String sql = getLastPart(); StatementSchema cs = new StatementSchema(sql, StatementTypes.CREATE_ROUTINE, args, null, null); return cs; } | SQL-invoked routine | 642 | 4 |
| 154,204 | void close(boolean script) { closeLog(); deleteNewAndOldFiles(); writeScript(script); closeAllTextCaches(script); if (cache != null) { cache.close(true); } properties.setProperty(HsqlDatabaseProperties.db_version, HsqlDatabaseProperties.THIS_VERSION); properties.setProperty(HsqlDatabaseProperties.hsqldb_compatible_version, HsqlDatabaseProperties.FIRST_COMPATIBLE_VERSION); // set this one last to save the props properties.setDBModified(HsqlDatabaseProperties.FILES_NEW); deleteLog(); if (script) { deleteBackup(); deleteData(); } else { try { backupData(); renameNewBackup(); } catch (IOException e) {} } renameNewScript(); properties.setProperty(HsqlDatabaseProperties.hsqldb_cache_version, HsqlDatabaseProperties.THIS_CACHE_VERSION); properties.setDBModified(HsqlDatabaseProperties.FILES_NOT_MODIFIED); } | Close all the database files. If script argument is true, no .data or .backup file will remain and the .script file will contain all the data of the cached tables as well as memory tables. | 252 | 40 |
| 154,205 | void deleteNewAndOldFiles() { fa.removeElement(fileName + ".data" + ".old"); fa.removeElement(fileName + ".data" + ".new"); fa.removeElement(fileName + ".backup" + ".new"); fa.removeElement(scriptFileName + ".new"); } | Deletes the leftovers from any previous unfinished operations. | 76 | 11 |
| 154,206 | boolean forceDefrag() { long megas = properties.getIntegerProperty(HsqlDatabaseProperties.hsqldb_defrag_limit, 200); long defraglimit = megas * 1024L * 1024; long lostSize = cache.freeBlocks.getLostBlocksSize(); return lostSize > defraglimit; } | Returns true if lost space is above the threshold. | 75 | 9 |
| 154,207 | DataFileCache getCache() { /* if (database.isFilesInJar()) { return null; } */ if (cache == null) { cache = new DataFileCache(database, fileName); cache.open(filesReadOnly); } return cache; } | Responsible for creating the cache instance. | 58 | 9 |
| 154,208 | void setScriptType(int type) { // OOo related code if (database.isStoredFileAccess()) { return; } // OOo end boolean needsCheckpoint = scriptFormat != type; scriptFormat = type; properties.setProperty(HsqlDatabaseProperties.hsqldb_script_format, String.valueOf(scriptFormat)); if (needsCheckpoint) { database.logger.needsCheckpoint = true; } } | Changing the script format results in a checkpoint, with the .script file written in the new format. | 100 | 19 |
| 154,209 | private void writeScript(boolean full) { deleteNewScript(); // fredt - to do - flag for chache set index ScriptWriterBase scw = ScriptWriterBase.newScriptWriter(database, scriptFileName + ".new", full, true, scriptFormat); scw.writeAll(); scw.close(); } | Write the .script file as .script.new. | 76 | 11 |
| 154,210 | private void processScript() { ScriptReaderBase scr = null; try { if (database.isFilesInJar() \|\| fa.isStreamElement(scriptFileName)) { scr = ScriptReaderBase.newScriptReader(database, scriptFileName, scriptFormat); Session session = database.sessionManager.getSysSessionForScript(database); scr.readAll(session); scr.close(); } } catch (Throwable e) { if (scr != null) { scr.close(); if (cache != null) { cache.close(false); } closeAllTextCaches(false); } database.logger.appLog.logContext(e, null); if (e instanceof HsqlException) { throw (HsqlException) e; } else if (e instanceof IOException) { throw Error.error(ErrorCode.FILE_IO_ERROR, e.toString()); } else if (e instanceof OutOfMemoryError) { throw Error.error(ErrorCode.OUT_OF_MEMORY); } else { throw Error.error(ErrorCode.GENERAL_ERROR, e.toString()); } } } | Performs all the commands in the .script file. | 259 | 11 |
| 154,211 | private void processDataFile() { // OOo related code if (database.isStoredFileAccess()) { return; } // OOo end if (cache == null \|\| filesReadOnly \|\| !fa.isStreamElement(logFileName)) { return; } File file = new File(logFileName); long logLength = file.length(); long dataLength = cache.getFileFreePos(); if (logLength + dataLength > cache.maxDataFileSize) { database.logger.needsCheckpoint = true; } } | Defrag large data files when the sum of .log and .data files is large. | 123 | 18 |
| 154,212 | private void processLog() { if (!database.isFilesInJar() && fa.isStreamElement(logFileName)) { ScriptRunner.runScript(database, logFileName, ScriptWriterBase.SCRIPT_TEXT_170); } } | Performs all the commands in the .log file. | 57 | 11 |
| 154,213 | private void restoreBackup() { if (incBackup) { restoreBackupIncremental(); return; } // in case data file cannot be deleted, reset it DataFileCache.deleteOrResetFreePos(database, fileName + ".data"); try { FileArchiver.unarchive(fileName + ".backup", fileName + ".data", database.getFileAccess(), FileArchiver.COMPRESSION_ZIP); } catch (Exception e) { throw Error.error(ErrorCode.FILE_IO_ERROR, ErrorCode.M_Message_Pair, new Object[]{ fileName + ".backup", e.toString() }); } } | Restores a compressed backup or the .data file. | 157 | 11 |
| 154,214 | private void restoreBackupIncremental() { try { if (fa.isStreamElement(fileName + ".backup")) { RAShadowFile.restoreFile(fileName + ".backup", fileName + ".data"); } else { /* // this is to ensure file has been written fully but it is not necessary // as semantics dictate that if a backup does not exist, the file // was never changed or was fully written to if (FileUtil.exists(cacheFileName)) { int flags = DataFileCache.getFlags(cacheFileName); if (!BitMap.isSet(flags, DataFileCache.FLAG_ISSAVED)) { FileUtil.delete(cacheFileName); } } */ } deleteBackup(); } catch (IOException e) { throw Error.error(ErrorCode.FILE_IO_ERROR, fileName + ".backup"); } } | Restores from an incremental backup. | 200 | 7 |
| 154,215 | public void updateCatalog(String diffCmds, CatalogContext context, boolean isReplay, boolean requireCatalogDiffCmdsApplyToEE, boolean requiresNewExportGeneration) { // note this will never require snapshot isolation because the MPI has no snapshot funtionality m_executionSite.updateCatalog(diffCmds, context, false, true, Long.MIN_VALUE, Long.MIN_VALUE, Long.MIN_VALUE, isReplay, requireCatalogDiffCmdsApplyToEE, requiresNewExportGeneration); m_scheduler.updateCatalog(diffCmds, context); } | Update the MPI's Site's catalog. Unlike the SPI, this is not going to run from the same Site's thread; this is actually going to run from some other local SPI's Site thread. Since the MPI's site thread is going to be blocked running the EveryPartitionTask for the catalog update, this is currently safe with no locking. And yes, I'm a horrible person. | 129 | 78 |
| 154,216 | public static Pair<AbstractTopology, ImmutableList<Integer>> mutateAddNewHosts(AbstractTopology currentTopology, Map<Integer, HostInfo> newHostInfos) { int startingPartitionId = getNextFreePartitionId(currentTopology); TopologyBuilder topologyBuilder = addPartitionsToHosts(newHostInfos, Collections.emptySet(), currentTopology.getReplicationFactor(), startingPartitionId); ImmutableList.Builder<Integer> newPartitions = ImmutableList.builder(); for (PartitionBuilder pb : topologyBuilder.m_partitions) { newPartitions.add(pb.m_id); } return Pair.of(new AbstractTopology(currentTopology, topologyBuilder), newPartitions.build()); } | Add new hosts to an existing topology and lay out partitions on those hosts. | 185 | 14 |
| 154,217 | public static Pair<AbstractTopology, Set<Integer>> mutateRemoveHosts(AbstractTopology currentTopology, Set<Integer> removalHosts) { Set<Integer> removalPartitionIds = getPartitionIdsForHosts(currentTopology, removalHosts); return Pair.of(new AbstractTopology(currentTopology, removalHosts, removalPartitionIds), removalPartitionIds); } | Remove hosts from an existing topology. | 96 | 7 |
| 154,218 | public Set<Integer> getPartitionGroupPeers(int hostId) { Set<Integer> peers = Sets.newHashSet(); for (Partition p : hostsById.get(hostId).partitions) { peers.addAll(p.hostIds); } return peers; } | Get all the hostIds in the partition group where the host with the given host id belongs. | 66 | 19 |
| 154,219 | public static List<Collection<Integer>> sortHostIdByHGDistance(int hostId, Map<Integer, String> hostGroups) { String localHostGroup = hostGroups.get(hostId); Preconditions.checkArgument(localHostGroup != null); HAGroup localHaGroup = new HAGroup(localHostGroup); // Memorize the distance, map the distance to host ids. Multimap<Integer, Integer> distanceMap = MultimapBuilder.treeKeys(Comparator.<Integer>naturalOrder().reversed()).arrayListValues().build(); for (Map.Entry<Integer, String> entry : hostGroups.entrySet()) { if (hostId == entry.getKey()) { continue; } distanceMap.put(localHaGroup.getRelationshipTo(entry.getValue()).m_distance, entry.getKey()); } return new ArrayList<>(distanceMap.asMap().values()); } | Sort all nodes in reverse hostGroup distance order, then group by rack-aware group; the local host id is excluded. | 232 | 22 |
| 154,220 | public void reportQueued(String importerName, String procName) { StatsInfo statsInfo = getStatsInfo(importerName, procName); statsInfo.m_pendingCount.incrementAndGet(); } | An insert request was queued. | 49 | 6 |
| 154,221 | public void reportFailure(String importerName, String procName, boolean decrementPending) { StatsInfo statsInfo = getStatsInfo(importerName, procName); if (decrementPending) { statsInfo.m_pendingCount.decrementAndGet(); } statsInfo.m_failureCount.incrementAndGet(); } | Use this when the insert fails even before the request is queued by the InternalConnectionHandler. | 79 | 18 |
| 154,222 | private void reportSuccess(String importerName, String procName) { StatsInfo statsInfo = getStatsInfo(importerName, procName); statsInfo.m_pendingCount.decrementAndGet(); statsInfo.m_successCount.incrementAndGet(); } | One insert succeeded. | 63 | 3 |
| 154,223 | private void reportRetry(String importerName, String procName) { StatsInfo statsInfo = getStatsInfo(importerName, procName); statsInfo.m_retryCount.incrementAndGet(); } | One insert was retried. | 49 | 5 |
| 154,224 | public void writeBlock(byte[] block) throws IOException { if (block.length != 512) { throw new IllegalArgumentException(RB.singleton.getString(RB.BAD_BLOCK_WRITE_LEN, block.length)); } write(block, block.length); } | Write a user-specified 512-byte block. | 68 | 10 |
| 154,225 | public void writePadBlocks(int blockCount) throws IOException { for (int i = 0; i < blockCount; i++) { write(ZERO_BLOCK, ZERO_BLOCK.length); } } | Writes the specified quantity of zero'd blocks. | 49 | 10 |
| 154,226 | private Vector getAllTables() { Vector result = new Vector(20); try { if (cConn == null) { return null; } dbmeta = cConn.getMetaData(); String[] tableTypes = { "TABLE" }; ResultSet allTables = dbmeta.getTables(null, null, null, tableTypes); while (allTables.next()) { String aktTable = allTables.getString("TABLE_NAME"); ResultSet primKeys = dbmeta.getPrimaryKeys(null, null, aktTable); // take only table with a primary key if (primKeys.next()) { result.addElement(aktTable); } primKeys.close(); } allTables.close(); } catch (SQLException e) { // System.out.println("SQL Exception: " + e.getMessage()); } return result; } | Exclude tables without primary key. | 207 | 6 |
| 154,227 | private int getChoosenTableIndex() { String tableName = cTables.getSelectedItem(); // System.out.println("in getChoosenTableIndex, selected Item is "+tableName); int index = getTableIndex(tableName); if (index >= 0) { // System.out.println("table found, index: " + index); return index; } // end of if (index >= 0) ZaurusTableForm tableForm = new ZaurusTableForm(tableName, cConn); pForm.add(tableName, tableForm); vHoldTableNames.addElement(tableName); vHoldForms.addElement(tableForm); // System.out.println("new tableform for table "+tableName+", index: " + index); return vHoldTableNames.size() - 1; } | If the table name is not in vHoldTableNames, create a ZaurusTableForm for it. | 185 | 19 |
| 154,228 | private int getTableIndex(String tableName) { int index; // System.out.println("begin searching for "+tableName); for (index = 0; index < vHoldTableNames.size(); index++) { // System.out.println("in getTableIndex searching for "+tableName+", index: "+index); if (tableName.equals((String) vHoldTableNames.elementAt(index))) { return index; } // end of if (tableName.equals(vHoldTableNames.elementAt(index))) } // end of for (index = 0; index < vHoldTableNames.size(); index ++) return -1; } | If the name is not in vHoldTableNames, answer -1. | 147 | 13 |
| 154,229 | private String[] getWords() { StringTokenizer tokenizer = new StringTokenizer(fSearchWords.getText()); String[] result = new String[tokenizer.countTokens()]; int i = 0; while (tokenizer.hasMoreTokens()) { result[i++] = tokenizer.nextToken(); } // end of while ((tokenizer.hasMoreTokens())) return result; } | Convert the search words in the textfield to an array of words. | 96 | 14 |
| 154,230 | private void initButtons() { // the buttons for the search form bSearchRow = new Button("Search Rows"); bNewRow = new Button("Insert New Row"); bSearchRow.addActionListener(this); bNewRow.addActionListener(this); pSearchButs = new Panel(); pSearchButs.setLayout(new GridLayout(1, 0, 4, 4)); pSearchButs.add(bSearchRow); pSearchButs.add(bNewRow); // the buttons for editing a row bCancel1 = new Button("Cancel"); bPrev = new Button("Prev"); bNext = new Button("Next"); bDelete = new Button("Delete"); lastButtonDelete = false; bNewSearch = new Button("Search"); bCancel1.addActionListener(this); bPrev.addActionListener(this); bNext.addActionListener(this); bDelete.addActionListener(this); bNewSearch.addActionListener(this); pEditButs = new Panel(); pEditButs.setLayout(new GridLayout(1, 0, 4, 4)); pEditButs.add(bCancel1); pEditButs.add(bPrev); pEditButs.add(bNext); pEditButs.add(bDelete); pEditButs.add(bNewSearch); // the buttons for inserting a new row pInsertButs = new Panel(); pInsertButs.setLayout(new GridLayout(1, 0, 4, 4)); bCancel2 = new Button("Cancel Insert"); bNewInsert = new Button("New Insert"); bNewSearch1 = new Button("Search"); bCancel2.addActionListener(this); bNewInsert.addActionListener(this); bNewSearch1.addActionListener(this); pInsertButs.add(bCancel2); pInsertButs.add(bNewInsert); pInsertButs.add(bNewSearch1); } | Init the three boxes for buttons. | 491 | 6 |
| 154,231 | private void resetTableForms() { lForm.show(pForm, "search"); lButton.show(pButton, "search"); Vector vAllTables = getAllTables(); // fill the drop down list again // get all table names and show a drop down list of them in cTables cTables.removeAll(); for (Enumeration e = vAllTables.elements(); e.hasMoreElements();) { cTables.addItem((String) e.nextElement()); } // remove all form panels from pForm for (Enumeration e = vHoldForms.elements(); e.hasMoreElements();) { pForm.remove((ZaurusTableForm) e.nextElement()); } // end of while (Enumeration e = vHoldForms.elements(); e.hasMoreElements();) // initialize a new list for the table names which have a form in pForm vHoldTableNames = new Vector(20); vHoldForms = new Vector(20); } | Reset everything after changes in the database. | 247 | 7 |
| 154,232 | private ParsedSelectStmt getLeftmostSelectStmt() { assert (!m_children.isEmpty()); AbstractParsedStmt firstChild = m_children.get(0); if (firstChild instanceof ParsedSelectStmt) { return (ParsedSelectStmt) firstChild; } else { assert (firstChild instanceof ParsedUnionStmt); return ((ParsedUnionStmt) firstChild).getLeftmostSelectStmt(); } } | Return the leftmost child SELECT statement. | 110 | 7 |
| 154,233 | @Override public String calculateContentDeterminismMessage() { String ans = null; for (AbstractParsedStmt child : m_children) { ans = child.getContentDeterminismMessage(); if (ans != null) { return ans; } } return null; } | Here we search all the children, finding if each is content deterministic. If it is, we return right away. | 63 | 22 |
| 154,234 | public synchronized void rollLog() throws IOException { if (logStream != null) { this.logStream.flush(); this.logStream = null; oa = null; } } | Rollover the current log file to a new one. | 42 | 11 |
| 154,235 | public synchronized void close() throws IOException { if (logStream != null) { logStream.close(); } for (FileOutputStream log : streamsToFlush) { log.close(); } } | Close all the open file handles. | 47 | 6 |
| 154,236 | public synchronized boolean append(TxnHeader hdr, Record txn) throws IOException { if (hdr != null) { if (hdr.getZxid() <= lastZxidSeen) { LOG.warn("Current zxid " + hdr.getZxid() + " is <= " + lastZxidSeen + " for " + hdr.getType()); } if (logStream == null) { if (LOG.isInfoEnabled()) { LOG.info("Creating new log file: log." + Long.toHexString(hdr.getZxid())); } logFileWrite = new File(logDir, ("log." + Long.toHexString(hdr.getZxid()))); fos = new FileOutputStream(logFileWrite); logStream = new BufferedOutputStream(fos); oa = BinaryOutputArchive.getArchive(logStream); FileHeader fhdr = new FileHeader(TXNLOG_MAGIC, VERSION, dbId); fhdr.serialize(oa, "fileheader"); currentSize = fos.getChannel().position(); streamsToFlush.add(fos); } padFile(fos); byte[] buf = Util.marshallTxnEntry(hdr, txn); if (buf == null \|\| buf.length == 0) { throw new IOException("Faulty serialization for header " + "and txn"); } Checksum crc = makeChecksumAlgorithm(); crc.update(buf, 0, buf.length); oa.writeLong(crc.getValue(), "txnEntryCRC"); Util.writeTxnBytes(oa, buf); return true; } return false; } | Append an entry to the transaction log. | 428 | 7 |
| 154,237 | private void padFile(FileOutputStream out) throws IOException { currentSize = Util.padLogFile(out, currentSize, preAllocSize); } | Pad the current file to increase its size. | 36 | 8 |
| 154,238 | public static File[] getLogFiles(File[] logDirList, long snapshotZxid) { List<File> files = Util.sortDataDir(logDirList, "log", true); long logZxid = 0; // Find the log file that starts before or at the same time as the // zxid of the snapshot for (File f : files) { long fzxid = Util.getZxidFromName(f.getName(), "log"); if (fzxid > snapshotZxid) { continue; } // the files // are sorted with zxid's if (fzxid > logZxid) { logZxid = fzxid; } } List<File> v = new ArrayList<File>(5); for (File f : files) { long fzxid = Util.getZxidFromName(f.getName(), "log"); if (fzxid < logZxid) { continue; } v.add(f); } return v.toArray(new File[0]); } | Find the log file that starts at or just before the snapshot. Return this and all subsequent logs. Results are ordered by zxid of file in ascending order. | 249 | 32 |
| 154,239 | public long getLastLoggedZxid() { File[] files = getLogFiles(logDir.listFiles(), 0); long maxLog = files.length > 0 ? Util.getZxidFromName(files[files.length - 1].getName(), "log") : -1; // if a log file is more recent we must scan it to find // the highest zxid long zxid = maxLog; try { FileTxnLog txn = new FileTxnLog(logDir); TxnIterator itr = txn.read(maxLog); while (true) { if (!itr.next()) break; TxnHeader hdr = itr.getHeader(); zxid = hdr.getZxid(); } } catch (IOException e) { LOG.warn("Unexpected exception", e); } return zxid; } | Get the last zxid that was logged in the transaction logs. | 212 | 13 |
| 154,240 | public synchronized void commit() throws IOException { if (logStream != null) { logStream.flush(); } for (FileOutputStream log : streamsToFlush) { log.flush(); if (forceSync) { log.getChannel().force(false); } } while (streamsToFlush.size() > 1) { streamsToFlush.removeFirst().close(); } } | Commit the logs. Make sure that everything hits the disk. | 95 | 13 |
| 154,241 | public boolean truncate(long zxid) throws IOException { FileTxnIterator itr = new FileTxnIterator(this.logDir, zxid); PositionInputStream input = itr.inputStream; long pos = input.getPosition(); // now, truncate at the current position RandomAccessFile raf = new RandomAccessFile(itr.logFile, "rw"); raf.setLength(pos); raf.close(); while (itr.goToNextLog()) { if (!itr.logFile.delete()) { LOG.warn("Unable to truncate " + itr.logFile); } } return true; } | Truncate the current transaction logs. | 157 | 7 |
| 154,242 | private static FileHeader readHeader(File file) throws IOException { InputStream is = null; try { is = new BufferedInputStream(new FileInputStream(file)); InputArchive ia = BinaryInputArchive.getArchive(is); FileHeader hdr = new FileHeader(); hdr.deserialize(ia, "fileheader"); return hdr; } finally { try { if (is != null) is.close(); } catch (IOException e) { LOG.warn("Ignoring exception during close", e); } } } | Read the header of the transaction file. | 131 | 7 |
| 154,243 | public long getDbId() throws IOException { FileTxnIterator itr = new FileTxnIterator(logDir, 0); FileHeader fh = readHeader(itr.logFile); itr.close(); if (fh == null) throw new IOException("Unsupported Format."); return fh.getDbid(); } | The dbid of this transaction database. | 82 | 7 |
| 154,244 | public static void verifyForHdfsUse(String sb) throws IllegalArgumentException { Preconditions.checkArgument(sb != null && !sb.trim().isEmpty(), "null or empty hdfs endpoint"); int mask = conversionMaskFor(sb); boolean hasDateConversion = (mask & DATE) == DATE; Preconditions.checkArgument((mask & HDFS_MASK) == HDFS_MASK, "hdfs endpoint \"" + sb + "\" must contain the (%t)able, the (%p)artition, and the (%g) generation conversions"); final String tn = "__IMPROBABLE_TABLE_NAME__"; final int pn = Integer.MIN_VALUE; final long gn = Long.MIN_VALUE; final Date dt = new Date(0); final String fmtd = hasDateConversion ? new SimpleDateFormat(DATE_FORMAT).format(dt) : ""; URI uri = URI.create(expand(sb, tn, pn, gn, dt)); String path = uri.getPath(); List<String> missing = new ArrayList<>(); if (!path.contains(tn)) missing.add("%t"); if (!path.contains(Integer.toString(pn))) missing.add("%p"); if (!path.contains(Long.toString(gn, Character.MAX_RADIX))) missing.add("%g"); if (hasDateConversion && !path.contains(fmtd)) missing.add("%d"); if (!missing.isEmpty()) { String notInPath = Joiner.on(", ").join(missing); throw new IllegalArgumentException("hdfs enpoint \"" + sb + "\" does not contain conversion(s) " + notInPath + " in the path element of the URL"); } } | Verifies that the given endpoint format string specifies all the required hdfs conversions in the path portion of the endpoint. | 460 | 23 |
| 154,245 | public static void verifyForBatchUse(String sb) throws IllegalArgumentException { Preconditions.checkArgument(sb != null && !sb.trim().isEmpty(), "null or empty hdfs endpoint"); int mask = conversionMaskFor(sb); Preconditions.checkArgument((mask & HDFS_MASK) == HDFS_MASK, "batch mode endpoint \"" + sb + "\" must contain the (%t)able, the (%p)artition, and the (%g) generation conversions"); } | Verifies that the given endpoint format string specifies all the required batch mode conversions. | 129 | 15 |
| 154,246 | @Override protected void handleJSONMessage(JSONObject obj) throws Exception { hostLog.warn("SystemCatalogAgent received a JSON message, which should be impossible."); VoltTable[] results = null; sendOpsResponse(results, obj); } | SystemCatalog shouldn't currently get here; make it so we don't die or do anything. | 55 | 17 |
| 154,247 | public static <K, V> Map<K, V> constrainedMap(Map<K, V> map, MapConstraint<? super K, ? super V> constraint) { return new ConstrainedMap<K, V>(map, constraint); } | Returns a constrained view of the specified map, using the specified constraint. Any operations that add new mappings will call the provided constraint. However, this method does not verify that existing mappings satisfy the constraint. | 58 | 40 |
| 154,248 | public static <K, V> ListMultimap<K, V> constrainedListMultimap(ListMultimap<K, V> multimap, MapConstraint<? super K, ? super V> constraint) { return new ConstrainedListMultimap<K, V>(multimap, constraint); } | Returns a constrained view of the specified list multimap, using the specified constraint. Any operations that add new mappings will call the provided constraint. However, this method does not verify that existing mappings satisfy the constraint. | 72 | 42 |
| 154,249 | private void validateWindowedSyntax() { // Check that the aggregate is one of the supported ones, and // that the number of aggregate parameters is right. switch (opType) { case OpTypes.WINDOWED_RANK: case OpTypes.WINDOWED_DENSE_RANK: case OpTypes.WINDOWED_ROW_NUMBER: if (nodes.length != 0) { throw Error.error("Windowed Aggregate " + OpTypes.aggregateName(opType) + " expects no arguments.", "", 0); } break; case OpTypes.WINDOWED_COUNT: case OpTypes.WINDOWED_MIN: case OpTypes.WINDOWED_MAX: case OpTypes.WINDOWED_SUM: break; default: throw Error.error("Unsupported window function " + OpTypes.aggregateName(opType), "", 0); } } | Validate that this is a collection of values. | 205 | 10 |
| 154,250 | Result getResult(Session session) { Table table = baseTable; Result resultOut = null; RowSetNavigator generatedNavigator = null; PersistentStore store = session.sessionData.getRowStore(baseTable); if (generatedIndexes != null) { resultOut = Result.newUpdateCountResult(generatedResultMetaData, 0); generatedNavigator = resultOut.getChainedResult().getNavigator(); } RowSetNavigator newDataNavigator = queryExpression == null ? getInsertValuesNavigator(session) : getInsertSelectNavigator(session); Expression checkCondition = null; RangeIteratorBase checkIterator = null; if (targetTable != baseTable) { QuerySpecification select = ((TableDerived) targetTable).getQueryExpression().getMainSelect(); checkCondition = select.checkQueryCondition; if (checkCondition != null) { checkIterator = select.rangeVariables[0].getIterator(session); } } while (newDataNavigator.hasNext()) { Object[] data = newDataNavigator.getNext(); if (checkCondition != null) { checkIterator.currentData = data; boolean check = checkCondition.testCondition(session); if (!check) { throw Error.error(ErrorCode.X_44000); } } table.insertRow(session, store, data); if (generatedNavigator != null) { Object[] generatedValues = getGeneratedColumns(data); generatedNavigator.add(generatedValues); } } newDataNavigator.beforeFirst(); table.fireAfterTriggers(session, Trigger.INSERT_AFTER, newDataNavigator); if (resultOut == null) { resultOut = Result.getUpdateCountResult(newDataNavigator.getSize()); } else { resultOut.setUpdateCount(newDataNavigator.getSize()); } return resultOut; } | Executes an INSERT_SELECT statement. It is assumed that the argument is of the correct type. | 433 | 21 |
| 154,251 | @Override public void resolveForTable(Table table) { assert (table != null); // It MAY be that for the case in which this function is called (expression indexes), the column's // table name is not specified (and not missed?). // It is possible to "correct" that here by cribbing it from the supplied table (base table for the index) // -- not bothering for now. Column column = table.getColumns().getExact(m_columnName); assert (column != null); m_tableName = table.getTypeName(); m_columnIndex = column.getIndex(); setTypeSizeAndInBytes(column); } | Resolve a TVE in the context of the given table. Since this is a TVE, it is a leaf node in the expression tree. We just look up the metadata from the table and copy it here to this object. | 148 | 46 |
| 154,252 | public int setColumnIndexUsingSchema(NodeSchema inputSchema) { int index = inputSchema.getIndexOfTve(this); if (index < 0) { //* enable to debug*/ System.out.println("DEBUG: setColumnIndex miss: " + this); //* enable to debug*/ System.out.println("DEBUG: setColumnIndex candidates: " + inputSchema); return index; } setColumnIndex(index); if (getValueType() == null) { // In case of sub-queries the TVE may not have its // value type and size resolved yet. Try to resolve it now SchemaColumn inputColumn = inputSchema.getColumn(index); setTypeSizeAndInBytes(inputColumn); } return index; } | Given an input schema, resolve this TVE expression. | 170 | 10 |
| 154,253 | public String getColumnClassName(int column) throws SQLException { sourceResultSet.checkColumnBounds(column); VoltType type = sourceResultSet.table.getColumnType(column - 1); String result = type.getJdbcClass(); if (result == null) { throw SQLError.get(SQLError.TRANSLATION_NOT_FOUND, type); } return result; } | Returns the fully-qualified name of the Java class whose instances are manufactured if the method ResultSet.getObject is called to retrieve a value from the column. | 97 | 32 |
| 154,254 | public int getPrecision(int column) throws SQLException { sourceResultSet.checkColumnBounds(column); VoltType type = sourceResultSet.table.getColumnType(column - 1); Integer result = type.getTypePrecisionAndRadix()[0]; if (result == null) { result = 0; } return result; } | Get the designated column's specified column size. | 80 | 9 |
| 154,255 | public int getScale(int column) throws SQLException { sourceResultSet.checkColumnBounds(column); VoltType type = sourceResultSet.table.getColumnType(column - 1); Integer result = type.getMaximumScale(); if (result == null) { result = 0; } return result; } | Gets the designated column's number of digits to the right of the decimal point. | 72 | 16 |
| 154,256 | public boolean isCaseSensitive(int column) throws SQLException { sourceResultSet.checkColumnBounds(column); VoltType type = sourceResultSet.table.getColumnType(column - 1); return type.isCaseSensitive(); } | Indicates whether a column's case matters. | 58 | 9 |
| 154,257 | public boolean isSigned(int column) throws SQLException { sourceResultSet.checkColumnBounds(column); VoltType type = sourceResultSet.table.getColumnType(column - 1); Boolean result = type.isUnsigned(); if (result == null) { // Null return value means 'not signed' as far as this interface goes return false; } return !result; } | Indicates whether values in the designated column are signed numbers. | 88 | 12 |
| 154,258 | private AbstractPlanNode applyOptimization(WindowFunctionPlanNode plan) { assert (plan.getChildCount() == 1); assert (plan.getChild(0) != null); AbstractPlanNode child = plan.getChild(0); assert (child != null); // SP Plans which have an index which can provide // the window function ordering don't create // an order by node. if (!(child instanceof OrderByPlanNode)) { return plan; } OrderByPlanNode onode = (OrderByPlanNode) child; child = onode.getChild(0); // The order by node needs a RECEIVE node child // for this optimization to work. if (!(child instanceof ReceivePlanNode)) { return plan; } ReceivePlanNode receiveNode = (ReceivePlanNode) child; assert (receiveNode.getChildCount() == 1); child = receiveNode.getChild(0); // The Receive node needs a send node child. assert (child instanceof SendPlanNode); SendPlanNode sendNode = (SendPlanNode) child; child = sendNode.getChild(0); // If this window function does not use the // index then this optimization is not possible. // We've recorded a number of the window function // in the root of the subplan, which will be // the first child of the send node. // // Right now the only window function has number // 0, and we don't record that in the // WINDOWFUNCTION plan node. If there were // more than one window function we would need // to record a number in the plan node and // then check that child.getWindowFunctionUsesIndex() // returns the number in the plan node. if (!(child instanceof IndexSortablePlanNode)) { return plan; } IndexSortablePlanNode indexed = (IndexSortablePlanNode) child; if (indexed.indexUse().getWindowFunctionUsesIndex() != 0) { return plan; } // Remove the Receive node and the Order by node // and replace them with a MergeReceive node. Leave // the order by node inline in the MergeReceive node, // since we need it to calculate the merge. plan.clearChildren(); receiveNode.removeFromGraph(); MergeReceivePlanNode mrnode = new MergeReceivePlanNode(); mrnode.addInlinePlanNode(onode); mrnode.addAndLinkChild(sendNode); plan.addAndLinkChild(mrnode); return plan; } | Convert ReceivePlanNodes into MergeReceivePlanNodes when the RECEIVE node's nearest parent is a window function. We won't have any inline limits or aggregates here, so this is somewhat simpler than the order by case. | 559 | 50 |
| 154,259 | AbstractPlanNode convertToSerialAggregation(AbstractPlanNode aggregateNode, OrderByPlanNode orderbyNode) { assert (aggregateNode instanceof HashAggregatePlanNode); HashAggregatePlanNode hashAggr = (HashAggregatePlanNode) aggregateNode; List<AbstractExpression> groupbys = new ArrayList<>(hashAggr.getGroupByExpressions()); List<AbstractExpression> orderbys = new ArrayList<>(orderbyNode.getSortExpressions()); Set<Integer> coveredGroupByColumns = new HashSet<>(); Iterator<AbstractExpression> orderbyIt = orderbys.iterator(); while (orderbyIt.hasNext()) { AbstractExpression orderby = orderbyIt.next(); int idx = 0; for (AbstractExpression groupby : groupbys) { if (!coveredGroupByColumns.contains(idx)) { if (orderby.equals(groupby)) { orderbyIt.remove(); coveredGroupByColumns.add(idx); break; } } ++idx; } } if (orderbys.isEmpty() && groupbys.size() == coveredGroupByColumns.size()) { // All GROUP BY expressions are also ORDER BY - Serial aggregation return AggregatePlanNode.convertToSerialAggregatePlanNode(hashAggr); } if (orderbys.isEmpty() && !coveredGroupByColumns.isEmpty()) { // Partial aggregation List<Integer> coveredGroupByColumnList = new ArrayList<>(); coveredGroupByColumnList.addAll(coveredGroupByColumns); return AggregatePlanNode.convertToPartialAggregatePlanNode(hashAggr, coveredGroupByColumnList); } return aggregateNode; } | The Hash aggregate can be converted to a Serial or Partial aggregate: if all GROUP BY and ORDER BY expressions bind to each other, Serial Aggregate; if a subset of the GROUP BY expressions covers all of the ORDER BY, Partial; anything else remains a Hash Aggregate. | 401 | 54 |
| 154,260 | private final void startHeartbeat() { if (timerTask == null \|\| HsqlTimer.isCancelled(timerTask)) { Runnable runner = new HeartbeatRunner(); timerTask = timer.schedulePeriodicallyAfter(0, HEARTBEAT_INTERVAL, runner, true); } } | Schedules the lock heartbeat task. | 70 | 8 |
| 154,261 | private final void stopHeartbeat() { if (timerTask != null && !HsqlTimer.isCancelled(timerTask)) { HsqlTimer.cancel(timerTask); timerTask = null; } } | Cancels the lock heartbeat task. | 48 | 8 |
| 154,262 | public final static boolean isLocked(final String path) { boolean locked = true; try { LockFile lockFile = LockFile.newLockFile(path); lockFile.checkHeartbeat(false); locked = false; } catch (Exception e) {} return locked; } | Retrieves whether there is potentially already a cooperative lock, operating system lock, or some other situation preventing a cooperative lock condition from being acquired using the specified path. | 61 | 32 |
| 154,263 | public static ImmutableSortedSet<String> hosts(String option) { checkArgument(option != null, "option is null"); if (option.trim().isEmpty()) { return ImmutableSortedSet.of(HostAndPort.fromParts("", Constants.DEFAULT_INTERNAL_PORT).toString()); } Splitter commaSplitter = Splitter.on(',').omitEmptyStrings().trimResults(); /* char literal restored from context (commaSplitter, comma-delimited); extraction dropped it */ ImmutableSortedSet.Builder<String> sbld = ImmutableSortedSet.naturalOrder(); for (String h : commaSplitter.split(option)) { checkArgument(isValidCoordinatorSpec(h), "%s is not a valid host spec", h); sbld.add(HostAndPort.fromString(h).withDefaultPort(Constants.DEFAULT_INTERNAL_PORT).toString()); } return sbld.build(); } | Helper method that takes a comma-delimited list of host specs, validates it, and converts it to a set of valid coordinators. | 227 | 25 |
| 154,264 | public static ImmutableSortedSet<String> hosts(int... ports) { if (ports.length == 0) { return ImmutableSortedSet.of(HostAndPort.fromParts("", Constants.DEFAULT_INTERNAL_PORT).toString()); } ImmutableSortedSet.Builder<String> sbld = ImmutableSortedSet.naturalOrder(); for (int p : ports) { sbld.add(HostAndPort.fromParts("", p).toString()); } return sbld.build(); } | Convenience method mainly used in local cluster testing. | 132 | 10 |
| 154,265 | public ClientResponseImpl call(Object... paramListIn) { m_perCallStats = m_statsCollector.beginProcedure(); // if we're keeping track, calculate parameter size if (m_perCallStats != null) { StoredProcedureInvocation invoc = (m_txnState != null ? m_txnState.getInvocation() : null); ParameterSet params = (invoc != null ? invoc.getParams() : ParameterSet.fromArrayNoCopy(paramListIn)); m_perCallStats.setParameterSize(params.getSerializedSize()); } ClientResponseImpl result = coreCall(paramListIn); // if we're keeping track, calculate result size if (m_perCallStats != null) { m_perCallStats.setResultSize(result.getResults()); } m_statsCollector.endProcedure(result.getStatus() == ClientResponse.USER_ABORT, (result.getStatus() != ClientResponse.USER_ABORT) && (result.getStatus() != ClientResponse.SUCCESS), m_perCallStats); // allow the GC to collect per-call stats if this proc isn't called for a while m_perCallStats = null; return result; } | Wraps coreCall with statistics code. | 295 | 8 |
| 154,266 | public static boolean isProcedureStackTraceElement(String procedureName, StackTraceElement stel) { int lastPeriodPos = stel.getClassName().lastIndexOf('.'); /* char literals restored from context (lastPeriodPos, "dollar sign" comment); extraction dropped them */ if (lastPeriodPos == -1) { lastPeriodPos = 0; } else { ++lastPeriodPos; } // Account for inner classes too. Inner classes names comprise of the parent // class path followed by a dollar sign String simpleName = stel.getClassName().substring(lastPeriodPos); return simpleName.equals(procedureName) \|\| (simpleName.startsWith(procedureName) && simpleName.charAt(procedureName.length()) == '$'); } | Test whether or not the given stack frame is within a procedure invocation. | 159 | 13 |
| 154,267 | public void handleUpdateDeployment(String jsonp, HttpServletRequest request, HttpServletResponse response, AuthenticationResult ar) throws IOException, ServletException { String deployment = request.getParameter("deployment"); if (deployment == null \|\| deployment.length() == 0) { response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Failed to get deployment information.")); response.setStatus(HttpServletResponse.SC_BAD_REQUEST); return; } try { DeploymentType newDeployment = m_mapper.readValue(deployment, DeploymentType.class); if (newDeployment == null) { response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Failed to parse deployment information.")); return; } DeploymentType currentDeployment = this.getDeployment(); if (currentDeployment.getUsers() != null) { newDeployment.setUsers(currentDeployment.getUsers()); } // reset the host count so that it wont fail the deployment checks newDeployment.getCluster().setHostcount(currentDeployment.getCluster().getHostcount()); String dep = CatalogUtil.getDeployment(newDeployment); if (dep == null \|\| dep.trim().length() <= 0) { response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Failed to build deployment information.")); return; } Object[] params = new Object[]{ null, dep }; SyncCallback cb = new SyncCallback(); httpClientInterface.callProcedure(request.getRemoteHost(), ar, BatchTimeoutOverrideType.NO_TIMEOUT, cb, "@UpdateApplicationCatalog", params); cb.waitForResponse(); ClientResponseImpl r = ClientResponseImpl.class.cast(cb.getResponse()); if (r.getStatus() == ClientResponse.SUCCESS) { response.getWriter().print(buildClientResponse(jsonp, ClientResponse.SUCCESS, "Deployment Updated.")); } else { response.getWriter().print(HTTPClientInterface.asJsonp(jsonp, r.toJSONString())); } } catch (JsonParseException e) { response.setStatus(HttpServletResponse.SC_BAD_REQUEST); response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Unparsable JSON")); } catch (Exception ex) { m_log.error("Failed to update deployment from API", ex); response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, Throwables.getStackTraceAsString(ex))); } } | Update the deployment. | 719 | 3 |
| 154,268 | public void handleRemoveUser(String jsonp, String target, HttpServletRequest request, HttpServletResponse response, AuthenticationResult ar) throws IOException, ServletException { try { DeploymentType newDeployment = CatalogUtil.getDeployment(new ByteArrayInputStream(getDeploymentBytes())); User user = null; String[] splitTarget = target.split("/"); if (splitTarget.length == 3) { user = findUser(splitTarget[2], newDeployment); } if (user == null) { response.setStatus(HttpServletResponse.SC_NOT_FOUND); response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "User not found")); return; } if (newDeployment.getUsers().getUser().size() == 1) { newDeployment.setUsers(null); } else { newDeployment.getUsers().getUser().remove(user); } String dep = CatalogUtil.getDeployment(newDeployment); if (dep == null \|\| dep.trim().length() <= 0) { response.setStatus(HttpServletResponse.SC_BAD_REQUEST); response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Failed to build deployment information.")); return; } Object[] params = new Object[]{ null, dep }; // Call sync as nothing else can happen when this is going on. SyncCallback cb = new SyncCallback(); httpClientInterface.callProcedure(request.getRemoteHost(), ar, BatchTimeoutOverrideType.NO_TIMEOUT, cb, "@UpdateApplicationCatalog", params); cb.waitForResponse(); ClientResponseImpl r = ClientResponseImpl.class.cast(cb.getResponse()); response.setStatus(HttpServletResponse.SC_NO_CONTENT); if (r.getStatus() == ClientResponse.SUCCESS) { response.getWriter().print(buildClientResponse(jsonp, ClientResponse.SUCCESS, "User Removed.")); } else { response.getWriter().print(HTTPClientInterface.asJsonp(jsonp, r.toJSONString())); } } catch (Exception ex) { m_log.error("Failed to update role from API", ex); response.setStatus(HttpServletResponse.SC_BAD_REQUEST); response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, Throwables.getStackTraceAsString(ex))); } } | Handle DELETE for users. | 640 | 6 |
| 154,269 | public void handleGetUsers(String jsonp, String target, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException { ObjectMapper mapper = new ObjectMapper(); User user = null; String[] splitTarget = target.split("/"); if (splitTarget.length < 3 \|\| splitTarget[2].isEmpty()) { if (jsonp != null) { response.getWriter().write(jsonp + "("); } if (getDeployment().getUsers() != null) { List<IdUser> id = new ArrayList<>(); for (UsersType.User u : getDeployment().getUsers().getUser()) { id.add(new IdUser(u, getHostHeader())); } mapper.writeValue(response.getWriter(), id); } else { response.getWriter().write("[]"); } if (jsonp != null) { response.getWriter().write(")"); } return; } user = findUser(splitTarget[2], getDeployment()); if (user == null) { response.setStatus(HttpServletResponse.SC_NOT_FOUND); response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "User not found")); return; } else { if (jsonp != null) { response.getWriter().write(jsonp + "("); } mapper.writeValue(response.getWriter(), new IdUser(user, getHostHeader())); if (jsonp != null) { response.getWriter().write(")"); } } } | Handle GET for users. | 407 | 4 |
| 154,270 | public void handleGetExportTypes(String jsonp, HttpServletResponse response) throws IOException, ServletException { if (jsonp != null) { response.getWriter().write(jsonp + "("); } JSONObject exportTypes = new JSONObject(); HashSet<String> exportList = new HashSet<>(); for (ServerExportEnum type : ServerExportEnum.values()) { exportList.add(type.value().toUpperCase()); } try { exportTypes.put("types", exportList); } catch (JSONException e) { m_log.error("Failed to generate exportTypes JSON: ", e); response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Type list failed to build")); return; } response.getWriter().write(exportTypes.toString()); if (jsonp != null) { response.getWriter().write(")"); } } | Handle GET for export types. | 264 | 5 |
| 154,271 | void createZKDirectory(String path) { try { try { m_zk.create(path, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); } catch (KeeperException e) { if (e.code() != Code.NODEEXISTS) { throw e; } } } catch (Exception e) { VoltDB.crashGlobalVoltDB("Failed to create Zookeeper node: " + e.getMessage(), false, e); } } | Creates a ZooKeeper directory if it doesn't exist. Crashes VoltDB if the creation fails for any reason other than the path already existing. | 122 | 31 |
| 154,272 | public Pair<Integer, String> findRestoreCatalog() { enterRestore(); try { m_snapshotToRestore = generatePlans(); } catch (Exception e) { VoltDB.crashGlobalVoltDB(e.getMessage(), true, e); } if (m_snapshotToRestore != null) { int hostId = m_snapshotToRestore.hostId; File file = new File(m_snapshotToRestore.path, m_snapshotToRestore.nonce + ".jar"); String path = file.getPath(); return Pair.of(hostId, path); } return null; } | Generate restore and replay plans, and return the catalog associated with the snapshot to restore if there is anything to restore. | 151 | 23 |
| 154,273 | void enterRestore() { createZKDirectory(VoltZK.restore); createZKDirectory(VoltZK.restore_barrier); createZKDirectory(VoltZK.restore_barrier2); try { m_generatedRestoreBarrier2 = m_zk.create(VoltZK.restore_barrier2 + "/counter", null, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL); } catch (Exception e) { VoltDB.crashGlobalVoltDB("Failed to create Zookeeper node: " + e.getMessage(), false, e); } } | Enters the restore process. Creates ZooKeeper barrier node for this host. | 152 | 17 |
| 154,274 | void exitRestore() { try { m_zk.delete(m_generatedRestoreBarrier2, -1); } catch (Exception e) { VoltDB.crashLocalVoltDB("Unable to delete zk node " + m_generatedRestoreBarrier2, false, e); } if (m_callback != null) { m_callback.onSnapshotRestoreCompletion(); } LOG.debug("Waiting for all hosts to complete restore"); List<String> children = null; while (true) { try { children = m_zk.getChildren(VoltZK.restore_barrier2, false); } catch (KeeperException e2) { VoltDB.crashGlobalVoltDB(e2.getMessage(), false, e2); } catch (InterruptedException e2) { continue; } if (children.size() > 0) { try { Thread.sleep(500); } catch (InterruptedException e) {} } else { break; } } // Clean up the ZK snapshot ID node so that we're good for next time. try { m_zk.delete(VoltZK.restore_snapshot_id, -1); } catch (Exception ignore) {} } | Exits the restore process. Waits for all other hosts to complete first. This method blocks. | 281 | 20 |
| 154,275 | static SnapshotInfo consolidateSnapshotInfos(Collection<SnapshotInfo> lastSnapshot) { SnapshotInfo chosen = null; if (lastSnapshot != null) { Iterator<SnapshotInfo> i = lastSnapshot.iterator(); while (i.hasNext()) { SnapshotInfo next = i.next(); if (chosen == null) { chosen = next; } else if (next.hostId < chosen.hostId) { next.partitionToTxnId.putAll(chosen.partitionToTxnId); chosen = next; } else { // create a full mapping of txn ids to partition ids. chosen.partitionToTxnId.putAll(next.partitionToTxnId); } } } return chosen; } | Picks a snapshot info for restore. A single snapshot might have different files scattered across multiple machines. All nodes must pick the same SnapshotInfo, or different nodes will pick different catalogs to restore. Pick one SnapshotInfo and consolidate the per-node state into it. | 170 | 55 |
154,276
|
private void sendSnapshotTxnId ( SnapshotInfo toRestore ) { long txnId = toRestore != null ? toRestore . txnId : 0 ; String jsonData = toRestore != null ? toRestore . toJSONObject ( ) . toString ( ) : "{}" ; LOG . debug ( "Sending snapshot ID " + txnId + " for restore to other nodes" ) ; try { m_zk . create ( VoltZK . restore_snapshot_id , jsonData . getBytes ( Constants . UTF8ENCODING ) , Ids . OPEN_ACL_UNSAFE , CreateMode . EPHEMERAL ) ; } catch ( Exception e ) { VoltDB . crashGlobalVoltDB ( "Failed to create Zookeeper node: " + e . getMessage ( ) , false , e ) ; } }
|
Send the txnId of the snapshot that was picked to restore from to the other hosts . If there was no snapshot to restore from , send 0 .
| 189
| 30
|
154,277
|
private void sendLocalRestoreInformation ( Long max , Set < SnapshotInfo > snapshots ) { String jsonData = serializeRestoreInformation ( max , snapshots ) ; String zkNode = VoltZK . restore + "/" + m_hostId ; try { m_zk . create ( zkNode , jsonData . getBytes ( StandardCharsets . UTF_8 ) , Ids . OPEN_ACL_UNSAFE , CreateMode . EPHEMERAL ) ; } catch ( Exception e ) { throw new RuntimeException ( "Failed to create Zookeeper node: " + e . getMessage ( ) , e ) ; } }
|
Send the information about the local snapshot files to the other hosts to generate the restore plan .
| 140
| 17
|
154,278
|
private Long deserializeRestoreInformation ( List < String > children , Map < String , Set < SnapshotInfo > > snapshotFragments ) throws Exception { try { int recover = m_action . ordinal ( ) ; Long clStartTxnId = null ; for ( String node : children ) { //This might be created before we are done fetching the restore info if ( node . equals ( "snapshot_id" ) ) { continue ; } byte [ ] data = null ; data = m_zk . getData ( VoltZK . restore + "/" + node , false , null ) ; String jsonData = new String ( data , "UTF8" ) ; JSONObject json = new JSONObject ( jsonData ) ; long maxTxnId = json . optLong ( "max" , Long . MIN_VALUE ) ; if ( maxTxnId != Long . MIN_VALUE ) { if ( clStartTxnId == null || maxTxnId > clStartTxnId ) { clStartTxnId = maxTxnId ; } } int remoteRecover = json . getInt ( "action" ) ; if ( remoteRecover != recover ) { String msg = "Database actions are not consistent. Remote node action is not 'recover'. " + "Please enter the same database action on the command-line." ; VoltDB . crashLocalVoltDB ( msg , false , null ) ; } JSONArray snapInfos = json . getJSONArray ( "snapInfos" ) ; int snapInfoCnt = snapInfos . length ( ) ; for ( int i = 0 ; i < snapInfoCnt ; i ++ ) { JSONObject jsonInfo = snapInfos . getJSONObject ( i ) ; SnapshotInfo info = new SnapshotInfo ( jsonInfo ) ; Set < SnapshotInfo > fragments = snapshotFragments . get ( info . nonce ) ; if ( fragments == null ) { fragments = new HashSet < SnapshotInfo > ( ) ; snapshotFragments . put ( info . nonce , fragments ) ; } fragments . add ( info ) ; } } return clStartTxnId ; } catch ( JSONException je ) { VoltDB . crashLocalVoltDB ( "Error exchanging snapshot information" , true , je ) ; } throw new RuntimeException ( "impossible" ) ; }
|
This function , like all good functions , does three things . It produces the command log start transaction Id . It produces a map of SnapshotInfo objects . And it errors if the remote start action does not match the local action .
| 497
| 44
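One step of deserializeRestoreInformation, bucketing per-host snapshot fragments by nonce, is the standard computeIfAbsent grouping idiom. A minimal sketch with plain strings standing in for SnapshotInfo objects:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public final class GroupByNonceSketch {
    public static void main(String[] args) {
        Map<String, Set<String>> fragmentsByNonce = new HashMap<>();
        // Each report is (snapshot nonce, reporting host); values are illustrative.
        String[][] reports = { {"snapA", "host1"}, {"snapA", "host2"}, {"snapB", "host1"} };
        for (String[] r : reports) {
            fragmentsByNonce.computeIfAbsent(r[0], k -> new HashSet<>()).add(r[1]);
        }
        System.out.println(fragmentsByNonce); // e.g. {snapA=[host1, host2], snapB=[host1]}
    }
}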
|
154,279
|
private void changeState ( ) { if ( m_state == State . RESTORE ) { fetchSnapshotTxnId ( ) ; exitRestore ( ) ; m_state = State . REPLAY ; /* * Add the interest here so that we can use the barriers in replay * agent to synchronize. */ m_snapshotMonitor . addInterest ( this ) ; m_replayAgent . replay ( ) ; } else if ( m_state == State . REPLAY ) { m_state = State . TRUNCATE ; } else if ( m_state == State . TRUNCATE ) { m_snapshotMonitor . removeInterest ( this ) ; if ( m_callback != null ) { m_callback . onReplayCompletion ( m_truncationSnapshot , m_truncationSnapshotPerPartition ) ; } // Call balance partitions after enabling transactions on the node to shorten the recovery time if ( m_isLeader ) { m_replayAgent . resumeElasticOperationIfNecessary ( ) ; } } }
|
Change the state of the restore agent based on the current state .
| 224
| 13
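changeState above walks a strictly linear state machine: RESTORE, then REPLAY, then TRUNCATE. A sketch of the same progression with a plain enum; the comments are placeholders for the barrier-exit, replay, and callback work the real method performs:

public final class RestoreStateMachine {
    enum State { RESTORE, REPLAY, TRUNCATE }

    private State state = State.RESTORE;

    void advance() {
        switch (state) {
            case RESTORE:
                // exit the restore barrier, register snapshot interest, start replay
                state = State.REPLAY;
                break;
            case REPLAY:
                // replay finished; a truncation snapshot is expected next
                state = State.TRUNCATE;
                break;
            case TRUNCATE:
                // terminal state: fire the completion callback
                break;
        }
    }
}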
|
154,280
|
private Map < String , Snapshot > getSnapshots ( ) { /* * Use the individual snapshot directories instead of voltroot, because * they can be set individually */ Map < String , SnapshotPathType > paths = new HashMap < String , SnapshotPathType > ( ) ; if ( VoltDB . instance ( ) . getConfig ( ) . m_isEnterprise ) { if ( m_clSnapshotPath != null ) { paths . put ( m_clSnapshotPath , SnapshotPathType . SNAP_CL ) ; } } if ( m_snapshotPath != null ) { paths . put ( m_snapshotPath , SnapshotPathType . SNAP_AUTO ) ; } HashMap < String , Snapshot > snapshots = new HashMap < String , Snapshot > ( ) ; FileFilter filter = new SnapshotUtil . SnapshotFilter ( ) ; for ( String path : paths . keySet ( ) ) { SnapshotUtil . retrieveSnapshotFiles ( new File ( path ) , snapshots , filter , false , paths . get ( path ) , LOG ) ; } return snapshots ; }
|
Finds all the snapshots in all the places we know of that could possibly store snapshots , such as command log snapshots , auto snapshots , etc .
| 236
| 25
|
154,281
|
@ Override public CountDownLatch snapshotCompleted ( SnapshotCompletionEvent event ) { if ( ! event . truncationSnapshot || ! event . didSucceed ) { VoltDB . crashGlobalVoltDB ( "Failed to truncate command logs by snapshot" , false , null ) ; } else { m_truncationSnapshot = event . multipartTxnId ; m_truncationSnapshotPerPartition = event . partitionTxnIds ; m_replayAgent . returnAllSegments ( ) ; changeState ( ) ; } return new CountDownLatch ( 0 ) ; }
|
All nodes will be notified about the completion of the truncation snapshot .
| 132
| 14
|
154,282
|
void shutdown ( ) throws InterruptedException { m_shouldStop = true ; if ( m_thread != null ) { m_selector . wakeup ( ) ; m_thread . join ( ) ; } }
|
Instruct the network to stop after the current loop
| 45
| 9
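shutdown is the standard cooperative-shutdown idiom for an NIO selector thread: set a volatile stop flag, call Selector.wakeup() so the blocked select() returns and observes the flag, then join the thread. A self-contained sketch with illustrative names:

import java.io.IOException;
import java.nio.channels.Selector;

public final class SelectorLoop implements Runnable {
    private volatile boolean shouldStop = false;
    private final Selector selector;
    private final Thread thread = new Thread(this, "network");

    SelectorLoop() throws IOException {
        selector = Selector.open();
    }

    @Override
    public void run() {
        try {
            while (!shouldStop) {
                selector.select(); // blocks until I/O is ready or wakeup() is called
                // ... drain tasks and process selected keys ...
            }
        } catch (IOException e) {
            // log and fall through so the thread exits
        }
    }

    void start() { thread.start(); }

    void shutdown() throws InterruptedException {
        shouldStop = true;
        selector.wakeup(); // unblock select() so the flag is observed promptly
        thread.join();
    }
}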
|
154,283
|
Connection registerChannel ( final SocketChannel channel , final InputHandler handler , final int interestOps , final ReverseDNSPolicy dns , final CipherExecutor cipherService , final SSLEngine sslEngine ) throws IOException { synchronized ( channel . blockingLock ( ) ) { channel . configureBlocking ( false ) ; channel . socket ( ) . setKeepAlive ( true ) ; } Callable < Connection > registerTask = new Callable < Connection > ( ) { @ Override public Connection call ( ) throws Exception { final VoltPort port = VoltPortFactory . createVoltPort ( channel , VoltNetwork . this , handler , ( InetSocketAddress ) channel . socket ( ) . getRemoteSocketAddress ( ) , m_pool , cipherService , sslEngine ) ; port . registering ( ) ; /* * This means we are used by a client. No need to wait then, trigger * the reverse DNS lookup now. */ if ( dns != ReverseDNSPolicy . NONE ) { port . resolveHostname ( dns == ReverseDNSPolicy . SYNCHRONOUS ) ; } try { SelectionKey key = channel . register ( m_selector , interestOps , null ) ; port . setKey ( key ) ; port . registered ( ) ; //Fix a bug witnessed on the mini where the registration lock and the selector wakeup contained //within was not enough to prevent the selector from returning the port after it was registered, //but before setKey was called. Suspect a bug in the selector.wakeup() or register() implementation //on the mac. //The null check in invokeCallbacks will catch the null attachment, continue, and do the work //next time through the selection loop key . attach ( port ) ; return port ; } finally { m_ports . add ( port ) ; m_numPorts . incrementAndGet ( ) ; } } } ; FutureTask < Connection > ft = new FutureTask < Connection > ( registerTask ) ; m_tasks . offer ( ft ) ; m_selector . wakeup ( ) ; try { return ft . get ( ) ; } catch ( Exception e ) { throw new IOException ( e ) ; } }
|
Register a channel with the selector and create a Connection that will pass incoming events to the provided handler .
| 459
| 20
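The interesting mechanism in registerChannel is the hand-off: registration must run on the selector thread, so the caller wraps the work in a FutureTask, enqueues it, wakes the selector, and blocks on get(). This avoids racing channel.register() against an in-progress select(). A stripped-down sketch of that queue (names are hypothetical):

import java.nio.channels.Selector;
import java.util.Queue;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.FutureTask;

public final class SelectorTaskQueue {
    private final Queue<Runnable> tasks = new ConcurrentLinkedQueue<>();
    private final Selector selector;

    SelectorTaskQueue(Selector selector) {
        this.selector = selector;
    }

    // Called from any thread: schedule work and wait for its result.
    <T> T runOnSelectorThread(Callable<T> work) throws Exception {
        FutureTask<T> ft = new FutureTask<>(work);
        tasks.offer(ft);
        selector.wakeup();  // interrupt select() so the task runs promptly
        return ft.get();    // blocks until the selector thread has executed it
    }

    // Called by the selector thread at the top of each selection loop pass.
    void drainTasks() {
        Runnable r;
        while ((r = tasks.poll()) != null) {
            r.run();
        }
    }
}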
|
154,284
|
Future < ? > unregisterChannel ( Connection c ) { FutureTask < Object > ft = new FutureTask < Object > ( getUnregisterRunnable ( c ) , null ) ; m_tasks . offer ( ft ) ; m_selector . wakeup ( ) ; return ft ; }
|
Unregister a channel . The connection's streams are not drained before finishing .
| 63
| 14
|
154,285
|
void addToChangeList ( final VoltPort port , final boolean runFirst ) { if ( runFirst ) { m_tasks . offer ( new Runnable ( ) { @ Override public void run ( ) { callPort ( port ) ; } } ) ; } else { m_tasks . offer ( new Runnable ( ) { @ Override public void run ( ) { installInterests ( port ) ; } } ) ; } m_selector . wakeup ( ) ; }
|
Set interest registrations for a port
| 105
| 6
|
154,286
|
protected void invokeCallbacks ( ThreadLocalRandom r ) { final Set < SelectionKey > selectedKeys = m_selector . selectedKeys ( ) ; final int keyCount = selectedKeys . size ( ) ; int startInx = r . nextInt ( keyCount ) ; int itInx = 0 ; Iterator < SelectionKey > it = selectedKeys . iterator ( ) ; while ( itInx < startInx ) { it . next ( ) ; itInx ++ ; } while ( itInx < keyCount ) { final Object obj = it . next ( ) . attachment ( ) ; if ( obj == null ) { continue ; } final VoltPort port = ( VoltPort ) obj ; callPort ( port ) ; itInx ++ ; } itInx = 0 ; it = selectedKeys . iterator ( ) ; while ( itInx < startInx ) { final Object obj = it . next ( ) . attachment ( ) ; if ( obj == null ) { continue ; } final VoltPort port = ( VoltPort ) obj ; callPort ( port ) ; itInx ++ ; } selectedKeys . clear ( ) ; }
|
Set the selected interest set on the port and run it .
| 240
| 12
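invokeCallbacks starts at a random position in the selected-key set and wraps around, so no port is systematically serviced before the others. The sketch below shows the same fairness trick in modular form over a list; the original needs two iterator passes only because a Set has no index:

import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

public final class RandomStartIteration {
    // Service every element exactly once, starting at a random offset.
    static void serviceAll(List<String> ports) {
        int n = ports.size();
        if (n == 0) {
            return;
        }
        int start = ThreadLocalRandom.current().nextInt(n);
        for (int i = 0; i < n; i++) {
            String port = ports.get((start + i) % n);
            System.out.println("servicing " + port);
        }
    }

    public static void main(String[] args) {
        serviceAll(List.of("port1", "port2", "port3"));
    }
}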
|
154,287
|
public static String path ( String ... components ) { String path = components [ 0 ] ; for ( int i = 1 ; i < components . length ; i ++ ) { path = ZKUtil . joinZKPath ( path , components [ i ] ) ; } return path ; }
|
Helper to produce a valid path from variadic strings .
| 60
| 11
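A usage sketch for the variadic path helper; the plain '/' join below is a simplified stand-in for ZKUtil.joinZKPath, whose exact normalization rules are not shown here:

public final class ZkPathSketch {
    static String path(String... components) {
        StringBuilder sb = new StringBuilder(components[0]);
        for (int i = 1; i < components.length; i++) {
            sb.append('/').append(components[i]);
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(path("/db", "restore", "barrier")); // /db/restore/barrier
    }
}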
|
154,288
|
private String getSegmentFileName ( long currentId , long previousId ) { return PbdSegmentName . createName ( m_nonce , currentId , previousId , false ) ; }
|
Return a segment file name from m_nonce and current + previous segment ids .
| 42
| 18
|
154,289
|
private long getPreviousSegmentId ( File file ) { PbdSegmentName segmentName = PbdSegmentName . parseFile ( m_usageSpecificLog , file ) ; if ( segmentName . m_result != PbdSegmentName . Result . OK ) { throw new IllegalStateException ( "Invalid file name: " + file . getName ( ) ) ; } return segmentName . m_prevId ; }
|
Extract the previous segment id from a file name .
| 90
| 11
|
154,290
|
private void deleteStalePbdFile ( File file ) throws IOException { try { PBDSegment . setFinal ( file , false ) ; if ( m_usageSpecificLog . isDebugEnabled ( ) ) { m_usageSpecificLog . debug ( "Segment " + file . getName ( ) + " (final: " + PBDSegment . isFinal ( file ) + "), will be closed and deleted during init" ) ; } file . delete ( ) ; } catch ( Exception e ) { if ( e instanceof NoSuchFileException ) { // Concurrent delete, noop } else { throw e ; } } }
|
Delete a PBD segment that was identified as stale , i.e. produced by earlier VoltDB releases .
| 136
| 20
|
154,291
|
private void recoverSegment ( long segmentIndex , long segmentId , PbdSegmentName segmentName ) throws IOException { PBDSegment segment ; if ( segmentName . m_quarantined ) { segment = new PbdQuarantinedSegment ( segmentName . m_file , segmentIndex , segmentId ) ; } else { segment = newSegment ( segmentIndex , segmentId , segmentName . m_file ) ; try { if ( segment . getNumEntries ( ) == 0 ) { if ( m_usageSpecificLog . isDebugEnabled ( ) ) { m_usageSpecificLog . debug ( "Found Empty Segment with entries: " + segment . getNumEntries ( ) + " For: " + segment . file ( ) . getName ( ) ) ; m_usageSpecificLog . debug ( "Segment " + segment . file ( ) + " (final: " + segment . isFinal ( ) + "), will be closed and deleted during init" ) ; } segment . closeAndDelete ( ) ; return ; } // Any recovered segment that is not final should be checked // for internal consistency. if ( ! segment . isFinal ( ) ) { m_usageSpecificLog . warn ( "Segment " + segment . file ( ) + " (final: " + segment . isFinal ( ) + "), has been recovered but is not in a final state" ) ; } else if ( m_usageSpecificLog . isDebugEnabled ( ) ) { m_usageSpecificLog . debug ( "Segment " + segment . file ( ) + " (final: " + segment . isFinal ( ) + "), has been recovered" ) ; } m_segments . put ( segment . segmentIndex ( ) , segment ) ; } catch ( IOException e ) { m_usageSpecificLog . warn ( "Failed to retrieve entry count from segment " + segment . file ( ) + ". Quarantining segment" , e ) ; quarantineSegment ( segment ) ; return ; } finally { segment . close ( ) ; } } m_segments . put ( segment . segmentIndex ( ) , segment ) ; }
|
Recover a PBD segment and add it to m_segments
| 453
| 14
|
154,292
|
int numOpenSegments ( ) { int numOpen = 0 ; for ( PBDSegment segment : m_segments . values ( ) ) { if ( ! segment . isClosed ( ) ) { numOpen ++ ; } } return numOpen ; }
|
Used by tests only
| 55
| 4
|
154,293
|
public CacheBuilder < K , V > expireAfterWrite ( long duration , TimeUnit unit ) { checkState ( expireAfterWriteNanos == UNSET_INT , "expireAfterWrite was already set to %s ns" , expireAfterWriteNanos ) ; checkArgument ( duration >= 0 , "duration cannot be negative: %s %s" , duration , unit ) ; this . expireAfterWriteNanos = unit . toNanos ( duration ) ; return this ; }
|
Specifies that each entry should be automatically removed from the cache once a fixed duration has elapsed after the entry's creation or the most recent replacement of its value .
| 101
| 32
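A usage sketch for the builder method above, written against the Guava CacheBuilder API that this snippet mirrors; after ten minutes without a replacement write, the entry becomes eligible for removal and getIfPresent returns null:

import java.util.concurrent.TimeUnit;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public final class ExpireAfterWriteExample {
    public static void main(String[] args) {
        Cache<String, String> cache = CacheBuilder.newBuilder()
                .expireAfterWrite(10, TimeUnit.MINUTES)
                .maximumSize(1000)
                .build();
        cache.put("k", "v");
        System.out.println(cache.getIfPresent("k")); // "v" while the entry is fresh
    }
}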
|
154,294
|
public void revoke ( Grantee role ) { if ( ! hasRoleDirect ( role ) ) { throw Error . error ( ErrorCode . X_0P503 , role . getNameString ( ) ) ; } roles . remove ( role ) ; }
|
Revoke a direct role only
| 52
| 6
|
154,295
|
private OrderedHashSet addGranteeAndRoles ( OrderedHashSet set ) { Grantee candidateRole ; set . add ( this ) ; for ( int i = 0 ; i < roles . size ( ) ; i ++ ) { candidateRole = ( Grantee ) roles . get ( i ) ; if ( ! set . contains ( candidateRole ) ) { candidateRole . addGranteeAndRoles ( set ) ; } } return set ; }
|
Adds to the given Set this.sName plus all roles and nested roles .
| 95
| 15
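addGranteeAndRoles is a depth-first transitive closure in which the visited set doubles as both the result and the cycle guard. A generic sketch over an illustrative string-keyed role graph:

import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public final class RoleClosureSketch {
    // Collect grantee plus every role reachable through nested grants.
    static void collect(String grantee, Map<String, List<String>> directRoles, Set<String> out) {
        out.add(grantee);
        for (String role : directRoles.getOrDefault(grantee, List.of())) {
            if (!out.contains(role)) { // the visited check also breaks cycles
                collect(role, directRoles, out);
            }
        }
    }

    public static void main(String[] args) {
        Map<String, List<String>> graph = Map.of(
                "alice", List.of("manager"),
                "manager", List.of("employee"),
                "employee", List.of());
        Set<String> closure = new LinkedHashSet<>();
        collect("alice", graph, closure);
        System.out.println(closure); // [alice, manager, employee]
    }
}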
|
154,296
|
public void addAllRoles ( HashMap map ) { for ( int i = 0 ; i < roles . size ( ) ; i ++ ) { Grantee role = ( Grantee ) roles . get ( i ) ; map . put ( role . granteeName . name , role . roles ) ; } }
|
Returns a map with grantee names as keys and sets of granted roles as values
| 65
| 16
|
154,297
|
void clearPrivileges ( ) { roles . clear ( ) ; directRightsMap . clear ( ) ; grantedRightsMap . clear ( ) ; fullRightsMap . clear ( ) ; isAdmin = false ; }
|
Revokes all rights from this Grantee object . The map is cleared and the database administrator role attribute is set to false .
| 47
| 24
|
154,298
|
boolean updateNestedRoles ( Grantee role ) { boolean hasNested = false ; if ( role != this ) { for ( int i = 0 ; i < roles . size ( ) ; i ++ ) { Grantee currentRole = ( Grantee ) roles . get ( i ) ; hasNested |= currentRole . updateNestedRoles ( role ) ; } } if ( hasNested ) { updateAllRights ( ) ; } return hasNested || role == this ; }
|
Recursive method used with ROLE Grantee objects to set the fullRightsMap and admin flag for all the roles .
| 106
| 25
|
154,299
|
void addToFullRights ( HashMap map ) { Iterator it = map . keySet ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { Object key = it . next ( ) ; Right add = ( Right ) map . get ( key ) ; Right existing = ( Right ) fullRightsMap . get ( key ) ; if ( existing == null ) { existing = add . duplicate ( ) ; fullRightsMap . put ( key , existing ) ; } else { existing . add ( add ) ; } if ( add . grantableRights == null ) { continue ; } if ( existing . grantableRights == null ) { existing . grantableRights = add . grantableRights . duplicate ( ) ; } else { existing . grantableRights . add ( add . grantableRights ) ; } } }
|
Full or partial rights are added to the existing rights .
| 181
| 8
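The merge pattern in addToFullRights (insert the incoming right if the key is absent, otherwise fold it into the existing entry) is what Map.merge expresses in modern Java. A sketch with integer bit masks standing in for the Right objects:

import java.util.HashMap;
import java.util.Map;

public final class MergeRightsSketch {
    static void addToFullRights(Map<String, Integer> full, Map<String, Integer> add) {
        for (Map.Entry<String, Integer> e : add.entrySet()) {
            // merge() inserts the value if absent, otherwise combines with OR
            full.merge(e.getKey(), e.getValue(), (a, b) -> a | b);
        }
    }

    public static void main(String[] args) {
        Map<String, Integer> full = new HashMap<>(Map.of("T1", 0b01));
        addToFullRights(full, Map.of("T1", 0b10, "T2", 0b100));
        System.out.println(full); // e.g. {T1=3, T2=4}
    }
}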
|