| idx (int64, 0–165k) | question (string, length 73–4.15k) | target (string, length 5–918) | len_question (int64, 21–890) | len_target (int64, 3–255) |
|---|---|---|---|---|
154,500
|
public static Pair < Object [ ] , AdHocPlannedStatement [ ] > decodeSerializedBatchData ( byte [ ] serializedBatchData ) { // Collections must be the same size since they all contain slices of the same data. assert ( serializedBatchData != null ) ; ByteBuffer buf = ByteBuffer . wrap ( serializedBatchData ) ; AdHocPlannedStatement [ ] statements = null ; Object [ ] userparams = null ; try { userparams = AdHocPlannedStmtBatch . userParamsFromBuffer ( buf ) ; statements = AdHocPlannedStmtBatch . planArrayFromBuffer ( buf ) ; } catch ( IOException e ) { throw new VoltAbortException ( e ) ; } return new Pair < Object [ ] , AdHocPlannedStatement [ ] > ( userparams , statements ) ; }
|
Decode binary data into the structures needed to process ad hoc queries. This code was pulled out of runAdHoc so it could be shared there and with adHocSQLStringFromPlannedStatement.
| 186
| 42
|
154,501
|
static Object [ ] paramsForStatement ( AdHocPlannedStatement statement , Object [ ] userparams ) { // When there are no user-provided parameters, statements may have parameterized constants. if ( userparams . length > 0 ) { return userparams ; } else { return statement . extractedParamArray ( ) ; } }
|
Get the params for a specific SQL statement within a batch. Note that the batch size is usually one.
| 68
| 23
|
154,502
|
public static void writeToFile ( byte [ ] catalogBytes , File file ) throws IOException { JarOutputStream jarOut = new JarOutputStream ( new FileOutputStream ( file ) ) ; JarInputStream jarIn = new JarInputStream ( new ByteArrayInputStream ( catalogBytes ) ) ; JarEntry catEntry = null ; JarInputStreamReader reader = new JarInputStreamReader ( ) ; while ( ( catEntry = jarIn . getNextJarEntry ( ) ) != null ) { byte [ ] value = reader . readEntryFromStream ( jarIn ) ; String key = catEntry . getName ( ) ; assert ( value != null ) ; JarEntry entry = new JarEntry ( key ) ; entry . setSize ( value . length ) ; entry . setTime ( System . currentTimeMillis ( ) ) ; jarOut . putNextEntry ( entry ) ; jarOut . write ( value ) ; jarOut . flush ( ) ; jarOut . closeEntry ( ) ; } jarOut . finish ( ) ; jarIn . close ( ) ; }
|
The catalog bytes are directly transformed and written to the specified file.
| 222
| 9
|
154,503
|
public long getCRC ( ) { PureJavaCrc32 crc = new PureJavaCrc32 ( ) ; for ( Entry < String , byte [ ] > e : super . entrySet ( ) ) { if ( e . getKey ( ) . equals ( "buildinfo.txt" ) || e . getKey ( ) . equals ( "catalog-report.html" ) ) { continue ; } // Hacky way to skip the first line of the autogenerated ddl, which // has a date which changes and causes test failures if ( e . getKey ( ) . equals ( VoltCompiler . AUTOGEN_DDL_FILE_NAME ) ) { byte [ ] ddlbytes = e . getValue ( ) ; int index = 0 ; while ( ddlbytes [ index ] != ' ' ) { index ++ ; } byte [ ] newddlbytes = Arrays . copyOfRange ( ddlbytes , index , ddlbytes . length ) ; crc . update ( e . getKey ( ) . getBytes ( Constants . UTF8ENCODING ) ) ; crc . update ( newddlbytes ) ; } else { crc . update ( e . getKey ( ) . getBytes ( Constants . UTF8ENCODING ) ) ; crc . update ( e . getValue ( ) ) ; } } return crc . getValue ( ) ; }
|
Just replacing one method call with another, though.
| 299
| 9
|
154,504
|
public void removeClassFromJar ( String classname ) { for ( String innerclass : getLoader ( ) . getInnerClassesForClass ( classname ) ) { remove ( classToFileName ( innerclass ) ) ; } remove ( classToFileName ( classname ) ) ; }
|
Remove the provided classname and all inner classes from the jarfile and the classloader.
| 62
| 17
|
154,505
|
public static ParsedCall parseJDBCCall ( String jdbcCall ) throws SQLParser . Exception { Matcher m = PAT_CALL_WITH_PARAMETERS . matcher ( jdbcCall ) ; if ( m . matches ( ) ) { String sql = m . group ( 1 ) ; int parameterCount = PAT_CLEAN_CALL_PARAMETERS . matcher ( m . group ( 2 ) ) . replaceAll ( "" ) . length ( ) ; return new ParsedCall ( sql , parameterCount ) ; } m = PAT_CALL_WITHOUT_PARAMETERS . matcher ( jdbcCall ) ; if ( m . matches ( ) ) { return new ParsedCall ( m . group ( 1 ) , 0 ) ; } return null ; }
|
Parse call statements for JDBC.
| 173
| 8
|
154,506
|
static Type getType ( int setType , Type type ) { if ( setType == OpTypes . COUNT ) { return Type . SQL_BIGINT ; } // A VoltDB extension to handle aggfnc(*) syntax errors. // If the argument node does not have // a data type, it may be '*'. If the // operation is COUNT (optype == 71) this is // just fine. But if it's anything else this // is a syntax error. if ( type == null ) { throw Error . error ( ErrorCode . U_S0500 ) ; } // End of VoltDB extension int dataType = type . isIntervalType ( ) ? Types . SQL_INTERVAL : type . typeCode ; switch ( setType ) { case OpTypes . AVG : { switch ( dataType ) { case Types . TINYINT : case Types . SQL_SMALLINT : case Types . SQL_INTEGER : case Types . SQL_BIGINT : case Types . SQL_REAL : case Types . SQL_FLOAT : case Types . SQL_DOUBLE : case Types . SQL_NUMERIC : case Types . SQL_DECIMAL : case Types . SQL_INTERVAL : return type ; default : throw Error . error ( ErrorCode . X_42565 ) ; } } case OpTypes . SUM : { switch ( dataType ) { case Types . TINYINT : case Types . SQL_SMALLINT : case Types . SQL_INTEGER : return Type . SQL_BIGINT ; case Types . SQL_BIGINT : return Type . SQL_DECIMAL_BIGINT_SQR ; case Types . SQL_REAL : case Types . SQL_FLOAT : case Types . SQL_DOUBLE : return Type . SQL_DOUBLE ; case Types . SQL_NUMERIC : case Types . SQL_DECIMAL : return Type . getType ( type . typeCode , 0 , type . precision * 2 , type . scale ) ; case Types . SQL_INTERVAL : return IntervalType . newIntervalType ( type . typeCode , DTIType . maxIntervalPrecision , type . scale ) ; default : throw Error . error ( ErrorCode . X_42565 ) ; } } case OpTypes . MIN : case OpTypes . MAX : return type ; case OpTypes . EVERY : case OpTypes . SOME : if ( type . isBooleanType ( ) ) { return Type . SQL_BOOLEAN ; } break ; case OpTypes . STDDEV_POP : case OpTypes . STDDEV_SAMP : case OpTypes . VAR_POP : case OpTypes . VAR_SAMP : if ( type . isNumberType ( ) ) { return Type . SQL_DOUBLE ; } break ; // A VoltDB extension for APPROX_COUNT_DISTINCT case OpTypes . APPROX_COUNT_DISTINCT : switch ( dataType ) { case Types . TINYINT : case Types . SQL_SMALLINT : case Types . SQL_INTEGER : case Types . SQL_BIGINT : case Types . SQL_DECIMAL : case Types . SQL_TIMESTAMP : return Type . SQL_BIGINT ; default : // We only support fixed-width types for this // aggregate function. // // FLOAT is not supported since this function // relies on different values having different bit // patterns, and the same values being the // same. Floating point numbers don't hold to // this---e.g., positive and negative zero. // // Incompatible data types in operation throw Error . error ( ErrorCode . X_42565 ) ; } // End of VoltDB extension for APPROX_COUNT_DISTINCT default : throw Error . runtimeError ( ErrorCode . U_S0500 , "SetFunction" ) ; } throw Error . error ( ErrorCode . X_42565 ) ; }
|
During parsing, before an instance of SetFunction is created, getType is called with the type parameter set to the correct type when main SELECT statements contain aggregates.
| 852
| 30
|
154,507
|
public static URI makeFileLoggerURL ( File dataDir , File dataLogDir ) { return URI . create ( makeURIString ( dataDir . getPath ( ) , dataLogDir . getPath ( ) , null ) ) ; }
|
Given two directory files, the method returns a well-formed logfile provider URI. This method is for backward compatibility with the existing code that only supports logfile persistence and expects these two parameters to be passed either on the command line or in the configuration file.
| 50
| 50
|
154,508
|
public static boolean isValidSnapshot ( File f ) throws IOException { if ( f == null || Util . getZxidFromName ( f . getName ( ) , "snapshot" ) == - 1 ) return false ; // Check for a valid snapshot RandomAccessFile raf = new RandomAccessFile ( f , "r" ) ; // including the header and the last / bytes // the snapshot should be atleast 10 bytes if ( raf . length ( ) < 10 ) { return false ; } try { raf . seek ( raf . length ( ) - 5 ) ; byte bytes [ ] = new byte [ 5 ] ; int readlen = 0 ; int l ; while ( readlen < 5 && ( l = raf . read ( bytes , readlen , bytes . length - readlen ) ) >= 0 ) { readlen += l ; } if ( readlen != bytes . length ) { LOG . info ( "Invalid snapshot " + f + " too short, len = " + readlen ) ; return false ; } ByteBuffer bb = ByteBuffer . wrap ( bytes ) ; int len = bb . getInt ( ) ; byte b = bb . get ( ) ; if ( len != 1 || b != ' ' ) { LOG . info ( "Invalid snapshot " + f + " len = " + len + " byte = " + ( b & 0xff ) ) ; return false ; } } finally { raf . close ( ) ; } return true ; }
|
Verifies that the file is a valid snapshot. A snapshot may be invalid if it is incomplete, as in a situation when the server dies while in the process of storing a snapshot. Any file that is not a snapshot is also an invalid snapshot.
| 317
| 49
|
154,509
|
public static byte [ ] readTxnBytes ( InputArchive ia ) throws IOException { try { byte [ ] bytes = ia . readBuffer ( "txtEntry" ) ; // Since we preallocate, we define EOF to be an // empty transaction if ( bytes . length == 0 ) return bytes ; if ( ia . readByte ( "EOF" ) != ' ' ) { LOG . error ( "Last transaction was partial." ) ; return null ; } return bytes ; } catch ( EOFException e ) { } return null ; }
|
Reads a transaction entry from the input archive.
| 118
| 10
|
154,510
|
public static byte [ ] marshallTxnEntry ( TxnHeader hdr , Record txn ) throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream ( ) ; OutputArchive boa = BinaryOutputArchive . getArchive ( baos ) ; hdr . serialize ( boa , "hdr" ) ; if ( txn != null ) { txn . serialize ( boa , "txn" ) ; } return baos . toByteArray ( ) ; }
|
Serializes transaction header and transaction data into a byte buffer.
| 110
| 12
|
154,511
|
public static void writeTxnBytes ( OutputArchive oa , byte [ ] bytes ) throws IOException { oa . writeBuffer ( bytes , "txnEntry" ) ; oa . writeByte ( ( byte ) 0x42 , "EOR" ) ; // 'B' }
|
Write the serialized transaction record to the output archive.
| 62
| 11
|
154,512
|
public static List < File > sortDataDir ( File [ ] files , String prefix , boolean ascending ) { if ( files == null ) return new ArrayList < File > ( 0 ) ; List < File > filelist = Arrays . asList ( files ) ; Collections . sort ( filelist , new DataDirFileComparator ( prefix , ascending ) ) ; return filelist ; }
|
Sort the list of files by recency, as determined by the version component of the file name.
| 80
| 19
|
154,513
|
private List < String > diagnoseIndexMismatch ( Index theIndex , Index otherIndex ) { List < String > mismatchedAttrs = new ArrayList <> ( ) ; // Pairs of matching indexes must agree on type (int hash, etc.). if ( theIndex . getType ( ) != otherIndex . getType ( ) ) { mismatchedAttrs . add ( "index type (hash vs tree)" ) ; } // Pairs of matching indexes must agree whether they are (assume)unique. if ( theIndex . getUnique ( ) != otherIndex . getUnique ( ) || theIndex . getAssumeunique ( ) != otherIndex . getAssumeunique ( ) ) { mismatchedAttrs . add ( "UNIQUE attribute" ) ; } // Pairs of matching indexes must agree whether they are partial // and if so, agree on the predicate. String thePredicateJSON = theIndex . getPredicatejson ( ) ; String otherPredicateJSON = otherIndex . getPredicatejson ( ) ; if ( thePredicateJSON == null ) { if ( otherPredicateJSON != null ) { mismatchedAttrs . add ( "WHERE predicate" ) ; } } else if ( ! thePredicateJSON . equals ( otherPredicateJSON ) ) { mismatchedAttrs . add ( "WHERE predicate" ) ; } // Pairs of matching indexes must agree that they do or do not index // expressions and, if so, agree on the expressions. String theExprsJSON = theIndex . getExpressionsjson ( ) ; String otherExprsJSON = otherIndex . getExpressionsjson ( ) ; if ( theExprsJSON == null ) { if ( otherExprsJSON != null ) { mismatchedAttrs . add ( "indexed expression" ) ; } } else if ( ! theExprsJSON . equals ( otherExprsJSON ) ) { mismatchedAttrs . add ( "indexed expression" ) ; } // Indexes must agree on the columns they are based on, // identifiable by the columns' order in the table. CatalogMap < ColumnRef > theColumns = theIndex . getColumns ( ) ; int theColumnCount = theColumns . size ( ) ; CatalogMap < ColumnRef > otherColumns = otherIndex . getColumns ( ) ; if ( theColumnCount != otherColumns . size ( ) ) { mismatchedAttrs . add ( "indexed expression" ) ; } Iterator < ColumnRef > theColumnIterator = theColumns . iterator ( ) ; Iterator < ColumnRef > otherColumnIterator = otherColumns . iterator ( ) ; for ( int ii = 0 ; ii < theColumnCount ; ++ ii ) { int theColIndex = theColumnIterator . next ( ) . getColumn ( ) . getIndex ( ) ; int otherColIndex = otherColumnIterator . next ( ) . getColumn ( ) . getIndex ( ) ; if ( theColIndex != otherColIndex ) { mismatchedAttrs . add ( "indexed expression" ) ; } } return mismatchedAttrs ; }
|
Given two indexes, return a list of attributes that do not match.
| 655
| 12
|
154,514
|
private void validateTableCompatibility ( String theName , String otherName , Table theTable , Table otherTable , FailureMessage failureMessage ) { if ( theTable . getIsdred ( ) != otherTable . getIsdred ( ) ) { failureMessage . addReason ( "To swap table " + theName + " with table " + otherName + " both tables must be DR enabled or both tables must not be DR enabled." ) ; } if ( theTable . getIsreplicated ( ) != otherTable . getIsreplicated ( ) ) { failureMessage . addReason ( "one table is partitioned and the other is not" ) ; } if ( theTable . getTuplelimit ( ) != otherTable . getTuplelimit ( ) ) { failureMessage . addReason ( "the tables differ in the LIMIT PARTITION ROWS constraint" ) ; } if ( ( theTable . getMaterializer ( ) != null || ! theTable . getMvhandlerinfo ( ) . isEmpty ( ) ) || ( otherTable . getMaterializer ( ) != null || ! otherTable . getMvhandlerinfo ( ) . isEmpty ( ) ) ) { failureMessage . addReason ( "one or both of the tables is actually a view" ) ; } StringBuilder viewNames = new StringBuilder ( ) ; if ( viewsDependOn ( theTable , viewNames ) ) { failureMessage . addReason ( theName + " is referenced in views " + viewNames . toString ( ) ) ; } viewNames . setLength ( 0 ) ; if ( viewsDependOn ( otherTable , viewNames ) ) { failureMessage . addReason ( otherName + " is referenced in views " + viewNames . toString ( ) ) ; } }
|
Flag any issues of incompatibility between the two table operands of a swap by appending error details to a feedback buffer. These details, and possibly others, should get attached to a PlannerErrorException's message by the caller.
| 371
| 45
|
154,515
|
private void validateColumnCompatibility ( String theName , String otherName , Table theTable , Table otherTable , FailureMessage failureMessage ) { CatalogMap < Column > theColumns = theTable . getColumns ( ) ; int theColCount = theColumns . size ( ) ; CatalogMap < Column > otherColumns = otherTable . getColumns ( ) ; if ( theColCount != otherColumns . size ( ) ) { failureMessage . addReason ( "the tables have different numbers of columns" ) ; return ; } Column [ ] theColArray = new Column [ theColumns . size ( ) ] ; for ( Column theColumn : theColumns ) { theColArray [ theColumn . getIndex ( ) ] = theColumn ; } for ( Column otherColumn : otherColumns ) { int colIndex = otherColumn . getIndex ( ) ; String colName = otherColumn . getTypeName ( ) ; if ( colIndex < theColCount ) { Column theColumn = theColArray [ colIndex ] ; if ( theColumn . getTypeName ( ) . equals ( colName ) ) { if ( theColumn . getType ( ) != otherColumn . getType ( ) || theColumn . getSize ( ) != otherColumn . getSize ( ) || theColumn . getInbytes ( ) != otherColumn . getInbytes ( ) ) { failureMessage . addReason ( "columns named " + colName + " have different types or sizes" ) ; } continue ; } } Column matchedByName = theColumns . get ( colName ) ; if ( matchedByName != null ) { failureMessage . addReason ( colName + " is in a different ordinal position in the two tables" ) ; } else { failureMessage . addReason ( colName + " appears in " + otherName + " but not in " + theName ) ; } } if ( ! theTable . getIsreplicated ( ) && ! otherTable . getIsreplicated ( ) ) { if ( ! theTable . getPartitioncolumn ( ) . getTypeName ( ) . equals ( otherTable . getPartitioncolumn ( ) . getTypeName ( ) ) ) { failureMessage . addReason ( "the tables are not partitioned on the same column" ) ; } } }
|
Flag any issues of incompatibility between the columns of the two table operands of a swap by appending error details to a feedback buffer. These details, and possibly others, should get attached to a PlannerErrorException's message by the caller.
| 485
| 48
|
154,516
|
private final void growIfNeeded ( int minimumDesired ) { if ( buffer . b ( ) . remaining ( ) < minimumDesired ) { // Compute the size of the new buffer int newCapacity = buffer . b ( ) . capacity ( ) ; int newRemaining = newCapacity - buffer . b ( ) . position ( ) ; while ( newRemaining < minimumDesired ) { newRemaining += newCapacity ; newCapacity *= 2 ; } // Allocate and copy BBContainer next ; if ( isDirect ) { next = DBBPool . allocateDirect ( newCapacity ) ; } else { next = DBBPool . wrapBB ( ByteBuffer . allocate ( newCapacity ) ) ; } buffer . b ( ) . flip ( ) ; next . b ( ) . put ( buffer . b ( ) ) ; assert next . b ( ) . remaining ( ) == newRemaining ; buffer . discard ( ) ; buffer = next ; if ( callback != null ) callback . onBufferGrow ( this ) ; assert ( buffer . b ( ) . order ( ) == ByteOrder . BIG_ENDIAN ) ; } }
|
Resizes the internal byte buffer with a simple doubling policy if needed.
| 244
| 14
|
154,517
|
public static byte [ ] serialize ( FastSerializable object ) throws IOException { FastSerializer out = new FastSerializer ( ) ; object . writeExternal ( out ) ; return out . getBBContainer ( ) . b ( ) . array ( ) ; }
|
Get the byte version of an object. This is a shortcut utility method for when you only need to serialize a single object.
| 56
| 24
|
154,518
|
public byte [ ] getBytes ( ) { byte [ ] retval = new byte [ buffer . b ( ) . position ( ) ] ; int position = buffer . b ( ) . position ( ) ; buffer . b ( ) . rewind ( ) ; buffer . b ( ) . get ( retval ) ; assert position == buffer . b ( ) . position ( ) ; return retval ; }
|
This method is slow and horrible. It entails an extra copy. Don't use it! Ever! Not even for tests! Just say no to test-only code. It will also leak the BBContainer if this FS is being used with a pool.
| 83
| 51
|
154,519
|
public ByteBuffer getBuffer ( ) { assert ( isDirect == false ) ; assert ( buffer . b ( ) . hasArray ( ) ) ; assert ( ! buffer . b ( ) . isDirect ( ) ) ; buffer . b ( ) . flip ( ) ; return buffer . b ( ) . asReadOnlyBuffer ( ) ; }
|
Return a read-only slice of this buffer. Flips the internal buffer. May not be usefully invoked multiple times on the same internal state.
| 70
| 29
|
154,520
|
public String getHexEncodedBytes ( ) { buffer . b ( ) . flip ( ) ; byte bytes [ ] = new byte [ buffer . b ( ) . remaining ( ) ] ; buffer . b ( ) . get ( bytes ) ; String hex = Encoder . hexEncode ( bytes ) ; buffer . discard ( ) ; return hex ; }
|
Get an ASCII-string-safe version of the binary value using a hex encoding.
| 74
| 19
|
154,521
|
public static void writeString ( String string , ByteBuffer buffer ) throws IOException { if ( string == null ) { buffer . putInt ( VoltType . NULL_STRING_LENGTH ) ; return ; } byte [ ] strbytes = string . getBytes ( Constants . UTF8ENCODING ) ; int len = strbytes . length ; buffer . putInt ( len ) ; buffer . put ( strbytes ) ; }
|
Write a string in the standard VoltDB way without wrapping the byte buffer.
| 90
| 15
|
154,522
|
public void writeString ( String string ) throws IOException { if ( string == null ) { writeInt ( VoltType . NULL_STRING_LENGTH ) ; return ; } byte [ ] strbytes = string . getBytes ( Constants . UTF8ENCODING ) ; int len = strbytes . length ; writeInt ( len ) ; write ( strbytes ) ; }
|
Write a string in the standard VoltDB way. That is, four bytes of length info followed by the bytes of the characters encoded in UTF-8.
| 79
| 29
|
154,523
|
public void writeVarbinary ( byte [ ] bin ) throws IOException { if ( bin == null ) { writeInt ( VoltType . NULL_STRING_LENGTH ) ; return ; } if ( bin . length > VoltType . MAX_VALUE_LENGTH ) { throw new IOException ( "Varbinary exceeds maximum length of " + VoltType . MAX_VALUE_LENGTH + " bytes." ) ; } writeInt ( bin . length ) ; write ( bin ) ; }
|
Write a varbinary in the standard VoltDB way. That is, four bytes of length info followed by the bytes.
| 101
| 23
|
154,524
|
public void writeTable ( VoltTable table ) throws IOException { int len = table . getSerializedSize ( ) ; growIfNeeded ( len ) ; table . flattenToBuffer ( buffer . b ( ) ) ; }
|
Write a table using its ByteBuffer serialization code.
| 48
| 12
|
154,525
|
public void writeInvocation ( StoredProcedureInvocation invocation ) throws IOException { int len = invocation . getSerializedSize ( ) ; growIfNeeded ( len ) ; invocation . flattenToBuffer ( buffer . b ( ) ) ; }
|
Write an SPI using its ByteBuffer serialization code.
| 54
| 12
|
154,526
|
public void writeParameterSet ( ParameterSet params ) throws IOException { int len = params . getSerializedSize ( ) ; growIfNeeded ( len ) ; params . flattenToBuffer ( buffer . b ( ) ) ; }
|
Write a ParameterSet using its ByteBuffer serialization code.
| 50
| 14
|
154,527
|
public void start ( InputHandler ih , Set < Long > verbotenThreads ) { m_ih = ih ; m_verbotenThreads = verbotenThreads ; startSetup ( ) ; m_thread . start ( ) ; }
|
Start this VoltNetwork's thread. Populate the verbotenThreads set with the id of the thread that is created.
| 55
| 24
|
154,528
|
@ Override public boolean execute ( String sql ) throws SQLException { checkClosed ( ) ; VoltSQL query = VoltSQL . parseSQL ( sql ) ; return this . execute ( query ) ; }
|
Executes the given SQL statement, which may return multiple results.
| 44
| 12
|
154,529
|
@ Override public boolean execute ( String sql , String [ ] columnNames ) throws SQLException { checkClosed ( ) ; throw SQLError . noSupport ( ) ; }
|
Executes the given SQL statement, which may return multiple results, and signals the driver that the auto-generated keys indicated in the given array should be made available for retrieval.
| 40
| 33
|
154,530
|
@ Override public int [ ] executeBatch ( ) throws SQLException { checkClosed ( ) ; closeCurrentResult ( ) ; if ( batch == null || batch . size ( ) == 0 ) { return new int [ 0 ] ; } int [ ] updateCounts = new int [ batch . size ( ) ] ; // keep a running total of update counts int runningUpdateCount = 0 ; int i = 0 ; try { for ( ; i < batch . size ( ) ; i ++ ) { setCurrentResult ( null , ( int ) batch . get ( i ) . execute ( sourceConnection . NativeConnection , this . m_timeout , sourceConnection . queryTimeOutUnit ) [ 0 ] . fetchRow ( 0 ) . getLong ( 0 ) ) ; updateCounts [ i ] = this . lastUpdateCount ; runningUpdateCount += this . lastUpdateCount ; } } catch ( SQLException x ) { updateCounts [ i ] = EXECUTE_FAILED ; throw new BatchUpdateException ( Arrays . copyOf ( updateCounts , i + 1 ) , x ) ; } finally { clearBatch ( ) ; } // replace the update count from the last statement with the update count // from the last batch. this . lastUpdateCount = runningUpdateCount ; return updateCounts ; }
|
Submits a batch of commands to the database for execution and, if all commands execute successfully, returns an array of update counts.
| 281
| 24
|
154,531
|
@ Override public ResultSet executeQuery ( String sql ) throws SQLException { checkClosed ( ) ; VoltSQL query = VoltSQL . parseSQL ( sql ) ; if ( ! query . isOfType ( VoltSQL . TYPE_SELECT ) ) { throw SQLError . get ( SQLError . ILLEGAL_STATEMENT , sql ) ; } return this . executeQuery ( query ) ; }
|
Executes the given SQL statement, which returns a single ResultSet object.
| 90
| 14
|
154,532
|
@ Override public void setFetchDirection ( int direction ) throws SQLException { checkClosed ( ) ; if ( ( direction != ResultSet . FETCH_FORWARD ) && ( direction != ResultSet . FETCH_REVERSE ) && ( direction != ResultSet . FETCH_UNKNOWN ) ) { throw SQLError . get ( SQLError . ILLEGAL_ARGUMENT , direction ) ; } this . fetchDirection = direction ; }
|
Gives the driver a hint as to the direction in which rows will be processed in ResultSet objects created using this Statement object.
| 108
| 26
|
154,533
|
@ Override public void setFetchSize ( int rows ) throws SQLException { checkClosed ( ) ; if ( rows < 0 ) { throw SQLError . get ( SQLError . ILLEGAL_ARGUMENT , rows ) ; } this . fetchSize = rows ; }
|
Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for ResultSet objects generated by this Statement.
| 67
| 37
|
154,534
|
@ Override public void setMaxFieldSize ( int max ) throws SQLException { checkClosed ( ) ; if ( max < 0 ) { throw SQLError . get ( SQLError . ILLEGAL_ARGUMENT , max ) ; } throw SQLError . noSupport ( ) ; // Not supported by provider - no point trashing data we received from the server just to simulate the feature while not getting any gains! }
|
Sets the limit for the maximum number of bytes that can be returned for character and binary column values in a ResultSet object produced by this Statement object.
| 98
| 31
|
154,535
|
@ Override public void setMaxRows ( int max ) throws SQLException { checkClosed ( ) ; if ( max < 0 ) { throw SQLError . get ( SQLError . ILLEGAL_ARGUMENT , max ) ; } this . maxRows = max ; }
|
Sets the limit for the maximum number of rows that any ResultSet object generated by this Statement object can contain to the given number.
| 68
| 27
|
154,536
|
@ Override public void setQueryTimeout ( int seconds ) throws SQLException { checkClosed ( ) ; if ( seconds < 0 ) { throw SQLError . get ( SQLError . ILLEGAL_ARGUMENT , seconds ) ; } if ( seconds == 0 ) { this . m_timeout = Integer . MAX_VALUE ; } else { this . m_timeout = seconds ; } }
|
0 means infinite; in our case that is Integer.MAX_VALUE.
| 90
| 12
|
154,537
|
public void updateReplicasForJoin ( TransactionState snapshotTransactionState ) { long [ ] replicasAdded = new long [ 0 ] ; if ( m_term != null ) { replicasAdded = ( ( SpTerm ) m_term ) . updateReplicas ( snapshotTransactionState ) ; } m_scheduler . forwardPendingTaskToRejoinNode ( replicasAdded , snapshotTransactionState . m_spHandle ) ; }
|
This will be called from Snapshot in elastic joining or rejoining cases.
| 91
| 15
|
154,538
|
public Person newPerson ( ) { Person p = new Person ( ) ; p . firstname = firstnames [ rand . nextInt ( firstnames . length ) ] ; p . lastname = lastnames [ rand . nextInt ( lastnames . length ) ] ; p . sex = sexes [ rand . nextInt ( 2 ) ] ; p . dob = randomDOB ( ) ; // state and area code match int i = rand . nextInt ( areaCodes . length ) ; p . phonenumber = randomPhone ( areaCodes [ i ] ) ; p . state = states [ i ] ; return p ; }
|
Generate a random person.
| 132
| 5
|
154,539
|
public void loadFromJSONPlan ( JSONObject jobj , Database db ) throws JSONException { if ( jobj . has ( Members . PLAN_NODES_LISTS ) ) { JSONArray jplanNodesArray = jobj . getJSONArray ( Members . PLAN_NODES_LISTS ) ; for ( int i = 0 ; i < jplanNodesArray . length ( ) ; ++ i ) { JSONObject jplanNodesObj = jplanNodesArray . getJSONObject ( i ) ; JSONArray jplanNodes = jplanNodesObj . getJSONArray ( Members . PLAN_NODES ) ; int stmtId = jplanNodesObj . getInt ( Members . STATEMENT_ID ) ; loadPlanNodesFromJSONArrays ( stmtId , jplanNodes , db ) ; } } else { // There is only one statement in the plan. Its id is set to 0 by default int stmtId = 0 ; JSONArray jplanNodes = jobj . getJSONArray ( Members . PLAN_NODES ) ; loadPlanNodesFromJSONArrays ( stmtId , jplanNodes , db ) ; } // Connect the parent and child statements for ( List < AbstractPlanNode > nextPlanNodes : m_planNodesListMap . values ( ) ) { for ( AbstractPlanNode node : nextPlanNodes ) { connectNodesIfNecessary ( node ) ; } } }
|
Load a JSON plan. The plan must have either a PLAN_NODES array, in the case of a statement without subqueries, or a PLAN_NODES_LISTS array of PLAN_NODES arrays, one for each sub-statement.
| 316
| 45
|
154,540
|
private void loadPlanNodesFromJSONArrays ( int stmtId , JSONArray jArray , Database db ) { List < AbstractPlanNode > planNodes = new ArrayList <> ( ) ; int size = jArray . length ( ) ; try { for ( int i = 0 ; i < size ; i ++ ) { JSONObject jobj = jArray . getJSONObject ( i ) ; String nodeTypeStr = jobj . getString ( "PLAN_NODE_TYPE" ) ; PlanNodeType nodeType = PlanNodeType . get ( nodeTypeStr ) ; AbstractPlanNode apn = nodeType . getPlanNodeClass ( ) . newInstance ( ) ; apn . loadFromJSONObject ( jobj , db ) ; planNodes . add ( apn ) ; } //link children and parents for ( int i = 0 ; i < size ; i ++ ) { JSONObject jobj = jArray . getJSONObject ( i ) ; if ( jobj . has ( "CHILDREN_IDS" ) ) { AbstractPlanNode parent = planNodes . get ( i ) ; JSONArray children = jobj . getJSONArray ( "CHILDREN_IDS" ) ; for ( int j = 0 ; j < children . length ( ) ; j ++ ) { AbstractPlanNode child = getNodeofId ( children . getInt ( j ) , planNodes ) ; parent . addAndLinkChild ( child ) ; } } } m_planNodesListMap . put ( stmtId , planNodes ) ; } catch ( JSONException | InstantiationException | IllegalAccessException e ) { System . err . println ( e ) ; e . printStackTrace ( ) ; } }
|
Load plan nodes from the PLAN_NODES array. All the nodes are from a sub-statement with id = stmtId.
| 368
| 27
|
154,541
|
public final void configure ( Properties props , FormatterBuilder formatterBuilder ) { Map < URI , ImporterConfig > configs = m_factory . createImporterConfigurations ( props , formatterBuilder ) ; m_configs = new ImmutableMap . Builder < URI , ImporterConfig > ( ) . putAll ( configs ) . putAll ( Maps . filterKeys ( m_configs , not ( in ( configs . keySet ( ) ) ) ) ) . build ( ) ; }
|
This will be called for every importer configuration section for this importer type.
| 107
| 16
|
154,542
|
public final void stop ( ) { m_stopping = true ; ImmutableMap < URI , AbstractImporter > oldReference ; boolean success = false ; do { // onChange also could set m_importers. Use while loop to pick up latest ref oldReference = m_importers . get ( ) ; success = m_importers . compareAndSet ( oldReference , ImmutableMap . < URI , AbstractImporter > of ( ) ) ; } while ( ! success ) ; if ( ! m_starting . get ( ) ) return ; stopImporters ( oldReference . values ( ) ) ; if ( ! m_factory . isImporterRunEveryWhere ( ) ) { m_distributer . registerChannels ( m_distributerDesignation , Collections . < URI > emptySet ( ) ) ; m_distributer . unregisterCallback ( m_distributerDesignation ) ; } if ( m_executorService == null ) { return ; } //graceful shutdown to allow importers to properly process post shutdown tasks. m_executorService . shutdown ( ) ; try { m_executorService . awaitTermination ( 60 , TimeUnit . SECONDS ) ; m_executorService = null ; } catch ( InterruptedException ex ) { //Should never come here. s_logger . warn ( "Unexpected interrupted exception waiting for " + m_factory . getTypeName ( ) + " to shutdown" , ex ) ; } }
|
This is called by the importer framework to stop importers. All resources for this importer will be unregistered from the resource distributer.
| 320
| 29
|
154,543
|
public int [ ] get ( ) { int includedHashes = Math . min ( m_hashCount , MAX_HASHES_COUNT ) ; int [ ] retval = new int [ includedHashes + HEADER_OFFSET ] ; System . arraycopy ( m_hashes , 0 , retval , HEADER_OFFSET , includedHashes ) ; m_inputCRC . update ( m_hashCount ) ; m_inputCRC . update ( m_catalogVersion ) ; retval [ 0 ] = ( int ) m_inputCRC . getValue ( ) ; retval [ 1 ] = m_catalogVersion ; retval [ 2 ] = m_hashCount ; return retval ; }
|
Serialize the running hashes to an array and complete the overall hash for the first int value in the array.
| 155
| 22
|
154,544
|
public void offerStatement ( int stmtHash , int offset , ByteBuffer psetBuffer ) { m_inputCRC . update ( stmtHash ) ; m_inputCRC . updateFromPosition ( offset , psetBuffer ) ; if ( m_hashCount < MAX_HASHES_COUNT ) { m_hashes [ m_hashCount ] = stmtHash ; m_hashes [ m_hashCount + 1 ] = ( int ) m_inputCRC . getValue ( ) ; } m_hashCount += 2 ; }
|
Update the overall hash. Add a pair of ints to the array if the size isn't too large.
| 118
| 22
|
154,545
|
public static int compareHashes ( int [ ] leftHashes , int [ ] rightHashes ) { assert ( leftHashes != null ) ; assert ( rightHashes != null ) ; assert ( leftHashes . length >= 3 ) ; assert ( rightHashes . length >= 3 ) ; // Compare total checksum first if ( leftHashes [ 0 ] == rightHashes [ 0 ] ) { return - 1 ; } int includedHashLeft = Math . min ( leftHashes [ 2 ] , MAX_HASHES_COUNT ) ; int includedHashRight = Math . min ( rightHashes [ 2 ] , MAX_HASHES_COUNT ) ; int includedHashMin = Math . min ( includedHashLeft , includedHashRight ) ; int pos = 0 ; for ( int i = HEADER_OFFSET ; i < HEADER_OFFSET + includedHashMin ; i += 2 ) { if ( leftHashes [ i ] != rightHashes [ i ] || leftHashes [ i + 1 ] != rightHashes [ i + 1 ] ) { return pos ; } pos ++ ; } // If the number of per-statement hashes is more than MAX_HASHES_COUNT and // the mismatched hash isn't included in the per-statement hashes return HASH_NOT_INCLUDE ; }
|
Compare two hash arrays; return true if they are the same.
| 283
| 10
|
154,546
|
public static String description ( int [ ] hashes , int m_hashMismatchPos ) { assert ( hashes != null ) ; assert ( hashes . length >= 3 ) ; StringBuilder sb = new StringBuilder ( ) ; sb . append ( "Full Hash " ) . append ( hashes [ 0 ] ) ; sb . append ( ", Catalog Version " ) . append ( hashes [ 1 ] ) ; sb . append ( ", Statement Count " ) . append ( hashes [ 2 ] / 2 ) ; int includedHashes = Math . min ( hashes [ 2 ] , MAX_HASHES_COUNT ) ; int pos = 0 ; for ( int i = HEADER_OFFSET ; i < HEADER_OFFSET + includedHashes ; i += 2 ) { sb . append ( "\n Ran Statement " ) . append ( hashes [ i ] ) ; sb . append ( " with Parameters " ) . append ( hashes [ i + 1 ] ) ; if ( pos == m_hashMismatchPos ) { sb . append ( " <--- ALERT: Hash mismatch starts from here!" ) ; } pos ++ ; } if ( hashes [ 2 ] > MAX_HASHES_COUNT ) { sb . append ( "\n Additional SQL statements truncated." ) ; if ( m_hashMismatchPos == DeterminismHash . HASH_NOT_INCLUDE ) { sb . append ( "\n The mismatched hash is also truncated. " + "For debugging purpose, use VOLTDB_OPTS=\"-DMAX_STATEMENTS_WITH_DETAIL=<hashcount>\" to set to a higher value, " + "it could impact performance." ) ; } } return sb . toString ( ) ; }
|
Log the contents of the hash array.
| 379
| 7
|
154,547
|
private boolean renameOverwrite ( String oldname , String newname ) { boolean deleted = delete ( newname ) ; if ( exists ( oldname ) ) { File file = new File ( oldname ) ; return file . renameTo ( new File ( newname ) ) ; } return deleted ; }
|
Rename the file with oldname to newname. If a file with newname already exists, it is deleted before the renaming operation proceeds.
| 62
| 29
|
154,548
|
public String canonicalOrAbsolutePath ( String path ) { try { return canonicalPath ( path ) ; } catch ( Exception e ) { return absolutePath ( path ) ; } }
|
Retrieves the canonical path for the given path, or the absolute path if attempting to retrieve the canonical path fails.
| 37
| 25
|
154,549
|
private boolean isCoordinatorStatsUsable ( boolean incremental ) { if ( m_coordinatorTask == null ) { return false ; } if ( incremental ) { return m_coordinatorTask . m_timedInvocations - m_coordinatorTask . m_lastTimedInvocations > 0 ; } return m_coordinatorTask . m_timedInvocations > 0 ; }
|
Whether any coordinator task was executed at all.
| 82
| 9
|
154,550
|
public int getIncrementalMinResultSizeAndReset ( ) { int retval = m_workerTask . m_incrMinResultSize ; m_workerTask . m_incrMinResultSize = Integer . MAX_VALUE ; if ( isCoordinatorStatsUsable ( true ) ) { m_coordinatorTask . m_incrMinResultSize = Integer . MAX_VALUE ; } return retval ; }
|
The result size should be taken from the final output coming from the coordinator task.
| 90
| 16
|
154,551
|
private Statement compileAlterTableDropTTL ( Table t ) { if ( t . getTTL ( ) == null ) { throw Error . error ( ErrorCode . X_42501 ) ; } if ( ! StringUtil . isEmpty ( t . getTTL ( ) . migrationTarget ) ) { throw unexpectedToken ( "May not drop migration target" ) ; } Object [ ] args = new Object [ ] { t . getName ( ) , Integer . valueOf ( SchemaObject . CONSTRAINT ) , Boolean . valueOf ( false ) , Boolean . valueOf ( false ) } ; return new StatementSchema ( null , StatementTypes . DROP_TTL , args , null , t . getName ( ) ) ; }
|
VoltDB extension: drop TTL.
| 158
| 6
|
154,552
|
StatementDMQL compileTriggerSetStatement ( Table table , RangeVariable [ ] rangeVars ) { read ( ) ; Expression [ ] updateExpressions ; int [ ] columnMap ; OrderedHashSet colNames = new OrderedHashSet ( ) ; HsqlArrayList exprList = new HsqlArrayList ( ) ; RangeVariable [ ] targetRangeVars = new RangeVariable [ ] { rangeVars [ TriggerDef . NEW_ROW ] } ; readSetClauseList ( targetRangeVars , colNames , exprList ) ; columnMap = table . getColumnIndexes ( colNames ) ; updateExpressions = new Expression [ exprList . size ( ) ] ; exprList . toArray ( updateExpressions ) ; resolveUpdateExpressions ( table , rangeVars , columnMap , updateExpressions , RangeVariable . emptyArray ) ; StatementDMQL cs = new StatementDML ( session , table , rangeVars , columnMap , updateExpressions , compileContext ) ; return cs ; }
|
Creates a SET statement for a trigger row from this parse context.
| 212
| 13
|
154,553
|
ColumnSchema readColumnDefinitionOrNull ( Table table , HsqlName hsqlName , HsqlArrayList constraintList ) { boolean isIdentity = false ; boolean isPKIdentity = false ; boolean identityAlways = false ; Expression generateExpr = null ; boolean isNullable = true ; Expression defaultExpr = null ; Type typeObject ; NumberSequence sequence = null ; if ( token . tokenType == Tokens . IDENTITY ) { read ( ) ; isIdentity = true ; isPKIdentity = true ; typeObject = Type . SQL_INTEGER ; sequence = new NumberSequence ( null , 0 , 1 , typeObject ) ; } else if ( token . tokenType == Tokens . COMMA ) { ; return null ; } else { typeObject = readTypeDefinition ( true ) ; } if ( isIdentity ) { } else if ( token . tokenType == Tokens . DEFAULT ) { read ( ) ; defaultExpr = readDefaultClause ( typeObject ) ; } else if ( token . tokenType == Tokens . GENERATED && ! isIdentity ) { read ( ) ; if ( token . tokenType == Tokens . BY ) { read ( ) ; readThis ( Tokens . DEFAULT ) ; } else { readThis ( Tokens . ALWAYS ) ; identityAlways = true ; } readThis ( Tokens . AS ) ; if ( token . tokenType == Tokens . IDENTITY ) { read ( ) ; sequence = new NumberSequence ( null , typeObject ) ; sequence . setAlways ( identityAlways ) ; if ( token . tokenType == Tokens . OPENBRACKET ) { read ( ) ; readSequenceOptions ( sequence , false , false ) ; readThis ( Tokens . CLOSEBRACKET ) ; } isIdentity = true ; } else if ( token . tokenType == Tokens . OPENBRACKET ) { read ( ) ; generateExpr = XreadValueExpression ( ) ; readThis ( Tokens . CLOSEBRACKET ) ; } } ColumnSchema column = new ColumnSchema ( hsqlName , typeObject , isNullable , false , defaultExpr ) ; readColumnConstraints ( table , column , constraintList ) ; if ( token . tokenType == Tokens . IDENTITY && ! isIdentity ) { read ( ) ; isIdentity = true ; isPKIdentity = true ; sequence = new NumberSequence ( null , 0 , 1 , typeObject ) ; } if ( isIdentity ) { column . setIdentity ( sequence ) ; } if ( isPKIdentity && ! column . isPrimaryKey ( ) ) { OrderedHashSet set = new OrderedHashSet ( ) ; set . add ( column . getName ( ) . name ) ; HsqlName constName = database . nameManager . newAutoName ( "PK" , table . getSchemaName ( ) , table . getName ( ) , SchemaObject . CONSTRAINT ) ; Constraint c = new Constraint ( constName , true , set , Constraint . PRIMARY_KEY ) ; constraintList . set ( 0 , c ) ; column . setPrimaryKey ( true ) ; } return column ; }
|
Responsible for handling the creation of table columns during the process of executing CREATE TABLE or ADD COLUMN etc. statements.
| 678
| 27
|
154,554
|
void readCheckConstraintCondition ( Constraint c ) { readThis ( Tokens . OPENBRACKET ) ; startRecording ( ) ; isCheckOrTriggerCondition = true ; Expression condition = XreadBooleanValueExpression ( ) ; isCheckOrTriggerCondition = false ; Token [ ] tokens = getRecordedStatement ( ) ; readThis ( Tokens . CLOSEBRACKET ) ; c . check = condition ; c . checkStatement = Token . getSQL ( tokens ) ; }
|
Responsible for handling the check constraints section of CREATE TABLE ...
| 104
| 13
|
154,555
|
private int [ ] readColumnList ( Table table , boolean ascOrDesc ) { OrderedHashSet set = readColumnNames ( ascOrDesc ) ; return table . getColumnIndexes ( set ) ; }
|
Process a bracketed column list as used in the declaration of SQL CONSTRAINTS, and return an array containing the indexes of the columns within the table.
| 44
| 32
|
154,556
|
void processAlterTableRename ( Table table ) { HsqlName name = readNewSchemaObjectName ( SchemaObject . TABLE ) ; name . setSchemaIfNull ( table . getSchemaName ( ) ) ; if ( table . getSchemaName ( ) != name . schema ) { throw Error . error ( ErrorCode . X_42505 ) ; } database . schemaManager . renameSchemaObject ( table . getName ( ) , name ) ; }
|
Responsible for handling the tail of ALTER TABLE ... RENAME ...
| 101
| 15
|
154,557
|
void processAlterTableDropColumn ( Table table , String colName , boolean cascade ) { int colindex = table . getColumnIndex ( colName ) ; if ( table . getColumnCount ( ) == 1 ) { throw Error . error ( ErrorCode . X_42591 ) ; } session . commit ( false ) ; TableWorks tableWorks = new TableWorks ( session , table ) ; tableWorks . dropColumn ( colindex , cascade ) ; //VoltDB extension to support Time to live if ( table . getTTL ( ) != null && colName . equalsIgnoreCase ( table . getTTL ( ) . ttlColumn . getName ( ) . name ) ) { table . dropTTL ( ) ; } }
|
Responsible for handling the tail of ALTER TABLE ... DROP COLUMN ...
| 155
| 17
|
154,558
|
void processAlterTableDropConstraint ( Table table , String name , boolean cascade ) { session . commit ( false ) ; TableWorks tableWorks = new TableWorks ( session , table ) ; tableWorks . dropConstraint ( name , cascade ) ; return ; }
|
Responsible for handling the tail of ALTER TABLE ... DROP CONSTRAINT ...
| 57
| 18
|
154,559
|
private void processAlterColumnType ( Table table , ColumnSchema oldCol , boolean fullDefinition ) { ColumnSchema newCol ; if ( oldCol . isGenerated ( ) ) { throw Error . error ( ErrorCode . X_42561 ) ; } if ( fullDefinition ) { HsqlArrayList list = new HsqlArrayList ( ) ; Constraint c = table . getPrimaryConstraint ( ) ; if ( c == null ) { c = new Constraint ( null , true , null , Constraint . TEMP ) ; } list . add ( c ) ; newCol = readColumnDefinitionOrNull ( table , oldCol . getName ( ) , list ) ; if ( newCol == null ) { throw Error . error ( ErrorCode . X_42000 ) ; } if ( oldCol . isIdentity ( ) && newCol . isIdentity ( ) ) { throw Error . error ( ErrorCode . X_42525 ) ; } if ( list . size ( ) > 1 ) { // A VoltDB extension to support establishing or preserving the NOT NULL // attribute of an altered column. if ( voltDBacceptNotNullConstraint ( list ) ) { newCol . setNullable ( false ) ; } else // End of VoltDB extension throw Error . error ( ErrorCode . X_42524 ) ; } } else { Type type = readTypeDefinition ( true ) ; if ( oldCol . isIdentity ( ) ) { if ( ! type . isIntegralType ( ) ) { throw Error . error ( ErrorCode . X_42561 ) ; } } newCol = oldCol . duplicate ( ) ; newCol . setType ( type ) ; } TableWorks tw = new TableWorks ( session , table ) ; tw . retypeColumn ( oldCol , newCol ) ; }
|
Allows changes to the type of a column or the addition of an IDENTITY generator. IDENTITY is not removed if it does not appear in the new column definition. Constraint definitions are not allowed.
| 387
| 37
|
154,560
|
private void processAlterColumnRename ( Table table , ColumnSchema column ) { checkIsSimpleName ( ) ; if ( table . findColumn ( token . tokenString ) > - 1 ) { throw Error . error ( ErrorCode . X_42504 , token . tokenString ) ; } database . schemaManager . checkColumnIsReferenced ( table . getName ( ) , column . getName ( ) ) ; session . commit ( false ) ; table . renameColumn ( column , token . tokenString , isDelimitedIdentifier ( ) ) ; read ( ) ; }
|
Responsible for handling the tail of ALTER COLUMN ... RENAME ...
| 123
| 17
|
154,561
|
void readLimitConstraintCondition ( Constraint c ) { readThis ( Tokens . PARTITION ) ; readThis ( Tokens . ROWS ) ; int rowsLimit = readInteger ( ) ; c . rowsLimit = rowsLimit ; // The optional EXECUTE (DELETE ...) clause if ( readIfThis ( Tokens . EXECUTE ) ) { // Capture the statement between parentheses following the EXECUTE keyword, // as in // // LIMIT PARTITION ROWS 10 EXECUTE (DELETE FROM tbl WHERE b = 1) // readThis ( Tokens . OPENBRACKET ) ; startRecording ( ) ; int numOpenBrackets = 1 ; while ( numOpenBrackets > 0 ) { switch ( token . tokenType ) { case Tokens . OPENBRACKET : numOpenBrackets ++ ; read ( ) ; break ; case Tokens . CLOSEBRACKET : numOpenBrackets -- ; if ( numOpenBrackets > 0 ) { // don't want the final parenthesis read ( ) ; } break ; case Tokens . X_ENDPARSE : throw unexpectedToken ( ) ; default : read ( ) ; } } Token [ ] stmtTokens = getRecordedStatement ( ) ; // This captures the DELETE statement exactly, including embedded whitespace, etc. c . rowsLimitDeleteStmt = Token . getSQL ( stmtTokens ) ; readThis ( Tokens . CLOSEBRACKET ) ; } }
|
Responsible for handling the Volt limit constraints section of CREATE TABLE ...
| 307
| 14
|
154,562
|
private java . util . List < Expression > XreadExpressions ( java . util . List < Boolean > ascDesc ) { return XreadExpressions ( ascDesc , false ) ; }
|
By default, disallow empty parentheses.
| 39
| 6
|
154,563
|
static boolean isValid ( int type ) { // make sure this is always synchronized with Zoodefs!! switch ( type ) { case OpCode . notification : return false ; case OpCode . create : case OpCode . delete : case OpCode . createSession : case OpCode . exists : case OpCode . getData : case OpCode . setData : case OpCode . sync : case OpCode . getACL : case OpCode . setACL : case OpCode . getChildren : case OpCode . getChildren2 : case OpCode . ping : case OpCode . closeSession : case OpCode . setWatches : return true ; default : return false ; } }
|
Is the packet type a valid packet in ZooKeeper?
| 143
| 11
|
154,564
|
public void startSeekingFor ( final Set < Long > hsids , final Map < Long , Boolean > inTrouble ) { // if the mesh hsids change we need to reset if ( ! m_hsids . equals ( hsids ) ) { if ( ! m_hsids . isEmpty ( ) ) clear ( ) ; m_hsids = ImmutableSortedSet . copyOf ( hsids ) ; } // determine the survivors m_survivors = m_strategy . accept ( survivorPicker , Pair . of ( m_hsids , inTrouble ) ) ; // start accumulating link failure graphing info add ( m_selfHsid , inTrouble ) ; }
|
Start accumulating site-link graphing information.
| 154
| 7
|
154,565
|
static protected void removeValues ( TreeMultimap < Long , Long > mm , Set < Long > values ) { Iterator < Map . Entry < Long , Long > > itr = mm . entries ( ) . iterator ( ) ; while ( itr . hasNext ( ) ) { Map . Entry < Long , Long > e = itr . next ( ) ; if ( values . contains ( e . getValue ( ) ) ) { itr . remove ( ) ; } } }
|
Convenience method that removes all instances of the given values from the given map.
| 102
| 16
|
154,566
|
public static Predicate < Map . Entry < Long , Boolean > > amongDeadHsids ( final Set < Long > hsids ) { return new Predicate < Map . Entry < Long , Boolean > > ( ) { @ Override public boolean apply ( Entry < Long , Boolean > e ) { return hsids . contains ( e . getKey ( ) ) && e . getValue ( ) ; } } ; }
|
Returns a map entry predicate that tests whether or not the given map entry describes a dead site.
| 89
| 19
|
154,567
|
private void removeValue ( TreeMultimap < Long , Long > mm , long value ) { Iterator < Map . Entry < Long , Long > > itr = mm . entries ( ) . iterator ( ) ; while ( itr . hasNext ( ) ) { Map . Entry < Long , Long > e = itr . next ( ) ; if ( e . getValue ( ) . equals ( value ) ) { itr . remove ( ) ; } } }
|
Convenience method that removes all instances of the given value from the given map.
| 98
| 16
|
154,568
|
void add ( long reportingHsid , final Map < Long , Boolean > failed ) { // skip if the reporting site did not belong to the pre // failure mesh if ( ! m_hsids . contains ( reportingHsid ) ) return ; // ship if the reporting site is reporting itself dead Boolean harakiri = failed . get ( reportingHsid ) ; if ( harakiri != null && harakiri . booleanValue ( ) ) return ; Set < Long > dead = Sets . newHashSet ( ) ; for ( Map . Entry < Long , Boolean > e : failed . entrySet ( ) ) { // skip if the failed site did not belong to the // pre failure mesh if ( ! m_hsids . contains ( e . getKey ( ) ) ) continue ; m_reported . put ( e . getKey ( ) , reportingHsid ) ; // if the failure is witnessed add it to the dead graph if ( e . getValue ( ) ) { m_dead . put ( e . getKey ( ) , reportingHsid ) ; dead . add ( e . getKey ( ) ) ; } } // once you are witnessed dead you cannot become undead, // but it is not the case for alive nodes, as they can // die. So remove all what the reporting site thought // was alive before this invocation removeValue ( m_alive , reportingHsid ) ; for ( Long alive : Sets . difference ( m_hsids , dead ) ) { m_alive . put ( alive , reportingHsid ) ; } }
|
Adds alive and dead graph information.
| 323
| 6
|
154,569
|
public void add ( long reportingHsid , SiteFailureMessage sfm ) { // skip if the reporting site did not belong to the pre // failure mesh, or the reporting site is reporting itself // dead, or none of the sites in the safe transaction map // are among the known hsids if ( ! m_hsids . contains ( reportingHsid ) || ! sfm . m_survivors . contains ( reportingHsid ) ) return ; Set < Long > survivors = sfm . m_survivors ; if ( Sets . filter ( sfm . getObservedFailedSites ( ) , in ( m_hsids ) ) . isEmpty ( ) ) { survivors = m_hsids ; } // dead = pre failure mesh - survivors Set < Long > dead = Sets . difference ( m_hsids , survivors ) ; removeValue ( m_dead , reportingHsid ) ; // add dead graph nodes for ( long w : dead ) { if ( ! m_hsids . contains ( w ) ) continue ; m_dead . put ( w , reportingHsid ) ; } // Remove all what the reporting site thought // was alive before this invocation removeValue ( m_alive , reportingHsid ) ; // add alive graph nodes for ( long s : survivors ) { if ( ! m_hsids . contains ( s ) ) continue ; m_alive . put ( s , reportingHsid ) ; } for ( long s : sfm . getFailedSites ( ) ) { if ( ! m_hsids . contains ( s ) ) continue ; m_reported . put ( s , reportingHsid ) ; } }
|
Adds alive and dead graph information from a reporting site's survivor set.
| 348
| 12
|
154,570
|
protected boolean seenByInterconnectedPeers ( Set < Long > destinations , Set < Long > origins ) { Set < Long > seers = Multimaps . filterValues ( m_alive , in ( origins ) ) . keySet ( ) ; int before = origins . size ( ) ; origins . addAll ( seers ) ; if ( origins . containsAll ( destinations ) ) { return true ; } else if ( origins . size ( ) == before ) { return false ; } return seenByInterconnectedPeers ( destinations , origins ) ; }
|
Walk the alive graph to see if there is a connected path between origins and destinations.
| 115
| 16
|
154,571
|
public Set < Long > forWhomSiteIsDead ( long hsid ) { ImmutableSet . Builder < Long > isb = ImmutableSet . builder ( ) ; Set < Long > deadBy = m_dead . get ( hsid ) ; if ( ! deadBy . isEmpty ( ) && m_survivors . contains ( hsid ) && m_strategy == ArbitrationStrategy . MATCHING_CARDINALITY ) { isb . addAll ( Sets . filter ( deadBy , amongSurvivors ) ) ; } return isb . build ( ) ; }
|
Is the given hsid considered dead by anyone in my survivor set?
| 126
| 14
|
154,572
|
protected static InMemoryJarfile addDDLToCatalog ( Catalog oldCatalog , InMemoryJarfile jarfile , String [ ] adhocDDLStmts , boolean isXDCR ) throws IOException , VoltCompilerException { StringBuilder sb = new StringBuilder ( ) ; compilerLog . info ( "Applying the following DDL to cluster:" ) ; for ( String stmt : adhocDDLStmts ) { compilerLog . info ( "\t" + stmt ) ; sb . append ( stmt ) ; sb . append ( ";\n" ) ; } String newDDL = sb . toString ( ) ; compilerLog . trace ( "Adhoc-modified DDL:\n" + newDDL ) ; VoltCompiler compiler = new VoltCompiler ( isXDCR ) ; compiler . compileInMemoryJarfileWithNewDDL ( jarfile , newDDL , oldCatalog ) ; return jarfile ; }
|
Append the supplied ad hoc DDL to the current catalog's DDL and recompile the jarfile.
| 208
| 22
|
154,573
|
static protected CompletableFuture < ClientResponse > makeQuickResponse ( byte statusCode , String msg ) { ClientResponseImpl cri = new ClientResponseImpl ( statusCode , new VoltTable [ 0 ] , msg ) ; CompletableFuture < ClientResponse > f = new CompletableFuture <> ( ) ; f . complete ( cri ) ; return f ; }
|
Error-generating shortcut method.
| 78
| 4
|
154,574
|
protected String verifyAndWriteCatalogJar ( CatalogChangeResult ccr ) { String procedureName = "@VerifyCatalogAndWriteJar" ; CompletableFuture < Map < Integer , ClientResponse > > cf = callNTProcedureOnAllHosts ( procedureName , ccr . catalogBytes , ccr . encodedDiffCommands , ccr . catalogHash , ccr . deploymentBytes ) ; Map < Integer , ClientResponse > resultMapByHost = null ; String err ; long timeoutSeconds = VerifyCatalogAndWriteJar . TIMEOUT ; hostLog . info ( "Max timeout setting for VerifyCatalogAndWriteJar is " + timeoutSeconds + " seconds" ) ; try { Stopwatch sw = Stopwatch . createStarted ( ) ; long elapsed = 0 ; while ( ( elapsed = sw . elapsed ( TimeUnit . SECONDS ) ) < ( timeoutSeconds ) ) { resultMapByHost = cf . getNow ( null ) ; if ( resultMapByHost != null ) { sw . stop ( ) ; break ; } if ( elapsed < 5 ) { // do not log under 5 seconds and sleep for 100 milliseconds Thread . sleep ( 100 ) ; continue ; } hostLog . info ( elapsed + " seconds has elapsed but " + procedureName + " is still wait for remote response." + "The max timeout value is " + timeoutSeconds + " seconds." ) ; Thread . sleep ( TimeUnit . SECONDS . toMillis ( 5 ) ) ; } } catch ( Exception e ) { err = procedureName + " run everywhere call failed: " + e . getMessage ( ) ; hostLog . info ( err + ", " + com . google_voltpatches . common . base . Throwables . getStackTraceAsString ( e ) ) ; return err ; } if ( resultMapByHost == null ) { err = "An invocation of procedure " + procedureName + " on all hosts timed out." ; hostLog . info ( err ) ; return err ; } for ( Entry < Integer , ClientResponse > entry : resultMapByHost . entrySet ( ) ) { if ( entry . getValue ( ) . getStatus ( ) != ClientResponseImpl . SUCCESS ) { err = "The response from host " + entry . getKey ( ) . toString ( ) + " for " + procedureName + " returned failures: " + entry . getValue ( ) . getStatusString ( ) ; compilerLog . info ( err ) ; // hide the internal NT-procedure @VerifyCatalogAndWriteJar from the client message return err ; } } return null ; }
|
Run the catalog jar NT procedure to check and write the catalog file. Check the results map from every host and return an error message if needed.
| 548
| 28
|
154,575
|
public static long getNextGenerationId ( ) { // ENG-14511- these calls may hit assertion failures in testing environments try { return UniqueIdGenerator . makeIdFromComponents ( System . currentTimeMillis ( ) , m_generationId . incrementAndGet ( ) , MpInitiator . MP_INIT_PID ) ; } catch ( Throwable t ) { // Try resetting the generation m_generationId . set ( 0L ) ; return UniqueIdGenerator . makeIdFromComponents ( System . currentTimeMillis ( ) , m_generationId . incrementAndGet ( ) , MpInitiator . MP_INIT_PID ) ; } }
|
Get a unique id for the next generation for export.
| 149
| 11
|
154,576
|
public User getUser ( String name , String password ) { if ( name == null ) { name = "" ; } if ( password == null ) { password = "" ; } User user = get ( name ) ; user . checkPassword ( password ) ; return user ; }
|
Returns the User object with the specified name and password from this object's set.
| 56
| 16
|
154,577
|
public User get ( String name ) { User user = ( User ) userList . get ( name ) ; if ( user == null ) { throw Error . error ( ErrorCode . X_28501 , name ) ; } return user ; }
|
Returns the User object identified by the name argument.
| 50
| 10
|
154,578
|
void reset ( int hashTableSize , int capacity ) { // A VoltDB extension to diagnose ArrayOutOfBounds. if ( linkTable != null ) { voltDBresetCapacity = linkTable . length ; } ++ voltDBresetCount ; voltDBlastResetEvent = voltDBhistoryDepth ; voltDBhistoryCapacity = Math . min ( voltDBhistoryMaxCapacity , Math . max ( voltDBhistoryMinCapacity , voltDBhistoryDepth ) ) ; voltDBhistory = new int [ voltDBhistoryCapacity ] ; // End of VoltDB extension int [ ] newHT = new int [ hashTableSize ] ; int [ ] newLT = new int [ capacity ] ; // allocate memory before assigning hashTable = newHT ; linkTable = newLT ; resetTables ( ) ; }
|
Reset the structure as empty with a new size .
| 169
| 11
|
154,579
|
void clear ( ) { // A VoltDB extension to diagnose ArrayOutOfBounds. if ( linkTable != null ) { voltDBclearCapacity = linkTable . length ; } ++ voltDBclearCount ; voltDBlastClearEvent = voltDBhistoryDepth ; // End of VoltDB extension int to = linkTable . length ; int [ ] intArray = linkTable ; while ( -- to >= 0 ) { intArray [ to ] = 0 ; } resetTables ( ) ; }
|
Reset the index as empty .
| 103
| 7
|
154,580
|
void unlinkNode ( int index , int lastLookup , int lookup ) { // A VoltDB extension to diagnose ArrayOutOfBounds. voltDBhistory [ voltDBhistoryDepth ++ % voltDBhistoryCapacity ] = - index - 1 ; // End of VoltDB extension // unlink the node if ( lastLookup == - 1 ) { hashTable [ index ] = linkTable [ lookup ] ; } else { linkTable [ lastLookup ] = linkTable [ lookup ] ; } // add to reclaimed list linkTable [ lookup ] = reclaimedNodePointer ; reclaimedNodePointer = lookup ; elementCount -- ; }
|
Unlink a node from a linked list and link into the reclaimed list .
| 131
| 15
|
154,581
|
boolean removeEmptyNode ( int lookup ) { // A VoltDB extension to diagnose ArrayOutOfBounds. voltDBhistory [ voltDBhistoryDepth ++ % voltDBhistoryCapacity ] = 1000000 + lookup ; // End of VoltDB extension boolean found = false ; int lastLookup = - 1 ; for ( int i = reclaimedNodePointer ; i >= 0 ; lastLookup = i , i = linkTable [ i ] ) { if ( i == lookup ) { if ( lastLookup == - 1 ) { reclaimedNodePointer = linkTable [ lookup ] ; } else { linkTable [ lastLookup ] = linkTable [ lookup ] ; } found = true ; break ; } } if ( ! found ) { return false ; } for ( int i = 0 ; i < newNodePointer ; i ++ ) { if ( linkTable [ i ] > lookup ) { linkTable [ i ] -- ; } } System . arraycopy ( linkTable , lookup + 1 , linkTable , lookup , newNodePointer - lookup - 1 ) ; linkTable [ newNodePointer - 1 ] = 0 ; newNodePointer -- ; for ( int i = 0 ; i < hashTable . length ; i ++ ) { if ( hashTable [ i ] > lookup ) { hashTable [ i ] -- ; } } return true ; }
|
Remove a node that has already been unlinked . This is not required for index operations . It is used only when the row needs to be removed from the data structures that store the actual indexed data and the nodes need to be contiguous .
| 283
| 47
|
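The four rows above (reset, clear, unlinkNode, removeEmptyNode) all operate on one data structure: a hash index kept in two int arrays, where freed slots are chained through linkTable into a "reclaimed" list instead of being compacted on every delete. A self-contained sketch of just that free-list bookkeeping (hypothetical names; the hashing itself is omitted):

```java
// Minimal free-list over an int[] link table: freed slots are chained through
// the same array, so allocation reuses them before growing the high-water mark.
public final class IntFreeList {
    private final int[] linkTable;
    private int newNodePointer = 0;      // high-water mark of slots ever used
    private int reclaimedPointer = -1;   // head of the chain of freed slots

    IntFreeList(int capacity) { linkTable = new int[capacity]; }

    int allocate() {
        if (reclaimedPointer >= 0) {     // reuse a freed slot first
            int slot = reclaimedPointer;
            reclaimedPointer = linkTable[slot];
            return slot;
        }
        return newNodePointer++;         // otherwise take a fresh slot
    }

    void free(int slot) {                // push the slot onto the reclaimed chain
        linkTable[slot] = reclaimedPointer;
        reclaimedPointer = slot;
    }
}
```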
154,582
|
private static String translateSep ( String sep , boolean isProperty ) { if ( sep == null ) { return null ; } int next = sep . indexOf ( BACKSLASH_CHAR ) ; if ( next != - 1 ) { int start = 0 ; char [ ] sepArray = sep . toCharArray ( ) ; char ch = 0 ; int len = sep . length ( ) ; StringBuffer sb = new StringBuffer ( len ) ; do { sb . append ( sepArray , start , next - start ) ; start = ++ next ; if ( next >= len ) { sb . append ( BACKSLASH_CHAR ) ; break ; } if ( ! isProperty ) { ch = sepArray [ next ] ; } if ( ch == 'n' ) { sb . append ( LF_CHAR ) ; start ++ ; } else if ( ch == 'r' ) { sb . append ( CR_CHAR ) ; start ++ ; } else if ( ch == 't' ) { sb . append ( '\t' ) ; start ++ ; } else if ( ch == BACKSLASH_CHAR ) { sb . append ( BACKSLASH_CHAR ) ; start ++ ; } else if ( ch == 'u' ) { start ++ ; sb . append ( ( char ) Integer . parseInt ( sep . substring ( start , start + 4 ) , 16 ) ) ; start += 4 ; } else if ( sep . startsWith ( "semi" , next ) ) { sb . append ( ';' ) ; start += 4 ; } else if ( sep . startsWith ( "space" , next ) ) { sb . append ( ' ' ) ; start += 5 ; } else if ( sep . startsWith ( "quote" , next ) ) { sb . append ( DOUBLE_QUOTE_CHAR ) ; start += 5 ; } else if ( sep . startsWith ( "apos" , next ) ) { sb . append ( '\'' ) ; start += 4 ; } else { sb . append ( BACKSLASH_CHAR ) ; sb . append ( sepArray [ next ] ) ; start ++ ; } } while ( ( next = sep . indexOf ( BACKSLASH_CHAR , start ) ) != - 1 ) ; sb . append ( sepArray , start , len - start ) ; sep = sb . toString ( ) ; } return sep ; }
|
Translates the escaped characters in a separator string and returns the non - escaped string .
| 507
| 19
|
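The effect of translateSep is that text-table separator properties can spell control characters and delimiter-sensitive characters symbolically. A reduced sketch of the same idea (a naive String.replace version; unlike the real method it handles neither doubled backslashes nor hex escapes, and it ignores escape ordering):

```java
public class SepTranslate {
    // Translate the symbolic escapes used in separator property values.
    // Illustration only: the real translateSep scans character by character.
    static String translate(String sep) {
        return sep.replace("\\semi", ";")
                  .replace("\\space", " ")
                  .replace("\\quote", "\"")
                  .replace("\\apos", "'")
                  .replace("\\n", "\n")
                  .replace("\\r", "\r")
                  .replace("\\t", "\t");
    }
}
```

For example, translate("a\semib") yields "a;b", and translate("\r\n") yields a CRLF pair.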
154,583
|
public void open ( boolean readonly ) { fileFreePosition = 0 ; try { dataFile = ScaledRAFile . newScaledRAFile ( database , fileName , readonly , ScaledRAFile . DATA_FILE_RAF , null , null ) ; fileFreePosition = dataFile . length ( ) ; if ( fileFreePosition > Integer . MAX_VALUE ) { throw new HsqlException ( "" , "" , 0 ) ; } initBuffers ( ) ; } catch ( Exception e ) { throw Error . error ( ErrorCode . FILE_IO_ERROR , ErrorCode . M_TextCache_openning_file_error , new Object [ ] { fileName , e } ) ; } cacheReadonly = readonly ; }
|
Opens a data source file .
| 157
| 7
|
154,584
|
public synchronized void close ( boolean write ) { if ( dataFile == null ) { return ; } try { cache . saveAll ( ) ; boolean empty = ( dataFile . length ( ) <= NL . length ( ) ) ; dataFile . close ( ) ; dataFile = null ; if ( empty && ! cacheReadonly ) { FileUtil . getDefaultInstance ( ) . delete ( fileName ) ; } } catch ( Exception e ) { throw Error . error ( ErrorCode . FILE_IO_ERROR , ErrorCode . M_TextCache_closing_file_error , new Object [ ] { fileName , e } ) ; } }
|
Writes newly created rows to disk . In the current implementation such rows have already been saved so this method just removes a source file that has no rows .
| 136
| 33
|
154,585
|
void purge ( ) { uncommittedCache . clear ( ) ; try { if ( cacheReadonly ) { close ( false ) ; } else { if ( dataFile != null ) { dataFile . close ( ) ; dataFile = null ; } FileUtil . getDefaultInstance ( ) . delete ( fileName ) ; } } catch ( Exception e ) { throw Error . error ( ErrorCode . FILE_IO_ERROR , ErrorCode . M_TextCache_purging_file_error , new Object [ ] { fileName , e } ) ; } }
|
Closes the source file and deletes it if it is not read - only .
| 118
| 17
|
154,586
|
int findNextUsedLinePos ( int pos ) { try { int firstPos = pos ; int currentPos = pos ; boolean wasCR = false ; dataFile . seek ( pos ) ; while ( true ) { int c = dataFile . read ( ) ; currentPos ++ ; switch ( c ) { case CR_CHAR : wasCR = true ; break ; case LF_CHAR : wasCR = false ; ( ( RowInputText ) rowIn ) . skippedLine ( ) ; firstPos = currentPos ; break ; case ' ' : if ( wasCR ) { wasCR = false ; ( ( RowInputText ) rowIn ) . skippedLine ( ) ; } break ; case - 1 : return - 1 ; default : return firstPos ; } } } catch ( IOException e ) { throw new HsqlException ( e . getMessage ( ) , "" , 0 ) ; } }
|
Searches from file pointer pos and finds the beginning of the first line that contains any non - space character . Increments the row counter when a blank line is skipped .
| 185
| 35
|
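The wasCR flag above is what lets the scanner treat CR, LF, and CRLF uniformly while counting each skipped line only once. A tiny standalone sketch of that state machine (illustrative, not the TextCache code):

```java
public class LineEndings {
    // Count line endings in a byte stream, treating CRLF as a single ending:
    // LF always ends a line; a lone CR (not followed by LF) also ends one.
    static int countLineEndings(byte[] data) {
        int endings = 0;
        boolean wasCR = false;
        for (byte b : data) {
            if (b == '\r') {
                wasCR = true;
            } else if (b == '\n') {
                endings++;       // LF, or the LF half of a CRLF pair
                wasCR = false;
            } else if (wasCR) {
                endings++;       // a lone CR ended the previous line
                wasCR = false;
            }
        }
        return endings;
    }
}
```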
154,587
|
protected synchronized void saveRows ( CachedObject [ ] rows , int offset , int count ) { if ( count == 0 ) { return ; } for ( int i = offset ; i < offset + count ; i ++ ) { CachedObject r = rows [ i ] ; uncommittedCache . put ( r . getPos ( ) , r ) ; rows [ i ] = null ; } }
|
This is called internally when old rows need to be removed from the cache . Text table rows that have not been saved are those that have not been committed yet . So we don t save them but add them to the uncommitted cache until such time that they are committed or rolled back - fredt
| 83
| 60
|
154,588
|
public DoubleHistogram copy ( ) { final DoubleHistogram targetHistogram = new DoubleHistogram ( configuredHighestToLowestValueRatio , getNumberOfSignificantValueDigits ( ) ) ; targetHistogram . setTrackableValueRange ( currentLowestValueInAutoRange , currentHighestValueLimitInAutoRange ) ; integerValuesHistogram . copyInto ( targetHistogram . integerValuesHistogram ) ; return targetHistogram ; }
|
Create a copy of this histogram complete with data and everything .
| 96
| 13
|
154,589
|
public void add ( final DoubleHistogram fromHistogram ) throws ArrayIndexOutOfBoundsException { int arrayLength = fromHistogram . integerValuesHistogram . countsArrayLength ; AbstractHistogram fromIntegerHistogram = fromHistogram . integerValuesHistogram ; for ( int i = 0 ; i < arrayLength ; i ++ ) { long count = fromIntegerHistogram . getCountAtIndex ( i ) ; if ( count > 0 ) { recordValueWithCount ( fromIntegerHistogram . valueFromIndex ( i ) * fromHistogram . integerToDoubleValueConversionRatio , count ) ; } } }
|
Add the contents of another histogram to this one .
| 130
| 11
|
154,590
|
public void subtract ( final DoubleHistogram otherHistogram ) { int arrayLength = otherHistogram . integerValuesHistogram . countsArrayLength ; AbstractHistogram otherIntegerHistogram = otherHistogram . integerValuesHistogram ; for ( int i = 0 ; i < arrayLength ; i ++ ) { long otherCount = otherIntegerHistogram . getCountAtIndex ( i ) ; if ( otherCount > 0 ) { double otherValue = otherIntegerHistogram . valueFromIndex ( i ) * otherHistogram . integerToDoubleValueConversionRatio ; if ( getCountAtValue ( otherValue ) < otherCount ) { throw new IllegalArgumentException ( "otherHistogram count (" + otherCount + ") at value " + otherValue + " is larger than this one's (" + getCountAtValue ( otherValue ) + ")" ) ; } recordValueWithCount ( otherValue , - otherCount ) ; } } }
|
Subtract the contents of another histogram from this one .
| 197
| 13
|
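A short usage sketch tying the copy, add, and subtract rows together, assuming the org.HdrHistogram package these methods come from:

```java
import org.HdrHistogram.DoubleHistogram;

public class HistogramDemo {
    public static void main(String[] args) {
        DoubleHistogram a = new DoubleHistogram(3); // 3 significant value digits
        a.recordValue(1.25);
        a.recordValue(2.5);

        DoubleHistogram b = a.copy();   // independent copy: total count = 2
        b.add(a);                       // total count = 4
        b.subtract(a);                  // back to total count = 2
        System.out.println(b.getTotalCount()); // prints 2
    }
}
```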
154,591
|
public double highestEquivalentValue ( final double value ) { double nextNonEquivalentValue = nextNonEquivalentValue ( value ) ; // Theoretically, nextNonEquivalentValue - ulp(nextNonEquivalentValue) == nextNonEquivalentValue // is possible (if the ulp size switches right at nextNonEquivalentValue), so drop by 2 ulps and // increment back up to closest within-ulp value. double highestEquivalentValue = nextNonEquivalentValue - ( 2 * Math . ulp ( nextNonEquivalentValue ) ) ; while ( highestEquivalentValue + Math . ulp ( highestEquivalentValue ) < nextNonEquivalentValue ) { highestEquivalentValue += Math . ulp ( highestEquivalentValue ) ; } return highestEquivalentValue ; }
|
Get the highest value that is equivalent to the given value within the histogram s resolution . Where equivalent means that value samples recorded for any two equivalent values are counted in a common total count .
| 167
| 38
|
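The two-ulp step-down loop is effectively computing the largest double strictly below nextNonEquivalentValue. A tiny demo of the stepping idea checked against Math.nextDown (my comparison, not library code):

```java
public class UlpDemo {
    public static void main(String[] args) {
        double bound = 1.0;
        double v = bound - 2 * Math.ulp(bound);        // drop two ulps: safely below
        while (v + Math.ulp(v) < bound) {
            v += Math.ulp(v);                          // climb back to the closest value
        }
        System.out.println(v == Math.nextDown(bound)); // true: largest double < 1.0
    }
}
```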
154,592
|
public static DoubleHistogram decodeFromCompressedByteBuffer ( final ByteBuffer buffer , final long minBarForHighestToLowestValueRatio ) throws DataFormatException { return decodeFromCompressedByteBuffer ( buffer , Histogram . class , minBarForHighestToLowestValueRatio ) ; }
|
Construct a new DoubleHistogram by decoding it from a compressed form in a ByteBuffer .
| 65
| 18
|
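A round-trip sketch for this decode entry point, paired with the matching encode call from the same library (buffer sizing via getNeededByteBufferCapacity is assumed from the HdrHistogram API):

```java
import java.nio.ByteBuffer;
import java.util.zip.DataFormatException;
import org.HdrHistogram.DoubleHistogram;

public class CodecDemo {
    public static void main(String[] args) throws DataFormatException {
        DoubleHistogram original = new DoubleHistogram(3);
        original.recordValue(42.0);

        ByteBuffer buffer = ByteBuffer.allocate(original.getNeededByteBufferCapacity());
        original.encodeIntoCompressedByteBuffer(buffer); // compress into the buffer
        buffer.rewind();                                 // reset position for reading

        DoubleHistogram decoded =
                DoubleHistogram.decodeFromCompressedByteBuffer(buffer, 2);
        System.out.println(decoded.getTotalCount());     // 1
    }
}
```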
154,593
|
public Object getValueAt ( int row , int col ) { if ( row >= rows . size ( ) ) { return null ; } Object [ ] colArray = ( Object [ ] ) rows . elementAt ( row ) ; if ( col >= colArray . length ) { return null ; } return colArray [ col ] ; }
|
Get the object at the specified cell location .
| 69
| 9
|
154,594
|
public void setHead ( Object [ ] h ) { headers = new Object [ h . length ] ; // System.arraycopy(h, 0, headers, 0, h.length); for ( int i = 0 ; i < h . length ; i ++ ) { headers [ i ] = h [ i ] ; } }
|
Set the names of the column headings .
| 68
| 9
|
154,595
|
public void addRow ( Object [ ] r ) { Object [ ] row = new Object [ r . length ] ; // System.arraycopy(r, 0, row, 0, r.length); for ( int i = 0 ; i < r . length ; i ++ ) { row [ i ] = r [ i ] ; if ( row [ i ] == null ) { // row[i] = "(null)"; } } rows . addElement ( row ) ; }
|
Append a tuple to the end of the table .
| 100
| 11
|
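Both setHead and addRow take defensive element-by-element copies of the caller's array (the System.arraycopy alternative is left commented out in the source). For reference, the idiomatic equivalent when no per-element null handling is wanted:

```java
import java.util.Arrays;

public class DefensiveCopy {
    // The table keeps its own array, so later mutation of the caller's
    // array cannot change stored headers or rows.
    static Object[] snapshot(Object[] source) {
        return Arrays.copyOf(source, source.length);
    }
}
```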
154,596
|
public Object [ ] getCurrent ( ) { if ( currentPos < 0 || currentPos >= size ) { return null ; } if ( currentPos == currentOffset + table . length ) { getBlock ( currentOffset + table . length ) ; } return table [ currentPos - currentOffset ] ; }
|
Returns the current row object . Type of object is implementation defined .
| 62
| 13
|
154,597
|
void getBlock ( int offset ) { try { RowSetNavigatorClient source = session . getRows ( id , offset , baseBlockSize ) ; table = source . table ; currentOffset = source . currentOffset ; } catch ( HsqlException e ) { } }
|
baseBlockSize remains unchanged .
| 57
| 6
|
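getCurrent and getBlock together implement windowed row access: one block of baseBlockSize rows is held locally, and the next block is fetched lazily when the cursor steps past it. A simplified sketch of that pattern (Fetcher is a hypothetical stand-in for the session.getRows round trip):

```java
// Windowed row access: keep one block in memory and refetch when the
// requested position falls outside it.
public final class BlockedNavigator {
    interface Fetcher { Object[][] fetch(int offset, int blockSize); }

    private final Fetcher fetcher;
    private final int blockSize;
    private Object[][] block = new Object[0][];
    private int blockOffset = 0;

    BlockedNavigator(Fetcher fetcher, int blockSize) {
        this.fetcher = fetcher;
        this.blockSize = blockSize;
    }

    Object[] get(int pos) {
        if (pos < blockOffset || pos >= blockOffset + block.length) {
            block = fetcher.fetch(pos, blockSize); // pull the block holding pos
            blockOffset = pos;
        }
        return block[pos - blockOffset];
    }
}
```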
154,598
|
@ Override public ResultSet getBestRowIdentifier ( String catalog , String schema , String table , int scope , boolean nullable ) throws SQLException { checkClosed ( ) ; throw SQLError . noSupport ( ) ; }
|
Retrieves a description of a table s optimal set of columns that uniquely identifies a row .
| 52
| 19
|
154,599
|
@ Override public ResultSet getCatalogs ( ) throws SQLException { checkClosed ( ) ; VoltTable result = new VoltTable ( new VoltTable . ColumnInfo ( "TABLE_CAT" , VoltType . STRING ) ) ; result . addRow ( new Object [ ] { catalogString } ) ; return new JDBC4ResultSet ( null , result ) ; }
|
Retrieves the catalog names available in this database .
| 82
| 11
|
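Consuming this getCatalogs implementation looks like any other JDBC driver; a usage sketch (the connection URL format is an assumption, check the VoltDB JDBC docs):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class CatalogsDemo {
    public static void main(String[] args) throws Exception {
        // URL is illustrative only.
        Connection conn = DriverManager.getConnection("jdbc:voltdb://localhost:21212");
        try (ResultSet rs = conn.getMetaData().getCatalogs()) {
            while (rs.next()) {
                System.out.println(rs.getString("TABLE_CAT")); // single catalog row
            }
        }
        conn.close();
    }
}
```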