Columns: idx (int64, 0 to 165k) | question (string, lengths 73 to 4.15k) | target (string, lengths 5 to 918) | len_question (int64, 21 to 890) | len_target (int64, 3 to 255)
155,800
public static int findNot ( int [ ] array , int value ) { for ( int i = 0 ; i < array . length ; i ++ ) { if ( array [ i ] != value ) { return i ; } } return - 1 ; }
Returns the index of the first element of the array that is not equal to the given value, or -1 if every element equals it.
52
17
155,801
public static boolean areEqualSets ( int [ ] arra , int [ ] arrb ) { return arra . length == arrb . length && ArrayUtil . haveEqualSets ( arra , arrb , arra . length ) ; }
Returns true if arra and arrb contain the same set of integers not necessarily in the same order . This implies the arrays are of the same length .
52
30
155,802
public static boolean haveEqualArrays ( int [ ] arra , int [ ] arrb , int count ) { if ( count > arra . length || count > arrb . length ) { return false ; } for ( int j = 0 ; j < count ; j ++ ) { if ( arra [ j ] != arrb [ j ] ) { return false ; } } return true ; }
Returns true if the first count elements of arra and arrb are identical subarrays of integers
82
19
155,803
public static boolean haveEqualArrays ( Object [ ] arra , Object [ ] arrb , int count ) { if ( count > arra . length || count > arrb . length ) { return false ; } for ( int j = 0 ; j < count ; j ++ ) { if ( arra [ j ] != arrb [ j ] ) { if ( arra [ j ] == null || ! arra [ j ] . equals ( arrb [ j ] ) ) { return false ; } } } return true ; }
Returns true if the first count elements of arra and arrb are identical subarrays of Objects
108
19
155,804
public static int countSameElements ( byte [ ] arra , int start , byte [ ] arrb ) { int k = 0 ; int limit = arra . length - start ; if ( limit > arrb . length ) { limit = arrb . length ; } for ( int i = 0 ; i < limit ; i ++ ) { if ( arra [ i + start ] == arrb [ i ] ) { k ++ ; } else { break ; } } return k ; }
Returns the count of elements in arra from position start that are sequentially equal to the elements of arrb .
100
22
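A quick usage sketch for countSameElements above; a hypothetical illustration, assuming the statics live in the same ArrayUtil class referenced in entry 155,801:

byte[] a = { 1, 2, 3, 4, 5 };
byte[] b = { 3, 4, 9 };
// Elements of a from position 2 are {3, 4, 5}; they match b in the first two positions.
int matched = ArrayUtil.countSameElements(a, 2, b); // 2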
155,805
public static int find ( byte [ ] arra , int start , int limit , byte [ ] arrb ) { int k = start ; limit = limit - arrb . length + 1 ; int value = arrb [ 0 ] ; for ( ; k < limit ; k ++ ) { if ( arra [ k ] == value ) { if ( arrb . length == 1 ) { return k ; } if ( containsAt ( arra , k , arrb ) ) { return k ; } } } return - 1 ; }
Returns the index of the first occurrence of arrb in arra, or -1 if not found.
108
22
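A hypothetical usage sketch of the subarray find above (assuming the same ArrayUtil class and its containsAt helper, entry 155,811):

byte[] haystack = { 7, 7, 2, 5, 2, 5, 9 };
byte[] needle = { 2, 5, 9 };
// Search the whole array by passing limit = haystack.length.
int at = ArrayUtil.find(haystack, 0, haystack.length, needle); // 4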
155,806
public static int find ( byte [ ] arra , int start , int limit , int b , int c ) { int k = start ; for ( ; k < limit ; k ++ ) { if ( arra [ k ] == b || arra [ k ] == c ) { return k ; } } return - 1 ; }
Returns the index of the first occurrence of b or c in arra, searching from start up to limit, or -1 if not found.
66
17
155,807
public static void intIndexesToBooleanArray ( int [ ] arra , boolean [ ] arrb ) { for ( int i = 0 ; i < arra . length ; i ++ ) { if ( arra [ i ] < arrb . length ) { arrb [ arra [ i ] ] = true ; } } }
Sets elements of arrb to true at the indexes that appear in arra; indexes beyond the length of arrb are ignored.
67
14
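A minimal sketch of the index-to-flags conversion above (ArrayUtil class assumed):

int[] indexes = { 0, 3, 7 }; // 7 is beyond flags.length and is silently ignored
boolean[] flags = new boolean[5];
ArrayUtil.intIndexesToBooleanArray(indexes, flags);
// flags is now { true, false, false, true, false }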
155,808
public static boolean containsAllTrueElements ( boolean [ ] arra , boolean [ ] arrb ) { for ( int i = 0 ; i < arra . length ; i ++ ) { if ( arrb [ i ] && ! arra [ i ] ) { return false ; } } return true ; }
Returns true if, for each true element in arrb, the corresponding element in arra is also true.
62
17
155,809
public static int countTrueElements ( boolean [ ] arra ) { int count = 0 ; for ( int i = 0 ; i < arra . length ; i ++ ) { if ( arra [ i ] ) { count ++ ; } } return count ; }
Return count of true elements in array
53
7
155,810
public static boolean hasNull ( Object [ ] array , int [ ] columnMap ) { int count = columnMap . length ; for ( int i = 0 ; i < count ; i ++ ) { if ( array [ columnMap [ i ] ] == null ) { return true ; } } return false ; }
Determines if the array has a null value at any of the positions given in the columnMap array.
64
23
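A small sketch of hasNull above; the columnMap selects which positions are checked (ArrayUtil class assumed):

Object[] row = { "x", null, "z" };
ArrayUtil.hasNull(row, new int[] { 0, 2 }); // false: positions 0 and 2 are non-null
ArrayUtil.hasNull(row, new int[] { 0, 1 }); // true: position 1 is null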
155,811
public static boolean containsAt ( byte [ ] arra , int start , byte [ ] arrb ) { return countSameElements ( arra , start , arrb ) == arrb . length ; }
Returns true if arra from position start contains all elements of arrb in sequential order .
41
17
155,812
public static int countStartElementsAt ( byte [ ] arra , int start , byte [ ] arrb ) { int k = 0 ; mainloop : for ( int i = start ; i < arra . length ; i ++ ) { for ( int j = 0 ; j < arrb . length ; j ++ ) { if ( arra [ i ] == arrb [ j ] ) { k ++ ; continue mainloop ; } } break ; } return k ; }
Returns the count of elements in arra from position start that are among the elements of arrb . Stops at any element not in arrb .
97
29
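A sketch of countStartElementsAt above, which counts the leading run of elements drawn from arrb (ArrayUtil class assumed):

byte[] data = { 1, 2, 1, 9, 1 };
byte[] allowed = { 1, 2 };
int run = ArrayUtil.countStartElementsAt(data, 0, allowed); // 3: stops at the 9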
155,813
public static int [ ] arraySlice ( int [ ] source , int start , int count ) { int [ ] slice = new int [ count ] ; System . arraycopy ( source , start , slice , 0 , count ) ; return slice ; }
Returns a slice of count elements of source, starting at position start.
52
15
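A one-line sketch of arraySlice above (ArrayUtil class assumed):

int[] src = { 10, 20, 30, 40 };
int[] mid = ArrayUtil.arraySlice(src, 1, 2); // { 20, 30 }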
155,814
public static void fillArray ( Object [ ] array , Object value ) { int to = array . length ; while ( -- to >= 0 ) { array [ to ] = value ; } }
Fills the array with a value .
39
8
155,815
public static Object duplicateArray ( Object source ) { int size = Array . getLength ( source ) ; Object newarray = Array . newInstance ( source . getClass ( ) . getComponentType ( ) , size ) ; System . arraycopy ( source , 0 , newarray , 0 , size ) ; return newarray ; }
Returns a duplicate of the given array.
68
8
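duplicateArray above uses java.lang.reflect.Array so that one method can copy arrays of any component type, including primitives; a sketch (ArrayUtil class assumed):

int[] original = { 1, 2, 3 };
int[] copy = (int[]) ArrayUtil.duplicateArray(original); // independent copy, same runtime type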
155,816
public static Object resizeArrayIfDifferent ( Object source , int newsize ) { int oldsize = Array . getLength ( source ) ; if ( oldsize == newsize ) { return source ; } Object newarray = Array . newInstance ( source . getClass ( ) . getComponentType ( ) , newsize ) ; if ( oldsize < newsize ) { newsize = oldsize ; } System . arraycopy ( source , 0 , newarray , 0 , newsize ) ; return newarray ; }
Returns the given array if newsize is the same as the existing size; otherwise returns a new array of the given size containing as many elements of the original array as it can hold.
106
33
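A sketch of the identity-preserving resize above (ArrayUtil class assumed):

int[] five = new int[5];
int[] same = (int[]) ArrayUtil.resizeArrayIfDifferent(five, 5);  // same == five, no copy made
int[] eight = (int[]) ArrayUtil.resizeArrayIfDifferent(five, 8); // new array, first 5 elements copied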
155,817
public static void copyAdjustArray ( Object source , Object dest , Object addition , int colindex , int adjust ) { int length = Array . getLength ( source ) ; if ( colindex < 0 ) { System . arraycopy ( source , 0 , dest , 0 , length ) ; return ; } System . arraycopy ( source , 0 , dest , 0 , colindex ) ; if ( adjust == 0 ) { int endcount = length - colindex - 1 ; Array . set ( dest , colindex , addition ) ; if ( endcount > 0 ) { System . arraycopy ( source , colindex + 1 , dest , colindex + 1 , endcount ) ; } } else if ( adjust < 0 ) { int endcount = length - colindex - 1 ; if ( endcount > 0 ) { System . arraycopy ( source , colindex + 1 , dest , colindex , endcount ) ; } } else { int endcount = length - colindex ; Array . set ( dest , colindex , addition ) ; if ( endcount > 0 ) { System . arraycopy ( source , colindex , dest , colindex + 1 , endcount ) ; } } }
Copies elements of source to dest. If adjust is -1, the element at colindex is not copied. If adjust is +1, that element is filled with the Object addition. All the rest of the elements in source are shifted left or right accordingly when they are copied. If adjust is 0, the addition is copied over the element at colindex.
247
70
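The three adjust modes of copyAdjustArray above, as a hypothetical sketch (ArrayUtil class assumed; dest must already be sized for the result):

Object[] src = { "a", "b", "c" };

Object[] overwritten = new Object[3];
ArrayUtil.copyAdjustArray(src, overwritten, "X", 1, 0);  // { "a", "X", "c" }

Object[] removed = new Object[2];
ArrayUtil.copyAdjustArray(src, removed, null, 1, -1);    // { "a", "c" }

Object[] inserted = new Object[4];
ArrayUtil.copyAdjustArray(src, inserted, "X", 1, +1);    // { "a", "X", "b", "c" }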
155,818
private static ColumnInfo [ ] prependColumn ( ColumnInfo firstColumn , ColumnInfo [ ] columns ) { int allLen = 1 + columns . length ; ColumnInfo [ ] allColumns = new ColumnInfo [ allLen ] ; allColumns [ 0 ] = firstColumn ; for ( int i = 0 ; i < columns . length ; i ++ ) { allColumns [ i + 1 ] = columns [ i ] ; } return allColumns ; }
Given a column and an array of columns return a new array of columns with the single guy prepended onto the others . This function is used in the constructor below so that one constructor can call another without breaking Java rules about chained constructors being the first thing called .
96
53
155,819
public final String getColumnName ( int index ) { assert ( verifyTableInvariants ( ) ) ; if ( ( index < 0 ) || ( index >= m_colCount ) ) { throw new IllegalArgumentException ( "Not a valid column index." ) ; } // move to the start of the list of column names int pos = POS_COL_TYPES + m_colCount ; String name = null ; for ( int i = 0 ; i < index ; i ++ ) { pos += m_buffer . getInt ( pos ) + 4 ; } name = readString ( pos , METADATA_ENCODING ) ; assert ( name != null ) ; assert ( verifyTableInvariants ( ) ) ; return name ; }
Return the name of the column with the specified index .
157
11
155,820
public final void addRow ( Object ... values ) { assert ( verifyTableInvariants ( ) ) ; if ( m_readOnly ) { throw new IllegalStateException ( "Table is read-only. Make a copy before changing." ) ; } if ( m_colCount == 0 ) { throw new IllegalStateException ( "Table has no columns defined" ) ; } if ( values . length != m_colCount ) { throw new IllegalArgumentException ( values . length + " arguments but table has " + m_colCount + " columns" ) ; } // memoize the start of this row in case we roll back final int pos = m_buffer . position ( ) ; try { // Allow the buffer to grow to max capacity m_buffer . limit ( m_buffer . capacity ( ) ) ; // advance the row size value m_buffer . position ( pos + 4 ) ; // where does the type bytes start // skip rowstart + status code + colcount int typePos = POS_COL_TYPES ; for ( int col = 0 ; col < m_colCount ; col ++ ) { Object value = values [ col ] ; VoltType columnType = VoltType . get ( m_buffer . get ( typePos + col ) ) ; addColumnValue ( value , columnType , col ) ; } // // Note, there is some near-identical code in both row add methods. // [ add(..) and addRow(..) ] // If you change code below here, change it in the other method too. // (It would be nice to re-factor, but I couldn't make a clean go at // it quickly - Hugg) // final int rowsize = m_buffer . position ( ) - pos - 4 ; assert ( rowsize >= 0 ) ; // check for too big rows if ( rowsize > VoltTableRow . MAX_TUPLE_LENGTH ) { throw new VoltOverflowException ( "Table row total length larger than allowed max " + VoltTableRow . MAX_TUPLE_LENGTH_STR ) ; } // buffer overflow is caught and handled below. m_buffer . putInt ( pos , rowsize ) ; m_rowCount ++ ; m_buffer . putInt ( m_rowStart , m_rowCount ) ; } catch ( VoltTypeException vte ) { // revert the row size advance and any other // buffer additions m_buffer . position ( pos ) ; throw vte ; } catch ( BufferOverflowException e ) { m_buffer . position ( pos ) ; expandBuffer ( ) ; addRow ( values ) ; } // row was too big, reset and rethrow catch ( VoltOverflowException e ) { m_buffer . position ( pos ) ; throw e ; } catch ( IllegalArgumentException e ) { m_buffer . position ( pos ) ; // if this was thrown because of a lack of space // then grow the buffer // the number 32 was picked out of a hat ( maybe a bug if str > 32 ) if ( m_buffer . limit ( ) - m_buffer . position ( ) < 32 ) { expandBuffer ( ) ; addRow ( values ) ; } else { throw e ; } } finally { // constrain buffer limit back to the new position m_buffer . limit ( m_buffer . position ( ) ) ; } assert ( verifyTableInvariants ( ) ) ; }
Append a new row to the table using the supplied column values .
715
14
155,821
public static String varbinaryToPrintableString ( byte [ ] bin ) { PureJavaCrc32 crc = new PureJavaCrc32 ( ) ; StringBuilder sb = new StringBuilder ( ) ; sb . append ( "bin[crc:" ) ; crc . update ( bin ) ; sb . append ( crc . getValue ( ) ) ; sb . append ( ",value:0x" ) ; String hex = Encoder . hexEncode ( bin ) ; if ( hex . length ( ) > 13 ) { sb . append ( hex . substring ( 0 , 10 ) ) ; sb . append ( "..." ) ; } else { sb . append ( hex ) ; } sb . append ( "]" ) ; return sb . toString ( ) ; }
Make a printable short string for a varbinary. The string includes a CRC and the contents of the varbinary in hex. Contents longer than 13 chars are truncated and ellipsized. Yes, ellipsized is totally a word.
172
47
155,822
@ Override public String toJSONString ( ) { JSONStringer js = new JSONStringer ( ) ; try { js . object ( ) ; // status code (1 byte) js . keySymbolValuePair ( JSON_STATUS_KEY , getStatusCode ( ) ) ; // column schema js . key ( JSON_SCHEMA_KEY ) . array ( ) ; for ( int i = 0 ; i < getColumnCount ( ) ; i ++ ) { js . object ( ) ; js . keySymbolValuePair ( JSON_NAME_KEY , getColumnName ( i ) ) ; js . keySymbolValuePair ( JSON_TYPE_KEY , getColumnType ( i ) . getValue ( ) ) ; js . endObject ( ) ; } js . endArray ( ) ; // row data js . key ( JSON_DATA_KEY ) . array ( ) ; VoltTableRow row = cloneRow ( ) ; row . resetRowPosition ( ) ; while ( row . advanceRow ( ) ) { js . array ( ) ; for ( int i = 0 ; i < getColumnCount ( ) ; i ++ ) { row . putJSONRep ( i , js ) ; } js . endArray ( ) ; } js . endArray ( ) ; js . endObject ( ) ; } catch ( JSONException e ) { e . printStackTrace ( ) ; throw new RuntimeException ( "Failed to serialized a table to JSON." , e ) ; } return js . toString ( ) ; }
Get a JSON representation of this table .
324
8
155,823
public static VoltTable fromJSONString ( String json ) throws JSONException , IOException { JSONObject jsonObj = new JSONObject ( json ) ; return fromJSONObject ( jsonObj ) ; }
Construct a table from a JSON string . Only parses VoltDB VoltTable JSON format .
40
18
155,824
VoltTable semiDeepCopy ( ) { assert ( verifyTableInvariants ( ) ) ; // share the immutable metadata if it's present for tests final VoltTable cloned = new VoltTable ( m_extraMetadata ) ; cloned . m_colCount = m_colCount ; cloned . m_rowCount = m_rowCount ; cloned . m_rowStart = m_rowStart ; cloned . m_buffer = m_buffer . duplicate ( ) ; cloned . m_activeRowIndex = m_activeRowIndex ; cloned . m_hasCalculatedOffsets = m_hasCalculatedOffsets ; cloned . m_memoizedBufferOffset = m_memoizedBufferOffset ; cloned . m_memoizedRowOffset = m_memoizedRowOffset ; cloned . m_offsets = m_offsets == null ? null : m_offsets . clone ( ) ; cloned . m_position = m_position ; cloned . m_schemaString = m_schemaString == null ? null : m_schemaString . clone ( ) ; cloned . m_wasNull = m_wasNull ; // make the new table read only cloned . m_readOnly = true ; assert ( verifyTableInvariants ( ) ) ; assert ( cloned . verifyTableInvariants ( ) ) ; return cloned ; }
Non-public method to duplicate a table. It's possible this might be useful to end-users of VoltDB, but we should talk about naming and semantics first; don't just make this public.
300
39
155,825
public ColumnInfo [ ] getTableSchema ( ) { ColumnInfo [ ] schema = new ColumnInfo [ m_colCount ] ; for ( int i = 0 ; i < m_colCount ; i ++ ) { ColumnInfo col = new ColumnInfo ( getColumnName ( i ) , getColumnType ( i ) ) ; schema [ i ] = col ; } return schema ; }
Get the schema of the table. Can be fed into another table's constructor.
81
16
155,826
@ Override public void checkProcessorConfig ( Properties properties ) { String exportClientClass = properties . getProperty ( EXPORT_TO_TYPE ) ; Preconditions . checkNotNull ( exportClientClass , "export to type is undefined or custom export plugin class missing." ) ; try { final Class < ? > clientClass = Class . forName ( exportClientClass ) ; ExportClientBase client = ( ExportClientBase ) clientClass . newInstance ( ) ; client . configure ( properties ) ; } catch ( Throwable t ) { throw new RuntimeException ( t ) ; } }
Pass processor-specific configuration properties for checking.
122
8
155,827
private long extractCommittedSpHandle ( ExportRow row , long committedSeqNo ) { long ret = 0 ; if ( committedSeqNo == ExportDataSource . NULL_COMMITTED_SEQNO ) { return ret ; } // Get the rows's sequence number (3rd column) long seqNo = ( long ) row . values [ 2 ] ; if ( seqNo != committedSeqNo ) { return ret ; } // Get the row's sp handle (1rst column) ret = ( long ) row . values [ 0 ] ; return ret ; }
If the row is the last committed row, return the SpHandle; otherwise return 0.
121
15
155,828
public void processMaterializedViewWarnings ( Database db , HashMap < Table , String > matViewMap ) throws VoltCompilerException { for ( Table table : db . getTables ( ) ) { for ( MaterializedViewInfo mvInfo : table . getViews ( ) ) { for ( Statement stmt : mvInfo . getFallbackquerystmts ( ) ) { // If there is any statement in the fallBackQueryStmts map, then // there must be some min/max columns. // Only check if the plan uses index scan. if ( needsWarningForSingleTableView ( getPlanNodeTreeFromCatalogStatement ( db , stmt ) ) ) { // If we are using IS NOT DISTINCT FROM as our equality operator (which is necessary // to get correct answers), then there will often be no index scans in the plan, // since we cannot optimize IS NOT DISTINCT FROM. m_compiler . addWarn ( "No index found to support UPDATE and DELETE on some of the min() / max() columns " + "in the materialized view " + mvInfo . getTypeName ( ) + ", and a sequential scan might be issued when current min / max value is updated / deleted." ) ; break ; } } } // If it's a view on join query case, we check if the join can utilize indices. // We throw out warning only if no index scan is used in the plan (ENG-10864). MaterializedViewHandlerInfo mvHandlerInfo = table . getMvhandlerinfo ( ) . get ( "mvHandlerInfo" ) ; if ( mvHandlerInfo != null ) { Statement createQueryStatement = mvHandlerInfo . getCreatequery ( ) . get ( "createQuery" ) ; if ( needsWarningForJoinQueryView ( getPlanNodeTreeFromCatalogStatement ( db , createQueryStatement ) ) ) { m_compiler . addWarn ( "No index found to support some of the join operations required to refresh the materialized view " + table . getTypeName ( ) + ". The refreshing may be slow." ) ; } } } }
Process materialized view warnings .
454
6
155,829
public static MaterializedViewInfo getMaterializedViewInfo ( Table tbl ) { MaterializedViewInfo mvInfo = null ; Table source = tbl . getMaterializer ( ) ; if ( source != null ) { mvInfo = source . getViews ( ) . get ( tbl . getTypeName ( ) ) ; } return mvInfo ; }
If the argument table is a single-table materialized view, then return the attendant MaterializedViewInfo object. Otherwise return null.
77
26
155,830
public static long getFragmentIdForPlanHash ( byte [ ] planHash ) { Sha1Wrapper key = new Sha1Wrapper ( planHash ) ; FragInfo frag = null ; synchronized ( FragInfo . class ) { frag = m_plansByHash . get ( key ) ; } assert ( frag != null ) ; return frag . fragId ; }
Get the site-local fragment id for a given plan, identified by its 20-byte SHA-1 hash.
77
21
155,831
public static String getStmtTextForPlanHash ( byte [ ] planHash ) { Sha1Wrapper key = new Sha1Wrapper ( planHash ) ; FragInfo frag = null ; synchronized ( FragInfo . class ) { frag = m_plansByHash . get ( key ) ; } assert ( frag != null ) ; // SQL statement text is not stored in the repository for ad hoc statements // -- it may be inaccurate because we parameterize the statement on its constants. // Callers know if they are asking about ad hoc or pre-planned fragments, // and shouldn't call this method for the ad hoc case. assert ( frag . stmtText != null ) ; return frag . stmtText ; }
Get the statement text for the fragment identified by its hash
149
11
155,832
public static long loadOrAddRefPlanFragment ( byte [ ] planHash , byte [ ] plan , String stmtText ) { Sha1Wrapper key = new Sha1Wrapper ( planHash ) ; synchronized ( FragInfo . class ) { FragInfo frag = m_plansByHash . get ( key ) ; if ( frag == null ) { frag = new FragInfo ( key , plan , m_nextFragId ++ , stmtText ) ; m_plansByHash . put ( frag . hash , frag ) ; m_plansById . put ( frag . fragId , frag ) ; if ( m_plansById . size ( ) > ExecutionEngine . EE_PLAN_CACHE_SIZE ) { evictLRUfragment ( ) ; } } // Bit of a hack to work around an issue where a statement-less adhoc // fragment could be identical to a statement-needing regular procedure. // This doesn't really address the broader issue that fragment hashes // are not 1-1 with SQL statements. if ( frag . stmtText == null ) { frag . stmtText = stmtText ; } // The fragment MAY be in the LRU map. // An incremented refCount is a lazy way to keep it safe from eviction // without having to update the map. // This optimizes for popular fragments in a small or stable cache that may be reused // many times before the eviction process needs to take any notice. frag . refCount ++ ; return frag . fragId ; } }
Get the site-local fragment id for a given plan identified by its 20-byte SHA-1 hash. If the plan isn't known to this SPC, load it up; otherwise addref it.
322
40
155,833
public static byte [ ] planForFragmentId ( long fragmentId ) { assert ( fragmentId > 0 ) ; FragInfo frag = null ; synchronized ( FragInfo . class ) { frag = m_plansById . get ( fragmentId ) ; } assert ( frag != null ) ; return frag . plan ; }
Get the full JSON plan associated with a given site-local fragment id. Called by the EE.
66
19
155,834
public List < AbstractExpression > bindingToIndexedExpression ( AbstractExpression expr ) { // Defer the result construction for as long as possible on the // assumption that this function mostly gets applied to eliminate // negative cases. if ( m_type != expr . m_type ) { // The only allowed difference in expression types is between a // parameter and its original constant value. // That's handled in the independent override. return null ; } // From here, this is much like the straight equality check, // except that this function and "equals" must each call themselves // in their recursions. // Delegating to this factored-out component of the "equals" // implementation eases simultaneous refinement of both methods. if ( ! hasEqualAttributes ( expr ) ) { return null ; } // The derived classes have verified that any added attributes // are identical. // Check that the presence, or lack, of children is the same if ( ( expr . m_left == null ) != ( m_left == null ) ) { return null ; } if ( ( expr . m_right == null ) != ( m_right == null ) ) { return null ; } if ( ( expr . m_args == null ) != ( m_args == null ) ) { return null ; } // Check that the children identify themselves as matching List < AbstractExpression > leftBindings = null ; if ( m_left != null ) { leftBindings = m_left . bindingToIndexedExpression ( expr . m_left ) ; if ( leftBindings == null ) { return null ; } } List < AbstractExpression > rightBindings = null ; if ( m_right != null ) { rightBindings = m_right . bindingToIndexedExpression ( expr . m_right ) ; if ( rightBindings == null ) { return null ; } } List < AbstractExpression > argBindings = null ; if ( m_args != null ) { if ( m_args . size ( ) != expr . m_args . size ( ) ) { return null ; } argBindings = new ArrayList <> ( ) ; int ii = 0 ; // iterate the args lists in parallel, binding pairwise for ( AbstractExpression rhs : expr . m_args ) { AbstractExpression lhs = m_args . get ( ii ++ ) ; List < AbstractExpression > moreBindings = lhs . bindingToIndexedExpression ( rhs ) ; if ( moreBindings == null ) { // fail on any non-match return null ; } argBindings . addAll ( moreBindings ) ; } } // It's a match, so gather up the details. // It's rare (if even possible) for the same bound parameter to get // listed twice, so don't worry about duplicate entries, here. // That should not cause any issue for the caller. List < AbstractExpression > result = new ArrayList <> ( ) ; if ( leftBindings != null ) { // null here can only mean no left child result . addAll ( leftBindings ) ; } if ( rightBindings != null ) { // null here can only mean no right child result . addAll ( rightBindings ) ; } if ( argBindings != null ) { // null here can only mean no args result . addAll ( argBindings ) ; } return result ; }
A relaxed form of strict expression equality: the only allowed difference is a parameter standing in for its original constant value. Returns the list of such parameter bindings, or null if the expressions don't match.
717
10
155,835
public static void toJSONArrayFromSortList ( JSONStringer stringer , List < AbstractExpression > sortExpressions , List < SortDirectionType > sortDirections ) throws JSONException { stringer . key ( SortMembers . SORT_COLUMNS ) ; stringer . array ( ) ; int listSize = sortExpressions . size ( ) ; for ( int ii = 0 ; ii < listSize ; ii ++ ) { stringer . object ( ) ; stringer . key ( SortMembers . SORT_EXPRESSION ) . object ( ) ; sortExpressions . get ( ii ) . toJSONString ( stringer ) ; stringer . endObject ( ) ; if ( sortDirections != null ) { stringer . keySymbolValuePair ( SortMembers . SORT_DIRECTION , sortDirections . get ( ii ) . toString ( ) ) ; } stringer . endObject ( ) ; } stringer . endArray ( ) ; }
Given a JSONStringer and a sequence of sort expressions and directions, serialize the sort expressions. These will be in an array which is the value of SortMembers.SORT_COLUMNS in the current object of the JSONStringer. The JSONStringer should be in object state, not array state.
207
60
155,836
public static void loadSortListFromJSONArray ( List < AbstractExpression > sortExpressions , List < SortDirectionType > sortDirections , JSONObject jobj ) throws JSONException { if ( jobj . has ( SortMembers . SORT_COLUMNS ) ) { sortExpressions . clear ( ) ; if ( sortDirections != null ) { sortDirections . clear ( ) ; } JSONArray jarray = jobj . getJSONArray ( SortMembers . SORT_COLUMNS ) ; int size = jarray . length ( ) ; for ( int ii = 0 ; ii < size ; ++ ii ) { JSONObject tempObj = jarray . getJSONObject ( ii ) ; sortExpressions . add ( fromJSONChild ( tempObj , SortMembers . SORT_EXPRESSION ) ) ; if ( sortDirections == null || ! tempObj . has ( SortMembers . SORT_DIRECTION ) ) { continue ; } String sdAsString = tempObj . getString ( SortMembers . SORT_DIRECTION ) ; sortDirections . add ( SortDirectionType . get ( sdAsString ) ) ; } } assert ( sortDirections == null || sortExpressions . size ( ) == sortDirections . size ( ) ) ; }
Load two lists from a JSONObject . One list is for sort expressions and the other is for sort directions . The lists are cleared before they are filled in . This is the inverse of toJSONArrayFromSortList .
271
44
155,837
public static List < AbstractExpression > loadFromJSONArrayChild ( List < AbstractExpression > starter , JSONObject parent , String label , StmtTableScan tableScan ) throws JSONException { if ( parent . isNull ( label ) ) { return null ; } JSONArray jarray = parent . getJSONArray ( label ) ; return loadFromJSONArray ( starter , jarray , tableScan ) ; }
For TVEs, only the column index and table index are serialized. In order to match expressions, more information is needed to recover the table name, table alias, and column name.
85
38
155,838
public AbstractExpression replaceWithTVE ( Map < AbstractExpression , Integer > aggTableIndexMap , Map < Integer , ParsedColInfo > indexToColumnMap ) { Integer ii = aggTableIndexMap . get ( this ) ; if ( ii != null ) { ParsedColInfo col = indexToColumnMap . get ( ii ) ; TupleValueExpression tve = new TupleValueExpression ( col . m_tableName , col . m_tableAlias , col . m_columnName , col . m_alias , this , ii ) ; if ( this instanceof TupleValueExpression ) { tve . setOrigStmtId ( ( ( TupleValueExpression ) this ) . getOrigStmtId ( ) ) ; } // To prevent pushdown of LIMIT when ORDER BY references an agg. ENG-3487. if ( hasAnySubexpressionOfClass ( AggregateExpression . class ) ) { tve . setHasAggregate ( true ) ; } return tve ; } AbstractExpression lnode = null ; AbstractExpression rnode = null ; if ( m_left != null ) { lnode = m_left . replaceWithTVE ( aggTableIndexMap , indexToColumnMap ) ; } if ( m_right != null ) { rnode = m_right . replaceWithTVE ( aggTableIndexMap , indexToColumnMap ) ; } ArrayList < AbstractExpression > newArgs = null ; boolean changed = false ; if ( m_args != null ) { newArgs = new ArrayList <> ( ) ; for ( AbstractExpression expr : m_args ) { AbstractExpression ex = expr . replaceWithTVE ( aggTableIndexMap , indexToColumnMap ) ; newArgs . add ( ex ) ; if ( ex != expr ) { changed = true ; } } } if ( m_left != lnode || m_right != rnode || changed ) { AbstractExpression resExpr = clone ( ) ; resExpr . setLeft ( lnode ) ; resExpr . setRight ( rnode ) ; resExpr . setArgs ( newArgs ) ; return resExpr ; } return this ; }
This function recursively replaces any subexpression matching an entry in aggTableIndexMap with an equivalent TVE . Its column index and alias are also built up here .
466
34
155,839
public boolean hasAnySubexpressionWithPredicate ( SubexprFinderPredicate pred ) { if ( pred . matches ( this ) ) { return true ; } if ( m_left != null && m_left . hasAnySubexpressionWithPredicate ( pred ) ) { return true ; } if ( m_right != null && m_right . hasAnySubexpressionWithPredicate ( pred ) ) { return true ; } if ( m_args != null ) { for ( AbstractExpression argument : m_args ) { if ( argument . hasAnySubexpressionWithPredicate ( pred ) ) { return true ; } } } return false ; }
Searches the expression tree rooted at this for nodes for which pred evaluates to true .
136
18
155,840
void refineOperandType ( VoltType valueType ) { if ( m_valueType != VoltType . NUMERIC ) { return ; } if ( valueType == VoltType . DECIMAL ) { m_valueType = VoltType . DECIMAL ; m_valueSize = VoltType . DECIMAL . getLengthInBytesForFixedTypes ( ) ; } else { m_valueType = VoltType . FLOAT ; m_valueSize = VoltType . FLOAT . getLengthInBytesForFixedTypes ( ) ; } }
Helper function to patch up NUMERIC typed constant operands and the functions and operators that they parameterize .
116
22
155,841
protected final void finalizeChildValueTypes ( ) { if ( m_left != null ) { m_left . finalizeValueTypes ( ) ; updateContentDeterminismMessage ( m_left . getContentDeterminismMessage ( ) ) ; } if ( m_right != null ) { m_right . finalizeValueTypes ( ) ; updateContentDeterminismMessage ( m_right . getContentDeterminismMessage ( ) ) ; } if ( m_args != null ) { for ( AbstractExpression argument : m_args ) { argument . finalizeValueTypes ( ) ; updateContentDeterminismMessage ( argument . getContentDeterminismMessage ( ) ) ; } } }
Do the recursive part of finalizeValueTypes as requested . Note that this updates the content non - determinism state .
147
24
155,842
protected final void resolveChildrenForTable ( Table table ) { if ( m_left != null ) { m_left . resolveForTable ( table ) ; } if ( m_right != null ) { m_right . resolveForTable ( table ) ; } if ( m_args != null ) { for ( AbstractExpression argument : m_args ) { argument . resolveForTable ( table ) ; } } }
Walk the expression tree resolving TVEs and function expressions as we go .
87
14
155,843
public boolean isValidExprForIndexesAndMVs ( StringBuffer msg , boolean isMV ) { if ( containsFunctionById ( FunctionSQL . voltGetCurrentTimestampId ( ) ) ) { msg . append ( "cannot include the function NOW or CURRENT_TIMESTAMP." ) ; return false ; } else if ( hasAnySubexpressionOfClass ( AggregateExpression . class ) ) { msg . append ( "cannot contain aggregate expressions." ) ; return false ; } else if ( hasAnySubexpressionOfClass ( AbstractSubqueryExpression . class ) ) { // There may not be any of these in HSQL1.9.3b. However, in // HSQL2.3.2 subqueries are stored as expressions. So, we may // find some here. We will keep it here for the moment. if ( isMV ) { msg . append ( "cannot contain subquery sources." ) ; } else { msg . append ( "cannot contain subqueries." ) ; } return false ; } else if ( hasUserDefinedFunctionExpression ( ) ) { msg . append ( "cannot contain calls to user defined functions." ) ; return false ; } else { return true ; } }
Return true if the given expression is usable as part of an index or of an MV's group-by and where-clause expressions. If false, put the tail of an error message in the string buffer. The string buffer will be initialized with the name of the index.
264
49
155,844
public static boolean validateExprsForIndexesAndMVs ( List < AbstractExpression > checkList , StringBuffer msg , boolean isMV ) { for ( AbstractExpression expr : checkList ) { if ( ! expr . isValidExprForIndexesAndMVs ( msg , isMV ) ) { return false ; } } return true ; }
Return true if all of the expressions in the list can be part of an index expression, or of an MV's group-by and where clauses. As with isValidExprForIndexesAndMVs for an individual expression, the StringBuffer parameter msg contains the name of the index; error messages should be appended to it.
77
63
155,845
private boolean containsFunctionById ( int functionId ) { if ( this instanceof AbstractValueExpression ) { return false ; } List < AbstractExpression > functionsList = findAllFunctionSubexpressions ( ) ; for ( AbstractExpression funcExpr : functionsList ) { assert ( funcExpr instanceof FunctionExpression ) ; if ( ( ( FunctionExpression ) funcExpr ) . hasFunctionId ( functionId ) ) { return true ; } } return false ; }
This function will recursively find any function expression with ID functionId . If found return true . Otherwise return false .
100
24
155,846
public boolean isValueTypeIndexable ( StringBuffer msg ) { if ( ! m_valueType . isIndexable ( ) ) { msg . append ( "expression of type " + m_valueType . getName ( ) ) ; return false ; } return true ; }
Returns true iff the expression is indexable. If the expression is not indexable, information about it is appended to the msg string buffer passed in.
57
29
155,847
public boolean isValueTypeUniqueIndexable ( StringBuffer msg ) { // This call to isValueTypeIndexable is needed because // all comparison, all conjunction, and some operator expressions // need to refine it to compensate for their false claims that // their value types (actually non-indexable boolean) is BIGINT. // that their value type is actually boolean. // If they were fixed, isValueTypeIndexable and // isValueTypeUniqueIndexable could be replaced by VoltType functions. if ( ! isValueTypeIndexable ( msg ) ) { return false ; } if ( ! m_valueType . isUniqueIndexable ( ) ) { msg . append ( "expression of type " + m_valueType . getName ( ) ) ; return false ; } return true ; }
Returns true iff the expression is indexable in a unique index. If the expression is not indexable, information about it is appended to the msg string buffer passed in.
163
33
155,848
public void findUnsafeOperatorsForDDL ( UnsafeOperatorsForDDL ops ) { if ( ! m_type . isSafeForDDL ( ) ) { ops . add ( m_type . symbol ( ) ) ; } if ( m_left != null ) { m_left . findUnsafeOperatorsForDDL ( ops ) ; } if ( m_right != null ) { m_right . findUnsafeOperatorsForDDL ( ops ) ; } if ( m_args != null ) { for ( AbstractExpression arg : m_args ) { arg . findUnsafeOperatorsForDDL ( ops ) ; } } }
Finds the operators in this expression that are not allowable when creating materialized views on nonempty tables, adding them to ops. We have marked all the ExpressionType enumerals and all the function id integers which are safe; these are marked statically. So we just recurse through the tree looking at operation types and function types, collecting each operator we don't like.
141
82
155,849
public AbstractExpression getFirstArgument ( ) { if ( m_left != null ) { assert ( m_args == null ) ; return m_left ; } if ( m_args != null && m_args . size ( ) > 0 ) { assert ( m_left == null && m_right == null ) ; return m_args . get ( 0 ) ; } return null ; }
Ferret out the first argument . This can be m_left or else the first element of m_args .
84
23
155,850
public static byte [ ] getHashedPassword ( ClientAuthScheme scheme , String password ) { if ( password == null ) { return null ; } MessageDigest md = null ; try { md = MessageDigest . getInstance ( ClientAuthScheme . getDigestScheme ( scheme ) ) ; } catch ( NoSuchAlgorithmException e ) { e . printStackTrace ( ) ; System . exit ( - 1 ) ; } byte hashedPassword [ ] = null ; hashedPassword = md . digest ( password . getBytes ( Constants . UTF8ENCODING ) ) ; return hashedPassword ; }
Get a hashed password using SHA-1 in a consistent way.
132
14
155,851
public static Object [ ] getAuthenticatedConnection ( String host , String username , byte [ ] hashedPassword , int port , final Subject subject , ClientAuthScheme scheme , long timeoutMillis ) throws IOException { String service = subject == null ? "database" : Constants . KERBEROS ; return getAuthenticatedConnection ( service , host , username , hashedPassword , port , subject , scheme , null , timeoutMillis ) ; }
Create a connection to a Volt server and authenticate the connection .
94
13
155,852
public JSONObject getJSONObjectForZK ( ) throws JSONException { final JSONObject jsObj = new JSONObject ( ) ; jsObj . put ( SnapshotUtil . JSON_PATH , m_path ) ; jsObj . put ( SnapshotUtil . JSON_PATH_TYPE , m_stype . toString ( ) ) ; jsObj . put ( SnapshotUtil . JSON_NONCE , m_nonce ) ; jsObj . put ( SnapshotUtil . JSON_BLOCK , m_blocking ) ; jsObj . put ( SnapshotUtil . JSON_FORMAT , m_format . toString ( ) ) ; jsObj . putOpt ( SnapshotUtil . JSON_DATA , m_data ) ; jsObj . putOpt ( SnapshotUtil . JSON_TERMINUS , m_terminus ) ; return jsObj ; }
When we write to ZK to request the snapshot, generate the JSON which will be written to the node's data.
188
23
155,853
public static DatabaseSizes getCatalogSizes ( Database dbCatalog , boolean isXDCR ) { DatabaseSizes dbSizes = new DatabaseSizes ( ) ; for ( Table table : dbCatalog . getTables ( ) ) { dbSizes . addTable ( getTableSize ( table , isXDCR ) ) ; } return dbSizes ; }
Produce a sizing of all significant database objects .
77
10
155,854
static public void main ( String [ ] sa ) throws IOException , TarMalformatException { if ( sa . length < 1 ) { System . out . println ( RB . singleton . getString ( RB . TARGENERATOR_SYNTAX , DbBackup . class . getName ( ) ) ) ; System . exit ( 0 ) ; } TarGenerator generator = new TarGenerator ( new File ( sa [ 0 ] ) , true , null ) ; if ( sa . length == 1 ) { generator . queueEntry ( "stdin" , System . in , 10240 ) ; } else { for ( int i = 1 ; i < sa . length ; i ++ ) { generator . queueEntry ( new File ( sa [ i ] ) ) ; } } generator . write ( ) ; }
Creates the specified tar file to contain the specified files, or stdin, using the default blocks-per-record and replacing the tar file if it already exists.
172
28
155,855
public static byte [ ] fileToBytes ( File path ) throws IOException { FileInputStream fin = new FileInputStream ( path ) ; byte [ ] buffer = new byte [ ( int ) fin . getChannel ( ) . size ( ) ] ; try { if ( fin . read ( buffer ) == - 1 ) { throw new IOException ( "File " + path . getAbsolutePath ( ) + " is empty" ) ; } } finally { fin . close ( ) ; } return buffer ; }
Serialize a file into bytes . Used to serialize catalog and deployment file for UpdateApplicationCatalog on the client .
106
23
155,856
public VoltTable run ( SystemProcedureExecutionContext ctx , String tableName , String columnName , String compStr , VoltTable parameter , long chunksize ) { return nibbleDeleteCommon ( ctx , tableName , columnName , compStr , parameter , chunksize , true ) ; }
Nibble delete procedure for replicated tables
63
7
155,857
public byte [ ] read ( ) throws IOException { if ( m_exception . get ( ) != null ) { throw m_exception . get ( ) ; } byte bytes [ ] = null ; if ( m_activeConverters . get ( ) == 0 ) { bytes = m_available . poll ( ) ; } else { try { bytes = m_available . take ( ) ; } catch ( InterruptedException e ) { throw new IOException ( e ) ; } } if ( bytes != null ) { m_availableBytes . addAndGet ( - 1 * bytes . length ) ; } return bytes ; }
Returns more CSV data in UTF-8 format. Returns null when there is no more data. May block.
132
23
155,858
public boolean compileFromDDL ( final String jarOutputPath , final String ... ddlFilePaths ) { if ( ddlFilePaths . length == 0 ) { compilerLog . error ( "At least one DDL file is required." ) ; return false ; } List < VoltCompilerReader > ddlReaderList ; try { ddlReaderList = DDLPathsToReaderList ( ddlFilePaths ) ; } catch ( VoltCompilerException e ) { compilerLog . error ( "Unable to open DDL file." , e ) ; return false ; } return compileInternalToFile ( jarOutputPath , null , null , ddlReaderList , null ) ; }
Compile from a set of DDL files .
146
10
155,859
public boolean compileDDLString ( String ddl , String jarPath ) { final File schemaFile = VoltProjectBuilder . writeStringToTempFile ( ddl ) ; schemaFile . deleteOnExit ( ) ; final String schemaPath = schemaFile . getPath ( ) ; return compileFromDDL ( jarPath , schemaPath ) ; }
Compile from DDL in a single string
71
9
155,860
public boolean compileEmptyCatalog ( final String jarOutputPath ) { // Use a special DDL reader to provide the contents. List < VoltCompilerReader > ddlReaderList = new ArrayList <> ( 1 ) ; ddlReaderList . add ( new VoltCompilerStringReader ( "ddl.sql" , m_emptyDDLComment ) ) ; // Seed it with the DDL so that a version upgrade hack in compileInternalToFile() // doesn't try to get the DDL file from the path. InMemoryJarfile jarFile = new InMemoryJarfile ( ) ; try { ddlReaderList . get ( 0 ) . putInJar ( jarFile , "ddl.sql" ) ; } catch ( IOException e ) { compilerLog . error ( "Failed to add DDL file to empty in-memory jar." ) ; return false ; } return compileInternalToFile ( jarOutputPath , null , null , ddlReaderList , jarFile ) ; }
Compile empty catalog jar
209
5
155,861
private void debugVerifyCatalog ( InMemoryJarfile origJarFile , Catalog origCatalog ) { final VoltCompiler autoGenCompiler = new VoltCompiler ( m_isXDCR ) ; // Make the new compiler use the original jarfile's classloader so it can // pull in the class files for procedures and imports autoGenCompiler . m_classLoader = origJarFile . getLoader ( ) ; List < VoltCompilerReader > autogenReaderList = new ArrayList <> ( 1 ) ; autogenReaderList . add ( new VoltCompilerJarFileReader ( origJarFile , AUTOGEN_DDL_FILE_NAME ) ) ; InMemoryJarfile autoGenJarOutput = new InMemoryJarfile ( ) ; autoGenCompiler . m_currentFilename = AUTOGEN_DDL_FILE_NAME ; // This call is purposely replicated in retryFailedCatalogRebuildUnderDebug, // where it provides an opportunity to set a breakpoint on a do-over when this // mainline call produces a flawed catalog that fails the catalog diff. // Keep the two calls in synch to allow debugging under the same exact conditions. Catalog autoGenCatalog = autoGenCompiler . compileCatalogInternal ( null , null , autogenReaderList , autoGenJarOutput ) ; if ( autoGenCatalog == null ) { Log . info ( "Did not verify catalog because it could not be compiled." ) ; return ; } FilteredCatalogDiffEngine diffEng = new FilteredCatalogDiffEngine ( origCatalog , autoGenCatalog , false ) ; String diffCmds = diffEng . commands ( ) ; if ( diffCmds != null && ! diffCmds . equals ( "" ) ) { // This retry is disabled by default to avoid confusing the unwary developer // with a "pointless" replay of an apparently flawed catalog rebuild. // Enable it via this flag to provide a chance to set an early breakpoint // that is only triggered in hopeless cases. if ( RETRY_FAILED_CATALOG_REBUILD_UNDER_DEBUG ) { autoGenCatalog = replayFailedCatalogRebuildUnderDebug ( autoGenCompiler , autogenReaderList , autoGenJarOutput ) ; } // Re-run a failed diff more verbosely as a pre-crash test diagnostic. diffEng = new FilteredCatalogDiffEngine ( origCatalog , autoGenCatalog , true ) ; diffCmds = diffEng . commands ( ) ; String crashAdvice = "Catalog Verification from Generated DDL failed! " + "VoltDB dev: Consider" + ( RETRY_FAILED_CATALOG_REBUILD_UNDER_DEBUG ? "" : " setting VoltCompiler.RETRY_FAILED_CATALOG_REBUILD_UNDER_DEBUG = true and" ) + " setting a breakpoint in VoltCompiler.replayFailedCatalogRebuildUnderDebug" + " to debug a replay of the faulty catalog rebuild roundtrip. " ; VoltDB . crashLocalVoltDB ( crashAdvice + "The offending diffcmds were: " + diffCmds ) ; } else { Log . info ( "Catalog verification completed successfuly." ) ; } }
Internal method that takes the generated DDL from the catalog and builds a new catalog . The generated catalog is diffed with the original catalog to verify compilation and catalog generation consistency .
684
35
155,862
private Catalog replayFailedCatalogRebuildUnderDebug ( VoltCompiler autoGenCompiler , List < VoltCompilerReader > autogenReaderList , InMemoryJarfile autoGenJarOutput ) { // Be sure to set RETRY_FAILED_CATALOG_REBUILD_UNDER_DEBUG = true to enable // this last ditch retry before crashing. // BREAKPOINT HERE! // Then step IN to debug the failed rebuild -- or, just as likely, the canonical ddl. // Or step OVER to debug just the catalog diff process, retried with verbose output -- // maybe it's just being too sensitive to immaterial changes? Catalog autoGenCatalog = autoGenCompiler . compileCatalogInternal ( null , null , autogenReaderList , autoGenJarOutput ) ; return autoGenCatalog ; }
Take two steps back to retry and potentially debug a catalog rebuild that generated an unintended change . This code is PURPOSELY redundant with the mainline call in debugVerifyCatalog above . Keep the two calls in synch and only redirect through this function in the post - mortem replay after the other call created a flawed catalog .
174
66
155,863
HashMap < String , byte [ ] > getExplainPlans ( Catalog catalog ) { HashMap < String , byte [ ] > retval = new HashMap <> ( ) ; Database db = getCatalogDatabase ( m_catalog ) ; assert ( db != null ) ; for ( Procedure proc : db . getProcedures ( ) ) { for ( Statement stmt : proc . getStatements ( ) ) { String s = "SQL: " + stmt . getSqltext ( ) + "\n" ; s += "COST: " + Integer . toString ( stmt . getCost ( ) ) + "\n" ; s += "PLAN:\n\n" ; s += Encoder . hexDecodeToString ( stmt . getExplainplan ( ) ) + "\n" ; byte [ ] b = s . getBytes ( Constants . UTF8ENCODING ) ; retval . put ( proc . getTypeName ( ) + "_" + stmt . getTypeName ( ) + ".txt" , b ) ; } } return retval ; }
Get textual explain plan info for each plan from the catalog to be shoved into the catalog jarfile .
233
20
155,864
private Catalog compileCatalogInternal ( final VoltCompilerReader cannonicalDDLIfAny , final Catalog previousCatalogIfAny , final List < VoltCompilerReader > ddlReaderList , final InMemoryJarfile jarOutput ) { m_catalog = new Catalog ( ) ; // Initialize the catalog for one cluster m_catalog . execute ( "add / clusters cluster" ) ; m_catalog . getClusters ( ) . get ( "cluster" ) . setSecurityenabled ( false ) ; // shutdown and make a new hsqldb try { Database previousDBIfAny = null ; if ( previousCatalogIfAny != null ) { previousDBIfAny = previousCatalogIfAny . getClusters ( ) . get ( "cluster" ) . getDatabases ( ) . get ( "database" ) ; } compileDatabaseNode ( cannonicalDDLIfAny , previousDBIfAny , ddlReaderList , jarOutput ) ; } catch ( final VoltCompilerException e ) { return null ; } assert ( m_catalog != null ) ; // add epoch info to catalog final int epoch = ( int ) ( TransactionIdManager . getEpoch ( ) / 1000 ) ; m_catalog . getClusters ( ) . get ( "cluster" ) . setLocalepoch ( epoch ) ; return m_catalog ; }
Internal method for compiling the catalog .
285
7
155,865
private void addExtraClasses ( final InMemoryJarfile jarOutput ) throws VoltCompilerException { List < String > addedClasses = new ArrayList <> ( ) ; for ( String className : m_addedClasses ) { /* * Only add the class if it isn't already in the output jar. * The jar will be pre-populated when performing an automatic * catalog version upgrade. */ if ( ! jarOutput . containsKey ( className ) ) { try { Class < ? > clz = Class . forName ( className , true , m_classLoader ) ; if ( addClassToJar ( jarOutput , clz ) ) { addedClasses . add ( className ) ; } } catch ( Exception e ) { String msg = "Class %s could not be loaded/found/added to the jar." ; msg = String . format ( msg , className ) ; throw new VoltCompilerException ( msg ) ; } // reset the added classes to the actual added classes } } m_addedClasses = addedClasses . toArray ( new String [ 0 ] ) ; }
Once the DDL file has been processed, take all of the extra classes found and add them to the jar.
233
21
155,866
public List < String > harvestCapturedDetail ( ) { List < String > harvested = m_capturedDiagnosticDetail ; m_capturedDiagnosticDetail = null ; return harvested ; }
Access recent plan output for diagnostic purposes
45
7
155,867
String getKeyPrefix ( StatementPartitioning partitioning , DeterminismMode detMode , String joinOrder ) { // no caching for inferred yet if ( partitioning . isInferred ( ) ) { return null ; } String joinOrderPrefix = "#" ; if ( joinOrder != null ) { joinOrderPrefix += joinOrder ; } boolean partitioned = partitioning . wasSpecifiedAsSingle ( ) ; return joinOrderPrefix + String . valueOf ( detMode . toChar ( ) ) + ( partitioned ? "P#" : "R#" ) ; }
Key prefix includes attributes that make a cached statement usable if they match
124
13
155,868
Statement getCachedStatement ( String keyPrefix , String sql ) { String key = keyPrefix + sql ; Statement candidate = m_previousCatalogStmts . get ( key ) ; if ( candidate == null ) { ++ m_stmtCacheMisses ; return null ; } // check that no underlying tables have been modified since the proc had been compiled String [ ] tablesTouched = candidate . getTablesread ( ) . split ( "," ) ; for ( String tableName : tablesTouched ) { if ( isDirtyTable ( tableName ) ) { ++ m_stmtCacheMisses ; return null ; } } tablesTouched = candidate . getTablesupdated ( ) . split ( "," ) ; for ( String tableName : tablesTouched ) { if ( isDirtyTable ( tableName ) ) { ++ m_stmtCacheMisses ; return null ; } } ++ m_stmtCacheHits ; // easy debugging stmt //printStmtCacheStats(); return candidate ; }
Look for a match from the previous catalog that matches the key + sql
216
14
155,869
public HashRangeExpressionBuilder put ( Integer value1 , Integer value2 ) { m_builder . put ( value1 , value2 ) ; return this ; }
Add a value pair .
34
5
155,870
public HashRangeExpression build ( Integer hashColumnIndex ) { Map < Integer , Integer > ranges = m_builder . build ( ) ; HashRangeExpression predicate = new HashRangeExpression ( ) ; predicate . setRanges ( ranges ) ; predicate . setHashColumnIndex ( hashColumnIndex ) ; return predicate ; }
Generate a hash range expression .
68
7
155,871
@ Override public OrderableTransaction poll ( ) { OrderableTransaction retval = null ; updateQueueState ( ) ; if ( m_state == QueueState . UNBLOCKED ) { retval = super . peek ( ) ; super . poll ( ) ; // not BLOCKED_EMPTY assert ( retval != null ) ; } return retval ; }
Only return transaction state objects that are ready to run .
78
11
155,872
@ Override public boolean add ( OrderableTransaction txnState ) { if ( m_initiatorData . containsKey ( txnState . initiatorHSId ) == false ) { return false ; } boolean retval = super . add ( txnState ) ; // update the queue state if ( retval ) updateQueueState ( ) ; return retval ; }
Drop data for unknown initiators . This is the only valid add interface .
78
15
155,873
public long noteTransactionRecievedAndReturnLastSeen ( long initiatorHSId , long txnId , long lastSafeTxnIdFromInitiator ) { // System.out.printf("Site %d got heartbeat message from initiator %d with txnid/safeid: %d/%d\n", // m_siteId, initiatorSiteId, txnId, lastSafeTxnIdFromInitiator); // this doesn't exclude dummy txnid but is also a sanity check assert ( txnId != 0 ) ; // Drop old data from already-failed initiators. if ( m_initiatorData . containsKey ( initiatorHSId ) == false ) { //hostLog.info("Dropping txn " + txnId + " data from failed initiatorSiteId: " + initiatorSiteId); return DtxnConstants . DUMMY_LAST_SEEN_TXN_ID ; } // update the latest transaction for the specified initiator LastInitiatorData lid = m_initiatorData . get ( initiatorHSId ) ; if ( lid . m_lastSeenTxnId < txnId ) lid . m_lastSeenTxnId = txnId ; if ( lid . m_lastSafeTxnId < lastSafeTxnIdFromInitiator ) lid . m_lastSafeTxnId = lastSafeTxnIdFromInitiator ; /* * Why aren't we asserting that the txnId is > then the last seen/last safe * It seems like this should be guaranteed by TCP ordering and we want to * know if it isn't! */ // find the minimum value across all latest transactions long min = Long . MAX_VALUE ; for ( LastInitiatorData l : m_initiatorData . values ( ) ) if ( l . m_lastSeenTxnId < min ) min = l . m_lastSeenTxnId ; // This transaction is the guaranteed minimum // but is not yet necessarily 2PC'd to every site. m_newestCandidateTransaction = min ; // this will update the state of the queue if needed updateQueueState ( ) ; // return the last seen id for the originating initiator return lid . m_lastSeenTxnId ; }
Update the information stored about the latest transaction seen from each initiator . Compute the newest safe transaction id .
492
22
155,874
public void gotFaultForInitiator ( long initiatorId ) { // calculate the next minimum transaction w/o our dead friend noteTransactionRecievedAndReturnLastSeen ( initiatorId , Long . MAX_VALUE , DtxnConstants . DUMMY_LAST_SEEN_TXN_ID ) ; // remove initiator from minimum. txnid scoreboard LastInitiatorData remove = m_initiatorData . remove ( initiatorId ) ; assert ( remove != null ) ; }
Remove all pending transactions from the specified initiator and do not require heartbeats from that initiator to proceed .
110
23
155,875
public int ensureInitiatorIsKnown ( long initiatorId ) { int newInitiatorCount = 0 ; if ( m_initiatorData . get ( initiatorId ) == null ) { m_initiatorData . put ( initiatorId , new LastInitiatorData ( ) ) ; newInitiatorCount ++ ; } return newInitiatorCount ; }
After a catalog change, double-check that all initiators in the catalog that are known to be up are here in the RPQ's list.
82
28
155,876
public Long getNewestSafeTransactionForInitiator ( Long initiatorId ) { LastInitiatorData lid = m_initiatorData . get ( initiatorId ) ; if ( lid == null ) { return null ; } return lid . m_lastSafeTxnId ; }
Return the largest confirmed txn id for the initiator given . Used to figure out what to do after an initiator fails .
62
26
155,877
public Long safeToRecover ( ) { boolean safe = true ; for ( LastInitiatorData data : m_initiatorData . values ( ) ) { final long lastSeenTxnId = data . m_lastSeenTxnId ; if ( lastSeenTxnId == DtxnConstants . DUMMY_LAST_SEEN_TXN_ID ) { safe = false ; } } if ( ! safe ) { return null ; } OrderableTransaction next = peek ( ) ; if ( next == null ) { // no work - have heard from all initiators. use a heartbeat if ( m_state == QueueState . BLOCKED_EMPTY ) { return m_newestCandidateTransaction ; } // waiting for some txn to be 2pc to this site. else if ( m_state == QueueState . BLOCKED_SAFETY ) { return null ; } else if ( m_state == QueueState . BLOCKED_ORDERING ) { return null ; } m_recoveryLog . error ( "Unexpected RPQ state " + m_state + " when attempting to start recovery at " + " the source site. Consider killing the recovering node and trying again" ) ; return null ; // unreachable } else { // bingo - have a real transaction to return as the recovery point return next . txnId ; } }
Determine if it is safe to recover and, if it is, what txnid it is safe to recover at. Recovery is initiated by the recovering source partition. It can't be initiated until the recovering partition has heard from every initiator. This is because it is not possible to pick a point in the global txn ordering for the recovery to start at, where all subsequent procedure invocations that need to be applied after recovery are available, unless every initiator has been heard from.
301
96
155,878
public void unauthenticate ( HttpServletRequest request ) { if ( HTTP_DONT_USE_SESSION ) return ; HttpSession session = request . getSession ( false ) ; if ( session != null ) { session . removeAttribute ( AUTH_USER_SESSION_KEY ) ; session . invalidate ( ) ; } }
Unauthenticate the request: remove the cached authentication result from the HTTP session and invalidate the session so a later request cannot reuse it.
72
9
155,879
public AuthenticationResult authenticate ( HttpServletRequest request ) { HttpSession session = null ; AuthenticationResult authResult = null ; if ( ! HTTP_DONT_USE_SESSION && ! m_dontUseSession ) { try { session = request . getSession ( ) ; if ( session != null ) { if ( session . isNew ( ) ) { session . setMaxInactiveInterval ( MAX_SESSION_INACTIVITY_SECONDS ) ; } authResult = ( AuthenticationResult ) session . getAttribute ( AUTH_USER_SESSION_KEY ) ; } } catch ( Exception ex ) { //Use no session mode meaning whatever VMC sends as hashed password is used to authenticate. session = null ; m_rate_limited_log . log ( EstTime . currentTimeMillis ( ) , Level . ERROR , ex , "Failed to get or create HTTP Session. authenticating user explicitely." ) ; } } if ( authResult == null ) { authResult = getAuthenticationResult ( request ) ; if ( ! authResult . isAuthenticated ( ) ) { if ( session != null ) { session . removeAttribute ( AUTH_USER_SESSION_KEY ) ; } m_rate_limited_log . log ( "JSON interface exception: " + authResult . m_message , EstTime . currentTimeMillis ( ) ) ; } else { if ( session != null ) { //Cache the authResult in session so we dont authenticate again. session . setAttribute ( AUTH_USER_SESSION_KEY , authResult ) ; } } } return authResult ; }
Look to get the session; if no session is found or created, fall back to always-authenticate mode.
344
18
155,880
public static FunctionSQL newSQLFunction ( String token , CompileContext context ) { int id = regularFuncMap . get ( token , - 1 ) ; if ( id == - 1 ) { id = valueFuncMap . get ( token , - 1 ) ; } if ( id == - 1 ) { return null ; } FunctionSQL function = new FunctionSQL ( id ) ; if ( id == FUNC_VALUE ) { if ( context . currentDomain == null ) { return null ; } function . dataType = context . currentDomain ; } return function ; }
Returns a new FunctionSQL instance for the given token , or null if the token is not a recognised SQL function .
119
5
155,881
public ProcessTxnResult processTxn ( TxnHeader hdr , Record txn ) { return dataTree . processTxn ( hdr , txn ) ; }
process the txn on the data tree
37
7
155,882
public Stat statNode ( String path , ServerCnxn serverCnxn ) throws KeeperException . NoNodeException { return dataTree . statNode ( path , serverCnxn ) ; }
stat the path
44
3
155,883
public byte [ ] getData ( String path , Stat stat , Watcher watcher ) throws KeeperException . NoNodeException { return dataTree . getData ( path , stat , watcher ) ; }
get data and stat for a path
42
7
155,884
public void setWatches ( long relativeZxid , List < String > dataWatches , List < String > existWatches , List < String > childWatches , Watcher watcher ) { dataTree . setWatches ( relativeZxid , dataWatches , existWatches , childWatches , watcher ) ; }
set watches on the datatree
71
7
155,885
public List < ACL > getACL ( String path , Stat stat ) throws NoNodeException { return dataTree . getACL ( path , stat ) ; }
get acl for a path
34
6
155,886
public List < String > getChildren ( String path , Stat stat , Watcher watcher ) throws KeeperException . NoNodeException { return dataTree . getChildren ( path , stat , watcher ) ; }
get children list for this path
43
6
155,887
public SiteTasker take ( ) throws InterruptedException { SiteTasker task = m_tasks . poll ( ) ; if ( task == null ) { m_starvationTracker . beginStarvation ( ) ; } else { m_queueDepthTracker . pollUpdate ( task . getQueueOfferTime ( ) ) ; return task ; } try { task = CoreUtils . queueSpinTake ( m_tasks ) ; // task is never null m_queueDepthTracker . pollUpdate ( task . getQueueOfferTime ( ) ) ; return task ; } finally { m_starvationTracker . endStarvation ( ) ; } }
Block on the site tasker queue .
136
8
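The take above is a poll-first, then block pattern: try a cheap non-blocking poll, and only begin starvation accounting once the queue is empty. A generic sketch using only java.util.concurrent, with the tracker reduced to hypothetical hooks (CoreUtils.queueSpinTake is VoltDB-internal, so a plain blocking take stands in for it):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

final class PollThenBlockQueue<T> {
    private final BlockingQueue<T> tasks = new LinkedBlockingQueue<>();

    T take() throws InterruptedException {
        T task = tasks.poll();   // fast path: returns immediately when work is already queued
        if (task != null) {
            return task;
        }
        beginStarvation();       // hypothetical hook standing in for m_starvationTracker
        try {
            return tasks.take(); // slow path: block until work arrives; never returns null
        } finally {
            endStarvation();
        }
    }

    private void beginStarvation() { /* record when the consumer went idle (assumption) */ }
    private void endStarvation()   { /* record when work resumed (assumption) */ }
}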
155,888
public SiteTasker poll ( ) { SiteTasker task = m_tasks . poll ( ) ; if ( task != null ) { m_queueDepthTracker . pollUpdate ( task . getQueueOfferTime ( ) ) ; } return task ; }
Non - blocking poll on the site tasker queue .
54
11
155,889
public static BufferedWriter newWriter ( File file , Charset charset ) throws FileNotFoundException { checkNotNull ( file ) ; checkNotNull ( charset ) ; return new BufferedWriter ( new OutputStreamWriter ( new FileOutputStream ( file ) , charset ) ) ; }
Returns a buffered writer that writes to a file using the given character set .
63
16
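Assuming this is Guava's com.google.common.io.Files.newWriter, a typical call looks like the following; the output file name is invented, and the caller owns (and must close) the returned writer.

import com.google.common.io.Files;
import java.io.BufferedWriter;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class NewWriterDemo {
    public static void main(String[] args) throws IOException {
        BufferedWriter writer = Files.newWriter(new File("out.txt"), StandardCharsets.UTF_8);
        try {
            writer.write("hello");
        } finally {
            writer.close(); // newWriter does not manage the stream's lifecycle
        }
    }
}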
155,890
private static void write ( CharSequence from , File to , Charset charset , boolean append ) throws IOException { asCharSink ( to , charset , modes ( append ) ) . write ( from ) ; }
Private helper method . Writes a character sequence to a file , optionally appending .
48
16
155,891
public static void copy ( File from , Charset charset , Appendable to ) throws IOException { asCharSource ( from , charset ) . copyTo ( to ) ; }
Copies all characters from a file to an appendable object using the given character set .
40
18
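Assuming Guava's Files.copy(File, Charset, Appendable), slurping a file into a StringBuilder is a one-liner; the path below is illustrative.

import com.google.common.io.Files;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class CopyDemo {
    public static void main(String[] args) throws IOException {
        StringBuilder sb = new StringBuilder();
        Files.copy(new File("notes.txt"), StandardCharsets.UTF_8, sb); // appends every character to sb
        System.out.println(sb);
    }
}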
155,892
public static boolean equal ( File file1 , File file2 ) throws IOException { checkNotNull ( file1 ) ; checkNotNull ( file2 ) ; if ( file1 == file2 || file1 . equals ( file2 ) ) { return true ; } /* * Some operating systems may return zero as the length for files denoting system-dependent * entities such as devices or pipes, in which case we must fall back on comparing the bytes * directly. */ long len1 = file1 . length ( ) ; long len2 = file2 . length ( ) ; if ( len1 != 0 && len2 != 0 && len1 != len2 ) { return false ; } return asByteSource ( file1 ) . contentEquals ( asByteSource ( file2 ) ) ; }
Returns true if the two files contain the same bytes .
164
10
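Usage is a single call; note from the source that a reported length of zero falls back to a byte-by-byte comparison, so system-dependent special files are still handled correctly. The file names here are invented.

import com.google.common.io.Files;
import java.io.File;
import java.io.IOException;

public class EqualDemo {
    public static void main(String[] args) throws IOException {
        boolean same = Files.equal(new File("a.bin"), new File("b.bin"));
        System.out.println(same ? "identical" : "different");
    }
}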
155,893
public static List < String > readLines ( File file , Charset charset ) throws IOException { // don't use asCharSource(file, charset).readLines() because that returns // an immutable list, which would change the behavior of this method return readLines ( file , charset , new LineProcessor < List < String > > ( ) { final List < String > result = Lists . newArrayList ( ) ; @ Override public boolean processLine ( String line ) { result . add ( line ) ; return true ; } @ Override public List < String > getResult ( ) { return result ; } } ) ; }
Reads all of the lines from a file . The lines do not include line - termination characters but do include other leading and trailing whitespace .
138
29
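Because the source deliberately returns a mutable list (see the comment in the code above), callers may sort or append without copying; a small sketch with an invented file name:

import com.google.common.io.Files;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.List;

public class ReadLinesDemo {
    public static void main(String[] args) throws IOException {
        List<String> lines = Files.readLines(new File("data.txt"), StandardCharsets.UTF_8);
        Collections.sort(lines); // legal: the returned list is mutable by design
        System.out.println(lines.size() + " lines");
    }
}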
155,894
@ CanIgnoreReturnValue // some processors won't return a useful result public static < T > T readBytes ( File file , ByteProcessor < T > processor ) throws IOException { return asByteSource ( file ) . read ( processor ) ; }
Process the bytes of a file .
53
7
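A small ByteProcessor that tallies the total byte count shows the callback's shape: processBytes is invoked once per chunk and returns true to keep reading, and getResult supplies the final value. The file name is invented.

import com.google.common.io.ByteProcessor;
import com.google.common.io.Files;
import java.io.File;
import java.io.IOException;

public class ReadBytesDemo {
    public static void main(String[] args) throws IOException {
        long size = Files.readBytes(new File("blob.bin"), new ByteProcessor<Long>() {
            private long total;
            @Override
            public boolean processBytes(byte[] buf, int off, int len) {
                total += len;  // consume this chunk
                return true;   // keep reading until EOF
            }
            @Override
            public Long getResult() {
                return total;
            }
        });
        System.out.println(size + " bytes");
    }
}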
155,895
static Expression decomposeCondition ( Expression e , HsqlArrayList conditions ) { if ( e == null ) { return Expression . EXPR_TRUE ; } Expression arg1 = e . getLeftNode ( ) ; Expression arg2 = e . getRightNode ( ) ; int type = e . getType ( ) ; if ( type == OpTypes . AND ) { arg1 = decomposeCondition ( arg1 , conditions ) ; arg2 = decomposeCondition ( arg2 , conditions ) ; if ( arg1 == Expression . EXPR_TRUE ) { return arg2 ; } if ( arg2 == Expression . EXPR_TRUE ) { return arg1 ; } e . setLeftNode ( arg1 ) ; e . setRightNode ( arg2 ) ; return e ; } else if ( type == OpTypes . EQUAL ) { if ( arg1 . getType ( ) == OpTypes . ROW && arg2 . getType ( ) == OpTypes . ROW ) { for ( int i = 0 ; i < arg1 . nodes . length ; i ++ ) { Expression part = new ExpressionLogical ( arg1 . nodes [ i ] , arg2 . nodes [ i ] ) ; part . resolveTypes ( null , null ) ; conditions . add ( part ) ; } return Expression . EXPR_TRUE ; } } if ( e != Expression . EXPR_TRUE ) { conditions . add ( e ) ; } return Expression . EXPR_TRUE ; }
Divides AND conditions and assigns each resulting condition to the conditions list .
313
6
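decomposeCondition recursively splits an AND tree, pruning TRUE branches and expanding row-equality comparisons into per-column conditions. A toy, self-contained version of just the AND-flattening core (the Node type is invented for illustration; the TRUE and row handling of the original is omitted):

import java.util.ArrayList;
import java.util.List;

final class AndFlattener {
    static final class Node {            // toy expression node (assumption)
        final String op;                 // "AND" for interior nodes, anything else for leaves
        final Node left, right;
        Node(String op, Node left, Node right) { this.op = op; this.left = left; this.right = right; }
    }

    // Collect every non-AND subtree, mirroring how decomposeCondition fills 'conditions'.
    static void flatten(Node e, List<Node> conditions) {
        if (e == null) {
            return;
        }
        if ("AND".equals(e.op)) {
            flatten(e.left, conditions);
            flatten(e.right, conditions);
        } else {
            conditions.add(e);
        }
    }

    public static void main(String[] args) {
        Node leafA = new Node("a = 1", null, null);
        Node leafB = new Node("b = 2", null, null);
        List<Node> out = new ArrayList<>();
        flatten(new Node("AND", leafA, leafB), out);
        System.out.println(out.size()); // 2: both conjuncts extracted
    }
}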
155,896
void assignToLists ( ) { int lastOuterIndex = - 1 ; for ( int i = 0 ; i < rangeVariables . length ; i ++ ) { if ( rangeVariables [ i ] . isLeftJoin || rangeVariables [ i ] . isRightJoin ) { lastOuterIndex = i ; } if ( lastOuterIndex == i ) { joinExpressions [ i ] . addAll ( tempJoinExpressions [ i ] ) ; } else { for ( int j = 0 ; j < tempJoinExpressions [ i ] . size ( ) ; j ++ ) { assignToLists ( ( Expression ) tempJoinExpressions [ i ] . get ( j ) , joinExpressions , lastOuterIndex + 1 ) ; } } } for ( int i = 0 ; i < queryExpressions . size ( ) ; i ++ ) { assignToLists ( ( Expression ) queryExpressions . get ( i ) , whereExpressions , lastOuterIndex ) ; } }
Assigns the conditions to separate lists
212
8
155,897
void assignToLists ( Expression e , HsqlArrayList [ ] expressionLists , int first ) { set . clear ( ) ; e . collectRangeVariables ( rangeVariables , set ) ; int index = rangeVarSet . getLargestIndex ( set ) ; // condition is independent of tables if no range variable is found if ( index == - 1 ) { index = 0 ; } // condition is assigned to first non-outer range variable if ( index < first ) { index = first ; } expressionLists [ index ] . add ( e ) ; }
Assigns a single condition to the relevant list of conditions
120
12
155,898
void assignToRangeVariables ( ) { for ( int i = 0 ; i < rangeVariables . length ; i ++ ) { boolean isOuter = rangeVariables [ i ] . isLeftJoin || rangeVariables [ i ] . isRightJoin ; if ( isOuter ) { assignToRangeVariable ( rangeVariables [ i ] , i , joinExpressions [ i ] , true ) ; assignToRangeVariable ( rangeVariables [ i ] , i , whereExpressions [ i ] , false ) ; } else { joinExpressions [ i ] . addAll ( whereExpressions [ i ] ) ; assignToRangeVariable ( rangeVariables [ i ] , i , joinExpressions [ i ] , true ) ; } // A VoltDB extension to disable // Turn off some weird rewriting of in expressions based on index support for the query. // This makes it simpler to parse on the VoltDB side, // at the expense of HSQL performance. // Also fixed an apparent join/where confusion? if ( inExpressions [ i ] != null ) { if ( ! flags [ i ] && isOuter ) { rangeVariables [ i ] . addJoinCondition ( inExpressions [ i ] ) ; } else { rangeVariables [ i ] . addWhereCondition ( inExpressions [ i ] ) ; } /* disable 7 lines ... if (rangeVariables[i].hasIndexCondition() && inExpressions[i] != null) { if (!flags[i] && isOuter) { rangeVariables[i].addWhereCondition(inExpressions[i]); } else { rangeVariables[i].addJoinCondition(inExpressions[i]); } ... disabled 7 lines */ // End of VoltDB extension inExpressions [ i ] = null ; inExpressionCount -- ; } } if ( inExpressionCount != 0 ) { // A VoltDB extension to disable // This will never be called because of the change made to the block above assert ( false ) ; // End of VoltDB extension setInConditionsAsTables ( ) ; } }
Assigns conditions to range variables and converts suitable IN conditions to table lookup .
438
16
155,899
void setInConditionsAsTables ( ) { for ( int i = rangeVariables . length - 1 ; i >= 0 ; i -- ) { RangeVariable rangeVar = rangeVariables [ i ] ; Expression in = inExpressions [ i ] ; if ( in != null ) { Index index = rangeVar . rangeTable . getIndexForColumn ( in . getLeftNode ( ) . nodes [ 0 ] . getColumnIndex ( ) ) ; RangeVariable newRangeVar = new RangeVariable ( in . getRightNode ( ) . subQuery . getTable ( ) , null , null , null , compileContext ) ; RangeVariable [ ] newList = new RangeVariable [ rangeVariables . length + 1 ] ; ArrayUtil . copyAdjustArray ( rangeVariables , newList , newRangeVar , i , 1 ) ; rangeVariables = newList ; // make two columns as arg ColumnSchema left = rangeVar . rangeTable . getColumn ( in . getLeftNode ( ) . nodes [ 0 ] . getColumnIndex ( ) ) ; ColumnSchema right = newRangeVar . rangeTable . getColumn ( 0 ) ; Expression e = new ExpressionLogical ( rangeVar , left , newRangeVar , right ) ; rangeVar . addIndexCondition ( e , index , flags [ i ] ) ; } } }
Converts an IN condition into a JOIN
280
9