idx
int64 0
165k
| question
stringlengths 73
4.15k
| target
stringlengths 5
918
| len_question
int64 21
890
| len_target
int64 3
255
|
|---|---|---|---|---|
155,600
|
@ SuppressWarnings ( "deprecation" ) public VoltTable [ ] run ( SystemProcedureExecutionContext ctx , String username , String remoteHost , String xmlConfig ) { long oldLevels = 0 ; if ( ctx . isLowestSiteId ( ) ) { // Logger level is a global property, pick the site with lowest id to do it. hostLog . info ( String . format ( "%s from %s changed the log4j settings" , username , remoteHost ) ) ; hostLog . info ( xmlConfig ) ; oldLevels = hostLog . getLogLevels ( loggers ) ; } try { // Mimic the multi-fragment semantics as scatter-gather pattern is an overkill for this simple task. // There are chances that some sites being interrupted and update the logging before old logger level // being read, but the reasons we don't care because 1) it is rare and 2) it only effects when HOST // logger being changed from higher than INFO level to INFO or lower level. barrier . await ( ) ; } catch ( InterruptedException | BrokenBarrierException dontcare ) { } VoltDB . instance ( ) . logUpdate ( xmlConfig , DeprecatedProcedureAPIAccess . getVoltPrivateRealTransactionId ( this ) , ctx . getPaths ( ) . getVoltDBRoot ( ) ) ; ctx . updateBackendLogLevels ( ) ; if ( ctx . isLowestSiteId ( ) ) { long newLevels = hostLog . getLogLevels ( loggers ) ; if ( newLevels != oldLevels ) { // If HOST logger wasn't able to log before and now it can, logs the setting change event. int index = ( int ) ( ( oldLevels >> 3 ) & 7 ) ; Level before = Level . values ( ) [ index ] ; index = ( int ) ( ( newLevels >> 3 ) & 7 ) ; Level after = Level . values ( ) [ index ] ; if ( before . ordinal ( ) > Level . INFO . ordinal ( ) && after . ordinal ( ) <= Level . INFO . ordinal ( ) ) { hostLog . info ( String . format ( "%s from %s changed the log4j settings" , username , remoteHost ) ) ; hostLog . info ( xmlConfig ) ; } } barrier . reset ( ) ; } VoltTable t = new VoltTable ( VoltSystemProcedure . STATUS_SCHEMA ) ; t . addRow ( VoltSystemProcedure . STATUS_OK ) ; return ( new VoltTable [ ] { t } ) ; }
|
Change the operational log configuration .
| 564
| 6
|
155,601
|
public void add ( String item , String value ) { int maxChar = MaxLenInZChoice ; if ( item . length ( ) < MaxLenInZChoice ) { maxChar = item . length ( ) ; } super . add ( item . substring ( 0 , maxChar ) ) ; values . addElement ( value ) ; }
|
restrict strings for the choice to MaxLenInZChoice characters
| 71
| 13
|
155,602
|
private int findValue ( String s ) { for ( int i = 0 ; i < values . size ( ) ; i ++ ) { if ( s . equals ( values . elementAt ( i ) ) ) { return i ; } // end of if (s.equals(values.elementAt(i))) } // end of for (int i=0; i<values.size(); i++) return - 1 ; }
|
find for a given value the index in values
| 89
| 9
|
155,603
|
public static ByteBuffer getNextChunk ( byte [ ] schemaBytes , ByteBuffer buf , CachedByteBufferAllocator resultBufferAllocator ) { buf . position ( buf . position ( ) + 4 ) ; //skip partition id int length = schemaBytes . length + buf . remaining ( ) ; ByteBuffer outputBuffer = resultBufferAllocator . allocate ( length ) ; outputBuffer . put ( schemaBytes ) ; outputBuffer . put ( buf ) ; outputBuffer . flip ( ) ; return outputBuffer ; }
|
Assemble the chunk so that it can be used to construct the VoltTable that will be passed to EE .
| 109
| 22
|
155,604
|
private RestoreWork processMessage ( DecodedContainer msg , CachedByteBufferAllocator resultBufferAllocator ) { if ( msg == null ) { return null ; } RestoreWork restoreWork = null ; try { if ( msg . m_msgType == StreamSnapshotMessageType . FAILURE ) { VoltDB . crashLocalVoltDB ( "Rejoin source sent failure message." , false , null ) ; // for test code only if ( m_expectedEOFs . decrementAndGet ( ) == 0 ) { m_EOF = true ; } } else if ( msg . m_msgType == StreamSnapshotMessageType . END ) { if ( rejoinLog . isTraceEnabled ( ) ) { rejoinLog . trace ( "Got END message " + msg . m_blockIndex + " from " + CoreUtils . hsIdToString ( msg . m_srcHSId ) + " (TargetId " + msg . m_dataTargetId + ")" ) ; } if ( m_expectedEOFs . decrementAndGet ( ) == 0 ) { m_EOF = true ; } } else if ( msg . m_msgType == StreamSnapshotMessageType . SCHEMA ) { rejoinLog . trace ( "Got SCHEMA message " + msg . m_blockIndex + " from " + CoreUtils . hsIdToString ( msg . m_srcHSId ) + " (TargetId " + msg . m_dataTargetId + ")" ) ; ByteBuffer block = msg . m_container . b ( ) ; block . position ( StreamSnapshotDataTarget . contentOffset ) ; byte [ ] schemaBytes = new byte [ block . remaining ( ) ] ; block . get ( schemaBytes ) ; m_schemas . put ( msg . m_tableId , schemaBytes ) ; } else if ( msg . m_msgType == StreamSnapshotMessageType . HASHINATOR ) { ByteBuffer block = msg . m_container . b ( ) ; block . position ( StreamSnapshotDataTarget . contentOffset ) ; long version = block . getLong ( ) ; byte [ ] hashinatorConfig = new byte [ block . remaining ( ) ] ; block . get ( hashinatorConfig ) ; restoreWork = new HashinatorRestoreWork ( version , hashinatorConfig ) ; } else { // It's normal snapshot data afterwards rejoinLog . trace ( "Got DATA message " + msg . m_blockIndex + " from " + CoreUtils . hsIdToString ( msg . m_srcHSId ) + " (TargetId " + msg . m_dataTargetId + ")" ) ; ByteBuffer block = msg . m_container . b ( ) ; if ( ! m_schemas . containsKey ( msg . m_tableId ) ) { VoltDB . 
crashLocalVoltDB ( "No schema for table with ID " + msg . m_tableId , false , null ) ; } // Get the byte buffer ready to be consumed block . position ( StreamSnapshotDataTarget . contentOffset ) ; ByteBuffer nextChunk = getNextChunk ( m_schemas . get ( msg . m_tableId ) , block , resultBufferAllocator ) ; m_bytesReceived += nextChunk . remaining ( ) ; restoreWork = new TableRestoreWork ( msg . m_tableId , nextChunk ) ; } return restoreWork ; } finally { msg . m_container . discard ( ) ; // Queue ack to this block (after the container has been discarded) m_ack . ack ( msg . m_srcHSId , msg . m_msgType == StreamSnapshotMessageType . END , msg . m_dataTargetId , msg . m_blockIndex ) ; } }
|
Process a message pulled off from the network thread and discard the container once it s processed .
| 809
| 18
|
155,605
|
public static void copyFile ( String fromPath , String toPath ) throws Exception { File inputFile = new File ( fromPath ) ; File outputFile = new File ( toPath ) ; com . google_voltpatches . common . io . Files . copy ( inputFile , outputFile ) ; }
|
Simple code to copy a file from one place to another ... Java should have this built in ... stupid java ...
| 63
| 22
|
155,606
|
public static String parseRevisionString ( String fullBuildString ) { String build = "" ; // Test for SVN revision string - example: https://svn.voltdb.com/eng/trunk?revision=2352 String [ ] splitted = fullBuildString . split ( "=" , 2 ) ; if ( splitted . length == 2 ) { build = splitted [ 1 ] . trim ( ) ; if ( build . length ( ) == 0 ) { return null ; } return build ; } // Test for git build string - example: 2.0 voltdb-2.0-70-gb39f43e-dirty Pattern p = Pattern . compile ( "-(\\d*-\\w{8}(?:-.*)?)" ) ; Matcher m = p . matcher ( fullBuildString ) ; if ( ! m . find ( ) ) { return null ; } build = m . group ( 1 ) . trim ( ) ; if ( build . length ( ) == 0 ) { return null ; } return build ; }
|
Check that RevisionStrings are properly formatted .
| 224
| 9
|
155,607
|
public static Object [ ] parseVersionString ( String versionString ) { if ( versionString == null ) { return null ; } // check for whitespace if ( versionString . matches ( "\\s" ) ) { return null ; } // split on the dots String [ ] split = versionString . split ( "\\." ) ; if ( split . length == 0 ) { return null ; } Object [ ] v = new Object [ split . length ] ; int i = 0 ; for ( String s : split ) { try { v [ i ] = Integer . parseInt ( s ) ; } catch ( NumberFormatException e ) { v [ i ] = s ; } i ++ ; } // check for a numeric beginning if ( v [ 0 ] instanceof Integer ) { return v ; } else { return null ; } }
|
Parse a version string in the form of x . y . z . It doesn t require that there are exactly three parts in the version . Each part must be separated by a dot .
| 172
| 38
|
155,608
|
public static int compareVersions ( Object [ ] left , Object [ ] right ) { if ( left == null || right == null ) { throw new IllegalArgumentException ( "Invalid versions" ) ; } for ( int i = 0 ; i < left . length ; i ++ ) { // right is shorter than left and share the same prefix => left must be larger if ( right . length == i ) { return 1 ; } if ( left [ i ] instanceof Integer ) { if ( right [ i ] instanceof Integer ) { // compare two numbers if ( ( ( Integer ) left [ i ] ) > ( ( Integer ) right [ i ] ) ) { return 1 ; } else if ( ( ( Integer ) left [ i ] ) < ( ( Integer ) right [ i ] ) ) { return - 1 ; } else { continue ; } } else { // numbers always greater than alphanumeric tags return 1 ; } } else if ( right [ i ] instanceof Integer ) { // alphanumeric tags always less than numbers return - 1 ; } else { // compare two alphanumeric tags lexicographically int cmp = ( ( String ) left [ i ] ) . compareTo ( ( String ) right [ i ] ) ; if ( cmp != 0 ) { return cmp ; } else { // two alphanumeric tags are the same... so keep comparing continue ; } } } // left is shorter than right and share the same prefix, must be less if ( left . length < right . length ) { return - 1 ; } // samesies return 0 ; }
|
Compare two versions . Version should be represented as an array of integers .
| 328
| 14
|
155,609
|
public static boolean isPro ( ) { if ( m_isPro == null ) { //Allow running pro kit as community. if ( ! Boolean . parseBoolean ( System . getProperty ( "community" , "false" ) ) ) { m_isPro = ProClass . load ( "org.voltdb.CommandLogImpl" , "Command logging" , ProClass . HANDLER_IGNORE ) . hasProClass ( ) ; } else { m_isPro = false ; } } return m_isPro . booleanValue ( ) ; }
|
check if we re running pro code
| 118
| 7
|
155,610
|
public static final long cheesyBufferCheckSum ( ByteBuffer buffer ) { final int mypos = buffer . position ( ) ; buffer . position ( 0 ) ; long checksum = 0 ; if ( buffer . hasArray ( ) ) { final byte bytes [ ] = buffer . array ( ) ; final int end = buffer . arrayOffset ( ) + mypos ; for ( int ii = buffer . arrayOffset ( ) ; ii < end ; ii ++ ) { checksum += bytes [ ii ] ; } } else { for ( int ii = 0 ; ii < mypos ; ii ++ ) { checksum += buffer . get ( ) ; } } buffer . position ( mypos ) ; return checksum ; }
|
I heart commutativity
| 146
| 5
|
155,611
|
public static < T > T [ ] concatAll ( final T [ ] empty , Iterable < T [ ] > arrayList ) { assert ( empty . length == 0 ) ; if ( arrayList . iterator ( ) . hasNext ( ) == false ) { return empty ; } int len = 0 ; for ( T [ ] subArray : arrayList ) { len += subArray . length ; } int pos = 0 ; T [ ] result = Arrays . copyOf ( empty , len ) ; for ( T [ ] subArray : arrayList ) { System . arraycopy ( subArray , 0 , result , pos , subArray . length ) ; pos += subArray . length ; } return result ; }
|
Concatenate an list of arrays of typed - objects
| 149
| 12
|
155,612
|
public static long getMBRss ( Client client ) { assert ( client != null ) ; long rssMax = 0 ; try { ClientResponse r = client . callProcedure ( "@Statistics" , "MEMORY" , 0 ) ; VoltTable stats = r . getResults ( ) [ 0 ] ; stats . resetRowPosition ( ) ; while ( stats . advanceRow ( ) ) { long rss = stats . getLong ( "RSS" ) / 1024 ; if ( rss > rssMax ) { rssMax = rss ; } } return rssMax ; } catch ( Exception e ) { e . printStackTrace ( ) ; System . exit ( - 1 ) ; return 0 ; } }
|
Get the resident set size in mb for the voltdb server on the other end of the client . If the client is connected to multiple servers return the max individual rss across the cluster .
| 154
| 40
|
155,613
|
public static < K , V > Multimap < K , V > zipToMap ( List < K > keys , List < V > values ) { if ( keys . isEmpty ( ) || values . isEmpty ( ) ) { return null ; } Iterator < K > keyIter = keys . iterator ( ) ; Iterator < V > valueIter = values . iterator ( ) ; ArrayListMultimap < K , V > result = ArrayListMultimap . create ( ) ; while ( keyIter . hasNext ( ) && valueIter . hasNext ( ) ) { result . put ( keyIter . next ( ) , valueIter . next ( ) ) ; } // In case there are more values than keys, assign the rest of the // values to the first key K firstKey = keys . get ( 0 ) ; while ( valueIter . hasNext ( ) ) { result . put ( firstKey , valueIter . next ( ) ) ; } return result ; }
|
Zip the two lists up into a multimap
| 205
| 9
|
155,614
|
public static < K > List < K > zip ( Collection < Deque < K > > stuff ) { final List < K > result = Lists . newArrayList ( ) ; // merge the results Iterator < Deque < K > > iter = stuff . iterator ( ) ; while ( iter . hasNext ( ) ) { final K next = iter . next ( ) . poll ( ) ; if ( next != null ) { result . add ( next ) ; } else { iter . remove ( ) ; } if ( ! iter . hasNext ( ) ) { iter = stuff . iterator ( ) ; } } return result ; }
|
Aggregates the elements from each of the given deque . It takes one element from the head of each deque in each loop and put them into a single list . This method modifies the deques in - place .
| 131
| 46
|
155,615
|
public static < K extends Comparable < ? > , V > ListMultimap < K , V > sortedArrayListMultimap ( ) { Map < K , Collection < V > > map = Maps . newTreeMap ( ) ; return Multimaps . newListMultimap ( map , new Supplier < List < V > > ( ) { @ Override public List < V > get ( ) { return Lists . newArrayList ( ) ; } } ) ; }
|
Create an ArrayListMultimap that uses TreeMap as the container map so order is preserved .
| 101
| 20
|
155,616
|
public static StoredProcedureInvocation roundTripForCL ( StoredProcedureInvocation invocation ) throws IOException { if ( invocation . getSerializedParams ( ) != null ) { return invocation ; } ByteBuffer buf = ByteBuffer . allocate ( invocation . getSerializedSize ( ) ) ; invocation . flattenToBuffer ( buf ) ; buf . flip ( ) ; StoredProcedureInvocation rti = new StoredProcedureInvocation ( ) ; rti . initFromBuffer ( buf ) ; return rti ; }
|
Serialize and then deserialize an invocation so that it has serializedParams set for command logging if the invocation is sent to a local site .
| 118
| 31
|
155,617
|
public static Map < Integer , byte [ ] > getBinaryPartitionKeys ( TheHashinator hashinator ) { Map < Integer , byte [ ] > partitionMap = new HashMap <> ( ) ; VoltTable partitionKeys = null ; if ( hashinator == null ) { partitionKeys = TheHashinator . getPartitionKeys ( VoltType . VARBINARY ) ; } else { partitionKeys = TheHashinator . getPartitionKeys ( hashinator , VoltType . VARBINARY ) ; } if ( partitionKeys == null ) { return null ; } else { // This is a shared resource so make a copy of the table to protect the cache copy in TheHashinator ByteBuffer buf = ByteBuffer . allocate ( partitionKeys . getSerializedSize ( ) ) ; partitionKeys . flattenToBuffer ( buf ) ; buf . flip ( ) ; VoltTable keyCopy = PrivateVoltTableFactory . createVoltTableFromSharedBuffer ( buf ) ; while ( keyCopy . advanceRow ( ) ) { partitionMap . put ( ( int ) keyCopy . getLong ( 0 ) , keyCopy . getVarbinary ( 1 ) ) ; } } return partitionMap ; }
|
Get VARBINARY partition keys for the specified topology .
| 249
| 13
|
155,618
|
public static Properties readPropertiesFromCredentials ( String credentials ) { Properties props = new Properties ( ) ; File propFD = new File ( credentials ) ; if ( ! propFD . exists ( ) || ! propFD . isFile ( ) || ! propFD . canRead ( ) ) { throw new IllegalArgumentException ( "Credentials file " + credentials + " is not a read accessible file" ) ; } else { FileReader fr = null ; try { fr = new FileReader ( credentials ) ; props . load ( fr ) ; } catch ( IOException e ) { throw new IllegalArgumentException ( "Credential file not found or permission denied." ) ; } } return props ; }
|
Get username and password from credentials file .
| 149
| 8
|
155,619
|
public static int writeDeferredSerialization ( ByteBuffer mbuf , DeferredSerialization ds ) throws IOException { int written = 0 ; try { final int objStartPosition = mbuf . position ( ) ; ds . serialize ( mbuf ) ; written = mbuf . position ( ) - objStartPosition ; } finally { ds . cancel ( ) ; } return written ; }
|
Serialize the deferred serializer data into byte buffer
| 83
| 10
|
155,620
|
public NodeAVL getNode ( int index ) { NodeAVL n = nPrimaryNode ; while ( index -- > 0 ) { n = n . nNext ; } return n ; }
|
Returns the Node for a given Index using the ordinal position of the Index within the Table Object .
| 40
| 20
|
155,621
|
NodeAVL getNextNode ( NodeAVL n ) { if ( n == null ) { n = nPrimaryNode ; } else { n = n . nNext ; } return n ; }
|
Returns the Node for the next Index on this database row given the Node for any Index .
| 41
| 18
|
155,622
|
private boolean listACLEquals ( List < ACL > lista , List < ACL > listb ) { if ( lista . size ( ) != listb . size ( ) ) { return false ; } for ( int i = 0 ; i < lista . size ( ) ; i ++ ) { ACL a = lista . get ( i ) ; ACL b = listb . get ( i ) ; if ( ! a . equals ( b ) ) { return false ; } } return true ; }
|
compare two list of acls . if there elements are in the same order and the same size then return true else return false
| 106
| 27
|
155,623
|
public synchronized Long convertAcls ( List < ACL > acls ) { if ( acls == null ) return - 1L ; // get the value from the map Long ret = aclKeyMap . get ( acls ) ; // could not find the map if ( ret != null ) return ret ; long val = incrementIndex ( ) ; longKeyMap . put ( val , acls ) ; aclKeyMap . put ( acls , val ) ; return val ; }
|
converts the list of acls to a list of longs .
| 106
| 15
|
155,624
|
public synchronized List < ACL > convertLong ( Long longVal ) { if ( longVal == null ) return null ; if ( longVal == - 1L ) return Ids . OPEN_ACL_UNSAFE ; List < ACL > acls = longKeyMap . get ( longVal ) ; if ( acls == null ) { LOG . error ( "ERROR: ACL not available for long " + longVal ) ; throw new RuntimeException ( "Failed to fetch acls for " + longVal ) ; } return acls ; }
|
converts a list of longs to a list of acls .
| 118
| 15
|
155,625
|
public long approximateDataSize ( ) { long result = 0 ; for ( Map . Entry < String , DataNode > entry : nodes . entrySet ( ) ) { DataNode value = entry . getValue ( ) ; synchronized ( value ) { result += entry . getKey ( ) . length ( ) ; result += ( value . data == null ? 0 : value . data . length ) ; } } return result ; }
|
Get the size of the nodes based on path and data length .
| 87
| 13
|
155,626
|
boolean isSpecialPath ( String path ) { if ( rootZookeeper . equals ( path ) || procZookeeper . equals ( path ) || quotaZookeeper . equals ( path ) ) { return true ; } return false ; }
|
is the path one of the special paths owned by zookeeper .
| 54
| 14
|
155,627
|
public void updateCount ( String lastPrefix , int diff ) { String statNode = Quotas . statPath ( lastPrefix ) ; DataNode node = nodes . get ( statNode ) ; StatsTrack updatedStat = null ; if ( node == null ) { // should not happen LOG . error ( "Missing count node for stat " + statNode ) ; return ; } synchronized ( node ) { updatedStat = new StatsTrack ( new String ( node . data ) ) ; updatedStat . setCount ( updatedStat . getCount ( ) + diff ) ; node . data = updatedStat . toString ( ) . getBytes ( ) ; } // now check if the counts match the quota String quotaNode = Quotas . quotaPath ( lastPrefix ) ; node = nodes . get ( quotaNode ) ; StatsTrack thisStats = null ; if ( node == null ) { // should not happen LOG . error ( "Missing count node for quota " + quotaNode ) ; return ; } synchronized ( node ) { thisStats = new StatsTrack ( new String ( node . data ) ) ; } if ( thisStats . getCount ( ) < updatedStat . getCount ( ) ) { LOG . warn ( "Quota exceeded: " + lastPrefix + " count=" + updatedStat . getCount ( ) + " limit=" + thisStats . getCount ( ) ) ; } }
|
update the count of this stat datanode
| 290
| 9
|
155,628
|
public void deleteNode ( String path , long zxid ) throws KeeperException . NoNodeException { int lastSlash = path . lastIndexOf ( ' ' ) ; String parentName = path . substring ( 0 , lastSlash ) ; String childName = path . substring ( lastSlash + 1 ) ; DataNode node = nodes . get ( path ) ; if ( node == null ) { throw new KeeperException . NoNodeException ( ) ; } nodes . remove ( path ) ; DataNode parent = nodes . get ( parentName ) ; if ( parent == null ) { throw new KeeperException . NoNodeException ( ) ; } synchronized ( parent ) { parent . removeChild ( childName ) ; parent . stat . setCversion ( parent . stat . getCversion ( ) + 1 ) ; parent . stat . setPzxid ( zxid ) ; long eowner = node . stat . getEphemeralOwner ( ) ; if ( eowner != 0 ) { HashSet < String > nodes = ephemerals . get ( eowner ) ; if ( nodes != null ) { synchronized ( nodes ) { nodes . remove ( path ) ; } } } node . parent = null ; } if ( parentName . startsWith ( procZookeeper ) ) { // delete the node in the trie. if ( Quotas . limitNode . equals ( childName ) ) { // we need to update the trie // as well pTrie . deletePath ( parentName . substring ( quotaZookeeper . length ( ) ) ) ; } } // also check to update the quotas for this node String lastPrefix = pTrie . findMaxPrefix ( path ) ; if ( ! rootZookeeper . equals ( lastPrefix ) && ! ( "" . equals ( lastPrefix ) ) ) { // ok we have some match and need to update updateCount ( lastPrefix , - 1 ) ; int bytes = 0 ; synchronized ( node ) { bytes = ( node . data == null ? 0 : - ( node . data . length ) ) ; } updateBytes ( lastPrefix , bytes ) ; } if ( LOG . isTraceEnabled ( ) ) { ZooTrace . logTraceMessage ( LOG , ZooTrace . EVENT_DELIVERY_TRACE_MASK , "dataWatches.triggerWatch " + path ) ; ZooTrace . logTraceMessage ( LOG , ZooTrace . EVENT_DELIVERY_TRACE_MASK , "childWatches.triggerWatch " + parentName ) ; } Set < Watcher > processed = dataWatches . triggerWatch ( path , EventType . NodeDeleted ) ; childWatches . 
triggerWatch ( path , EventType . NodeDeleted , processed ) ; childWatches . triggerWatch ( parentName . equals ( "" ) ? "/" : parentName , EventType . NodeChildrenChanged ) ; }
|
remove the path from the datatree
| 621
| 8
|
155,629
|
private void getCounts ( String path , Counts counts ) { DataNode node = getNode ( path ) ; if ( node == null ) { return ; } String [ ] children = null ; int len = 0 ; synchronized ( node ) { Set < String > childs = node . getChildren ( ) ; if ( childs != null ) { children = childs . toArray ( new String [ childs . size ( ) ] ) ; } len = ( node . data == null ? 0 : node . data . length ) ; } // add itself counts . count += 1 ; counts . bytes += len ; if ( children == null || children . length == 0 ) { return ; } for ( String child : children ) { getCounts ( path + "/" + child , counts ) ; } }
|
this method gets the count of nodes and the bytes under a subtree
| 168
| 14
|
155,630
|
private void updateQuotaForPath ( String path ) { Counts c = new Counts ( ) ; getCounts ( path , c ) ; StatsTrack strack = new StatsTrack ( ) ; strack . setBytes ( c . bytes ) ; strack . setCount ( c . count ) ; String statPath = Quotas . quotaZookeeper + path + "/" + Quotas . statNode ; DataNode node = getNode ( statPath ) ; // it should exist if ( node == null ) { LOG . warn ( "Missing quota stat node " + statPath ) ; return ; } synchronized ( node ) { node . data = strack . toString ( ) . getBytes ( ) ; } }
|
update the quota for the given path
| 154
| 7
|
155,631
|
private void traverseNode ( String path ) { DataNode node = getNode ( path ) ; String children [ ] = null ; synchronized ( node ) { Set < String > childs = node . getChildren ( ) ; if ( childs != null ) { children = childs . toArray ( new String [ childs . size ( ) ] ) ; } } if ( children != null ) { if ( children . length == 0 ) { // this node does not have a child // is the leaf node // check if its the leaf node String endString = "/" + Quotas . limitNode ; if ( path . endsWith ( endString ) ) { // ok this is the limit node // get the real node and update // the count and the bytes String realPath = path . substring ( Quotas . quotaZookeeper . length ( ) , path . indexOf ( endString ) ) ; updateQuotaForPath ( realPath ) ; this . pTrie . addPath ( realPath ) ; } return ; } for ( String child : children ) { traverseNode ( path + "/" + child ) ; } } }
|
this method traverses the quota path and update the path trie and sets
| 239
| 15
|
155,632
|
private void setupQuota ( ) { String quotaPath = Quotas . quotaZookeeper ; DataNode node = getNode ( quotaPath ) ; if ( node == null ) { return ; } traverseNode ( quotaPath ) ; }
|
this method sets up the path trie and sets up stats for quota nodes
| 51
| 15
|
155,633
|
public void dumpEphemerals ( PrintWriter pwriter ) { Set < Long > keys = ephemerals . keySet ( ) ; pwriter . println ( "Sessions with Ephemerals (" + keys . size ( ) + "):" ) ; for ( long k : keys ) { pwriter . print ( "0x" + Long . toHexString ( k ) ) ; pwriter . println ( ":" ) ; HashSet < String > tmp = ephemerals . get ( k ) ; synchronized ( tmp ) { for ( String path : tmp ) { pwriter . println ( "\t" + path ) ; } } } }
|
Write a text dump of all the ephemerals in the datatree .
| 136
| 16
|
155,634
|
public int getCount ( ) throws InterruptedException , KeeperException { return ByteBuffer . wrap ( m_zk . getData ( m_path , false , null ) ) . getInt ( ) ; }
|
Returns the current count
| 44
| 4
|
155,635
|
public boolean isCountedDown ( ) throws InterruptedException , KeeperException { if ( countedDown ) return true ; int count = ByteBuffer . wrap ( m_zk . getData ( m_path , false , null ) ) . getInt ( ) ; if ( count > 0 ) return false ; countedDown = true ; return true ; }
|
Returns if already counted down to zero
| 73
| 7
|
155,636
|
private void copyTableSchemaFromShared ( ) { for ( SchemaColumn scol : m_sharedScan . getOutputSchema ( ) ) { SchemaColumn copy = new SchemaColumn ( scol . getTableName ( ) , getTableAlias ( ) , scol . getColumnName ( ) , scol . getColumnAlias ( ) , scol . getExpression ( ) , scol . getDifferentiator ( ) ) ; addOutputColumn ( copy ) ; } }
|
Copy the table schema from the shared part to here . We have to repair the table aliases .
| 104
| 19
|
155,637
|
public void harmonizeOutputSchema ( ) { boolean changedCurrent ; boolean changedBase ; boolean changedRecursive = false ; NodeSchema currentSchema = getOutputSchema ( ) ; NodeSchema baseSchema = getBestCostBasePlan ( ) . rootPlanGraph . getTrueOutputSchema ( false ) ; NodeSchema recursiveSchema = ( getBestCostRecursivePlan ( ) == null ) ? null : getBestCostRecursivePlan ( ) . rootPlanGraph . getTrueOutputSchema ( true ) ; // First, make the current schema // the widest. changedCurrent = currentSchema . harmonize ( baseSchema , "Base Query" ) ; if ( recursiveSchema != null ) { // Widen the current schema to the recursive // schema if necessary as well. boolean changedRec = currentSchema . harmonize ( recursiveSchema , "Recursive Query" ) ; changedCurrent = changedCurrent || changedRec ; } // Then change the base and current // schemas. changedBase = baseSchema . harmonize ( currentSchema , "Base Query" ) ; if ( recursiveSchema != null ) { changedRecursive = recursiveSchema . harmonize ( currentSchema , "Recursive Query" ) ; } // If we changed something, update the output schemas // which depend on the one we changed. if ( changedBase ) { getBestCostBasePlan ( ) . rootPlanGraph . getTrueOutputSchema ( true ) ; } if ( changedRecursive ) { getBestCostRecursivePlan ( ) . rootPlanGraph . getTrueOutputSchema ( true ) ; } }
|
We have just planned the base query and perhaps the recursive query . We need to make sure that the output schema of the scan and the output schemas of the base and recursive plans are all compatible .
| 341
| 40
|
155,638
|
private static void complete ( AbstractFuture < ? > future ) { boolean maskExecutorExceptions = future . maskExecutorExceptions ; Listener next = null ; outer : while ( true ) { future . releaseWaiters ( ) ; // We call this before the listeners in order to avoid needing to manage a separate stack data // structure for them. // afterDone() should be generally fast and only used for cleanup work... but in theory can // also be recursive and create StackOverflowErrors future . afterDone ( ) ; // push the current set of listeners onto next next = future . clearListeners ( next ) ; future = null ; while ( next != null ) { Listener curr = next ; next = next . next ; Runnable task = curr . task ; if ( task instanceof AbstractFuture . SetFuture ) { AbstractFuture . SetFuture < ? > setFuture = ( AbstractFuture . SetFuture ) task ; // We unwind setFuture specifically to avoid StackOverflowErrors in the case of long // chains of SetFutures // Handling this special case is important because there is no way to pass an executor to // setFuture, so a user couldn't break the chain by doing this themselves. It is also // potentially common if someone writes a recursive Futures.transformAsync transformer. future = setFuture . owner ; if ( future . value == setFuture ) { Object valueToSet = getFutureValue ( setFuture . future ) ; if ( ATOMIC_HELPER . casValue ( future , setFuture , valueToSet ) ) { continue outer ; } } // other wise the future we were trying to set is already done. } else { executeListener ( task , curr . executor , maskExecutorExceptions ) ; } } break ; } }
|
Unblocks all threads and runs all listeners .
| 376
| 9
|
155,639
|
public void addPath ( String path ) { if ( path == null ) { return ; } String [ ] pathComponents = path . split ( "/" ) ; TrieNode parent = rootNode ; String part = null ; if ( pathComponents . length <= 1 ) { throw new IllegalArgumentException ( "Invalid path " + path ) ; } for ( int i = 1 ; i < pathComponents . length ; i ++ ) { part = pathComponents [ i ] ; if ( parent . getChild ( part ) == null ) { parent . addChild ( part , new TrieNode ( parent ) ) ; } parent = parent . getChild ( part ) ; } parent . setProperty ( true ) ; }
|
add a path to the path trie
| 152
| 8
|
155,640
|
public void deletePath ( String path ) { if ( path == null ) { return ; } String [ ] pathComponents = path . split ( "/" ) ; TrieNode parent = rootNode ; String part = null ; if ( pathComponents . length <= 1 ) { throw new IllegalArgumentException ( "Invalid path " + path ) ; } for ( int i = 1 ; i < pathComponents . length ; i ++ ) { part = pathComponents [ i ] ; if ( parent . getChild ( part ) == null ) { //the path does not exist return ; } parent = parent . getChild ( part ) ; LOG . info ( parent ) ; } TrieNode realParent = parent . getParent ( ) ; realParent . deleteChild ( part ) ; }
|
delete a path from the trie
| 165
| 7
|
155,641
|
public String findMaxPrefix ( String path ) { if ( path == null ) { return null ; } if ( "/" . equals ( path ) ) { return path ; } String [ ] pathComponents = path . split ( "/" ) ; TrieNode parent = rootNode ; List < String > components = new ArrayList < String > ( ) ; if ( pathComponents . length <= 1 ) { throw new IllegalArgumentException ( "Invalid path " + path ) ; } int i = 1 ; String part = null ; StringBuilder sb = new StringBuilder ( ) ; int lastindex = - 1 ; while ( ( i < pathComponents . length ) ) { if ( parent . getChild ( pathComponents [ i ] ) != null ) { part = pathComponents [ i ] ; parent = parent . getChild ( part ) ; components . add ( part ) ; if ( parent . getProperty ( ) ) { lastindex = i - 1 ; } } else { break ; } i ++ ; } for ( int j = 0 ; j < ( lastindex + 1 ) ; j ++ ) { sb . append ( "/" + components . get ( j ) ) ; } return sb . toString ( ) ; }
|
return the largest prefix for the input path .
| 262
| 9
|
155,642
|
/**
 * Builds a VoltTable from a shorthand schema description (syntax described
 * in the class comment): an optional table name, a mandatory column list,
 * an optional primary key list, and optional partitioning info. Pkey and
 * partition columns may be given by index (numeric) or by name.
 *
 * @param schema shorthand schema string
 * @return a VoltTable carrying ExtraMetadata for name/pkey/partitioning
 * @throws IllegalArgumentException if no column list is present
 */
public static VoltTable tableFromShorthand(String schema) {
    String name = "T";
    VoltTable.ColumnInfo[] columns = null;
    // get a name
    Matcher nameMatcher = m_namePattern.matcher(schema);
    if (nameMatcher.find()) {
        name = nameMatcher.group().trim();
    }
    // get the column schema
    Matcher columnDataMatcher = m_columnsPattern.matcher(schema);
    if (!columnDataMatcher.find()) {
        throw new IllegalArgumentException("No column data found in shorthand");
    }
    String[] columnData = columnDataMatcher.group().trim().split("\\s*,\\s*");
    int columnCount = columnData.length;
    columns = new VoltTable.ColumnInfo[columnCount];
    for (int i = 0; i < columnCount; i++) {
        columns[i] = parseColumnShorthand(columnData[i], i);
    }
    // get the pkey
    Matcher pkeyMatcher = m_pkeyPattern.matcher(schema);
    int[] pkeyIndexes = new int[0]; // default no pkey
    if (pkeyMatcher.find()) {
        String[] pkeyColData = pkeyMatcher.group().trim().split("\\s*,\\s*");
        pkeyIndexes = new int[pkeyColData.length];
        for (int pkeyIndex = 0; pkeyIndex < pkeyColData.length; pkeyIndex++) {
            String pkeyCol = pkeyColData[pkeyIndex];
            // numeric means index of column
            if (Character.isDigit(pkeyCol.charAt(0))) {
                int colIndex = Integer.parseInt(pkeyCol);
                pkeyIndexes[pkeyIndex] = colIndex;
            } else {
                // otherwise resolve the pkey column by name
                for (int colIndex = 0; colIndex < columnCount; colIndex++) {
                    if (columns[colIndex].name.equals(pkeyCol)) {
                        pkeyIndexes[pkeyIndex] = colIndex;
                        break;
                    }
                }
            }
        }
    }
    // get any partitioning
    Matcher partitionMatcher = m_partitionPattern.matcher(schema);
    int partitionColumnIndex = -1; // default to replicated
    if (partitionMatcher.find()) {
        String partitionColStr = partitionMatcher.group().trim();
        // numeric means index of column
        if (Character.isDigit(partitionColStr.charAt(0))) {
            partitionColumnIndex = Integer.parseInt(partitionColStr);
        } else {
            // otherwise resolve the partition column by name
            for (int colIndex = 0; colIndex < columnCount; colIndex++) {
                if (columns[colIndex].name.equals(partitionColStr)) {
                    partitionColumnIndex = colIndex;
                    break;
                }
            }
        }
        assert (partitionColumnIndex != -1) : "Regex match here means there is a partitioning column";
    }
    VoltTable table = new VoltTable(
            new VoltTable.ExtraMetadata(name, partitionColumnIndex, pkeyIndexes, columns),
            columns, columns.length);
    return table;
}
|
Parse the shorthand according to the syntax as described in the class comment .
| 703
| 15
|
155,643
|
/** Exchanges the elements at indices {@code a} and {@code b} of the array. */
private static void swap(Object[] w, int a, int b) {
    final Object held = w[a];
    w[a] = w[b];
    w[b] = held;
}
|
Swaps the a th and b th elements of the specified Row array .
| 43
| 15
|
155,644
|
/**
 * Queues a row for this partition. Synchronized so that when a batch fills
 * up exactly one drain task is queued; that task keeps draining whole
 * batches until fewer than a full batch remains in the queue.
 *
 * NOTE(review): the drain trigger fires only on exact equality with
 * m_minBatchTriggerSize — appears to rely on this method being the sole
 * producer; confirm against the callers.
 *
 * @param nextRow row to enqueue for bulk loading
 * @throws InterruptedException if interrupted while blocking on the queue
 */
synchronized void insertRowInTable(final VoltBulkLoaderRow nextRow) throws InterruptedException {
    m_partitionRowQueue.put(nextRow);
    if (m_partitionRowQueue.size() == m_minBatchTriggerSize) {
        m_es.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    // drain whole batches until less than one batch remains
                    while (m_partitionRowQueue.size() >= m_minBatchTriggerSize) {
                        loadTable(buildTable(), m_table);
                    }
                } catch (Exception e) {
                    loaderLog.error("Failed to load batch", e);
                }
            }
        });
    }
}
|
Synchronized so that when a single batch is filled up we only queue one task to drain the queue . The task will drain the queue until it no longer contains a full batch .
| 148
| 38
|
155,645
|
/**
 * Collects the declared fields of the given class and of every ancestor
 * class, walking the superclass chain recursively.
 *
 * @param startClass class whose own and inherited fields are wanted
 * @return declared fields of startClass followed by those of its ancestors
 */
public static List<Field> getFields(Class<?> startClass) {
    final List<Field> collected =
            new ArrayList<Field>(Arrays.asList(startClass.getDeclaredFields()));
    final Class<?> superClass = startClass.getSuperclass();
    if (superClass != null) {
        collected.addAll(getFields(superClass));
    }
    return collected;
}
|
get all the fields including parents
| 132
| 6
|
155,646
|
/**
 * Creates the singleton ImportManager, registers its importer stats source
 * with the stats agent, and initializes it from the current catalog.
 */
public static synchronized void initialize(int myHostId, CatalogContext catalogContext, HostMessenger messenger) throws BundleException, IOException {
    ImporterStatsCollector collector = new ImporterStatsCollector(myHostId);
    ImportManager manager = new ImportManager(myHostId, messenger, collector);
    VoltDB.instance().getStatsAgent().registerStatsSource(StatsSelector.IMPORTER, myHostId, collector);
    m_self = manager;
    manager.create(catalogContext);
}
|
Create the singleton ImportManager and initialize .
| 117
| 9
|
155,647
|
/**
 * Builds the import processor configuration from the catalog and
 * (re)starts the importers; any failure crashes the local node.
 */
private synchronized void create(CatalogContext catalogContext) {
    try {
        Map<String, ImportConfiguration> processorConfig = loadNewConfigAndBundles(catalogContext);
        restartImporters(processorConfig);
    } catch (final Exception e) {
        VoltDB.crashLocalVoltDB("Error creating import processor", true, e);
    }
}
|
This creates a import connector from configuration provided .
| 79
| 9
|
155,648
|
/**
 * Parses the importer configuration from the deployment, loads the importer
 * bundles and formatter factories it needs into memory, and returns the
 * resulting config map. Entries whose target procedures are missing or
 * whose bundle cannot be loaded are dropped. A formatter that cannot be
 * initialized crashes the local node.
 *
 * @param catalogContext catalog to read the deployment's import element from
 * @return the surviving importer configurations, keyed by config name
 */
private Map<String, ImportConfiguration> loadNewConfigAndBundles(CatalogContext catalogContext) {
    Map<String, ImportConfiguration> newProcessorConfig;
    ImportType importElement = catalogContext.getDeployment().getImport();
    if (importElement == null || importElement.getConfiguration().isEmpty()) {
        newProcessorConfig = new HashMap<>();
    } else {
        newProcessorConfig = CatalogUtil.getImportProcessorConfig(importElement);
    }
    Iterator<Map.Entry<String, ImportConfiguration>> iter = newProcessorConfig.entrySet().iterator();
    while (iter.hasNext()) {
        String configName = iter.next().getKey();
        ImportConfiguration importConfig = newProcessorConfig.get(configName);
        Properties properties = importConfig.getmoduleProperties();
        String importBundleJar = properties.getProperty(ImportDataProcessor.IMPORT_MODULE);
        Preconditions.checkNotNull(importBundleJar, "Import source is undefined or custom import plugin class missing.");
        // drop configs whose target procedures are not present in the catalog
        if (!importConfig.checkProcedures(catalogContext, importLog, configName)) {
            iter.remove();
            continue;
        }
        // NOTE: if bundle is already loaded, loadImporterBundle does nothing and returns true
        boolean bundlePresent = loadImporterBundle(properties);
        if (!bundlePresent) {
            iter.remove();
        }
    }
    // rebuild the formatter-factory cache for the surviving configs
    m_formatterFactories.clear();
    for (ImportConfiguration config : newProcessorConfig.values()) {
        Map<String, FormatterBuilder> formatters = config.getFormatterBuilders();
        if (formatters != null) {
            try {
                for (FormatterBuilder builder : formatters.values()) {
                    String module = builder.getFormatterProperties().getProperty(ImportDataProcessor.IMPORT_FORMATTER);
                    AbstractFormatterFactory formatterFactory = m_formatterFactories.get(module);
                    if (formatterFactory == null) {
                        // first time this module is seen: resolve its factory service
                        URI moduleURI = URI.create(module);
                        formatterFactory = m_moduleManager.getService(moduleURI, AbstractFormatterFactory.class);
                        if (formatterFactory == null) {
                            VoltDB.crashLocalVoltDB("Failed to initialize formatter from: " + module);
                        }
                        m_formatterFactories.put(module, formatterFactory);
                    }
                    builder.setFormatterFactory(formatterFactory);
                }
            } catch (Throwable t) {
                VoltDB.crashLocalVoltDB("Failed to initialize formatter.");
            }
        }
    }
    importLog.info("Final importer count:" + newProcessorConfig.size());
    return newProcessorConfig;
}
|
Parses importer configs and loads the formatters and bundles needed into memory . This is used to generate a new configuration either to load or to compare with existing .
| 608
| 35
|
155,649
|
/**
 * Ensures the importer module named in the properties is loaded: if the
 * bundle is not already cached it is loaded (as an OSGi bundle or a plain
 * class, depending on the "type|jar" module spec) and the bundle and
 * importer-type maps are updated.
 *
 * @param moduleProperties properties carrying the IMPORT_MODULE spec
 * @return true if the bundle is (now) loaded, false if it failed to load
 */
private boolean loadImporterBundle(Properties moduleProperties) {
    String importModuleName = moduleProperties.getProperty(ImportDataProcessor.IMPORT_MODULE);
    // module spec is "<type>|<bundle jar or class name>"
    String attrs[] = importModuleName.split("\\|");
    String bundleJar = attrs[1];
    String moduleType = attrs[0];
    try {
        AbstractImporterFactory importerFactory = m_loadedBundles.get(bundleJar);
        if (importerFactory == null) {
            if (moduleType.equalsIgnoreCase("osgi")) {
                URI bundleURI = URI.create(bundleJar);
                importerFactory = m_moduleManager.getService(bundleURI, AbstractImporterFactory.class);
                if (importerFactory == null) {
                    importLog.error("Failed to initialize importer from: " + bundleJar);
                    return false;
                }
            } else {
                // class based importer.
                Class<?> reference = this.getClass().getClassLoader().loadClass(bundleJar);
                if (reference == null) {
                    importLog.error("Failed to initialize importer from: " + bundleJar);
                    return false;
                }
                importerFactory = (AbstractImporterFactory) reference.newInstance();
            }
            String importerType = importerFactory.getTypeName();
            if (importerType == null || importerType.trim().isEmpty()) {
                throw new RuntimeException("Importer must implement and return a valid unique name.");
            }
            Preconditions.checkState(!m_importersByType.containsKey(importerType),
                    "Importer must implement and return a valid unique name: " + importerType);
            m_importersByType.put(importerType, importerFactory);
            m_loadedBundles.put(bundleJar, importerFactory);
        }
    } catch (Throwable t) {
        importLog.error("Failed to configure import handler for " + bundleJar, t);
        // propagate rethrows t as unchecked, so the return below is reached only on success
        Throwables.propagate(t);
    }
    return true;
}
|
Checks whether the importer ' s module has already been loaded into memory . If the bundle does not exist , it loads one and updates the bundle mapping records .
| 456
| 33
|
155,650
|
/**
 * Prints an exception message to stdout, but stops after
 * MAX_CAUGHT_EXCEPTION_MESSAGES messages so the console is not flooded;
 * a final notice is printed once, when the limit is reached.
 */
protected static void printCaughtException(String exceptionMessage) {
    final int count = ++countCaughtExceptions;
    if (count <= MAX_CAUGHT_EXCEPTION_MESSAGES) {
        System.out.println(exceptionMessage);
    }
    if (count == MAX_CAUGHT_EXCEPTION_MESSAGES) {
        System.out.println("In NonVoltDBBackend, reached limit of " + MAX_CAUGHT_EXCEPTION_MESSAGES
                + " exception messages to be printed.");
    }
}
|
Print a message about an Exception that was caught ; but limit the number of such print messages so that the console is not swamped by them .
| 119
| 29
|
155,651
|
protected List < String > getAllColumns ( String tableName ) { List < String > columns = new ArrayList < String > ( ) ; try { // Lower-case table names are required for PostgreSQL; we might need to // alter this if we use another comparison database (besides HSQL) someday ResultSet rs = dbconn . getMetaData ( ) . getColumns ( null , null , tableName . toLowerCase ( ) , null ) ; while ( rs . next ( ) ) { columns . add ( rs . getString ( 4 ) ) ; } } catch ( SQLException e ) { printCaughtException ( "In NonVoltDBBackend.getAllColumns, caught SQLException: " + e ) ; } return columns ; }
|
Returns all column names for the specified table in the order defined in the DDL .
| 165
| 17
|
155,652
|
protected List < String > getPrimaryKeys ( String tableName ) { List < String > pkCols = new ArrayList < String > ( ) ; try { // Lower-case table names are required for PostgreSQL; we might need to // alter this if we use another comparison database (besides HSQL) someday ResultSet rs = dbconn . getMetaData ( ) . getPrimaryKeys ( null , null , tableName . toLowerCase ( ) ) ; while ( rs . next ( ) ) { pkCols . add ( rs . getString ( 4 ) ) ; } } catch ( SQLException e ) { printCaughtException ( "In NonVoltDBBackend.getPrimaryKeys, caught SQLException: " + e ) ; } return pkCols ; }
|
Returns all primary key column names for the specified table in the order defined in the DDL .
| 170
| 19
|
155,653
|
/**
 * Returns the column names of the specified table that are not part of its
 * primary key, in the order defined in the DDL.
 */
protected List<String> getNonPrimaryKeyColumns(String tableName) {
    List<String> nonPkColumns = getAllColumns(tableName);
    nonPkColumns.removeAll(getPrimaryKeys(tableName));
    return nonPkColumns;
}
|
Returns all non - primary - key column names for the specified table in the order defined in the DDL .
| 50
| 22
|
155,654
|
/**
 * Applies each QueryTransformer to the query in turn, feeding the output of
 * one transformation into the next.
 */
protected String transformQuery(String query, QueryTransformer... qts) {
    String transformed = query;
    for (QueryTransformer transformer : qts) {
        transformed = transformQuery(transformed, transformer);
    }
    return transformed;
}
|
Calls the transformQuery method above multiple times for each specified QueryTransformer .
| 49
| 16
|
155,655
|
/**
 * Writes the original and modified SQL statements to the transformed-SQL
 * output file, if that file is open and the two statements actually differ
 * (i.e. some transformation took place).
 */
static protected void printTransformedSql(String originalSql, String modifiedSql) {
    if (transformedSqlFileWriter == null || originalSql.equals(modifiedSql)) {
        return;
    }
    try {
        transformedSqlFileWriter.write("original SQL: " + originalSql + "\n");
        transformedSqlFileWriter.write("modified SQL: " + modifiedSql + "\n");
    } catch (IOException e) {
        printCaughtException("Caught IOException:\n " + e
                + "\noriginal SQL: " + originalSql + "\nmodified SQL: " + modifiedSql);
    }
}
|
Prints the original and modified SQL statements to the Transformed SQL output file assuming that that file is defined ; and only if the original and modified SQL are not the same i . e . only if some transformation has indeed taken place .
| 141
| 47
|
155,656
|
private static SQLPatternPart makeGroup ( boolean capture , String captureLabel , SQLPatternPart part ) { // Need an outer part if capturing something that's already a group (capturing or not) boolean alreadyGroup = ( part . m_flags & ( SQLPatternFactory . GROUP | SQLPatternFactory . CAPTURE ) ) != 0 ; SQLPatternPart retPart = alreadyGroup ? new SQLPatternPartElement ( part ) : part ; if ( capture ) { retPart . m_flags |= SQLPatternFactory . CAPTURE ; retPart . setCaptureLabel ( captureLabel ) ; } else { retPart . m_flags |= SQLPatternFactory . GROUP ; } return retPart ; }
|
Make a capturing or non - capturing group
| 145
| 8
|
155,657
|
/**
 * Loads an in-memory HSQLDB instance with a unique name and returns an
 * HSQLInterface wrapping its session.
 *
 * Side effect: mutates the JVM-wide default time zone to GMT+0 so all
 * VoltDB data paths represent time consistently.
 *
 * @param psMgr parameter state manager to attach to the new session
 * @throws HsqlException (rethrown after logging) if session setup fails
 */
public static HSQLInterface loadHsqldb(ParameterStateManager psMgr) {
    // Specifically set the timezone to UTC to avoid the default usage local timezone in HSQL.
    // This ensures that all VoltDB data paths use the same timezone for representing time.
    TimeZone.setDefault(TimeZone.getTimeZone("GMT+0"));
    // unique per-process name: monotonically increasing id plus wall-clock time
    String name = "hsqldbinstance-" + String.valueOf(instanceId) + "-" + String.valueOf(System.currentTimeMillis());
    instanceId++;
    HsqlProperties props = new HsqlProperties();
    try {
        Session sessionProxy = DatabaseManager.newSession(DatabaseURL.S_MEM, name, "SA", "", props, 0);
        // make HSQL case insensitive
        sessionProxy.executeDirectStatement("SET IGNORECASE TRUE;");
        sessionProxy.setParameterStateManager(psMgr);
        return new HSQLInterface(sessionProxy);
    } catch (HsqlException caught) {
        m_logger.warn("Unexpected error initializing the SQL parser", caught);
        caught.printStackTrace();
        throw caught;
    }
}
|
Load up an HSQLDB in - memory instance .
| 258
| 11
|
155,658
|
/**
 * Modifies the current schema with a SQL DDL command and returns the
 * VoltXMLDiff describing the changes to the affected table, including any
 * views dropped by a CASCADE. Also updates the remembered per-table schema
 * snapshots (lastSchema).
 *
 * @param stmtInfo pre-processed DDL info; null forces a planning failure
 * @param ddl the DDL text to execute
 * @throws HSQLParseException if HSQL rejects the DDL or pre-processing failed
 */
public VoltXMLDiff runDDLCommandAndDiff(HSQLDDLInfo stmtInfo, String ddl) throws HSQLParseException {
    // name of the table we're going to have to diff (if any)
    String expectedTableAffected = null;
    // If we fail to pre-process a statement, then we want to fail, but we're
    // still going to run the statement through HSQL to get its error message.
    // This variable helps us make sure we don't fail to preprocess and then
    // succeed at running the statement through HSQL.
    boolean expectFailure = false;
    // If cascade, we're going to need to look for any views that might have
    // gotten deleted. So get a list of all tables and views that existed before
    // we run the ddl, then we'll do a comparison later.
    Set<String> existingTableNames = null;
    if (stmtInfo != null) {
        if (stmtInfo.cascade) {
            existingTableNames = getTableNames();
        }
        // we either have an index name or a table/view name, but not both
        if (stmtInfo.noun == HSQLDDLInfo.Noun.INDEX) {
            if (stmtInfo.verb == HSQLDDLInfo.Verb.CREATE) {
                expectedTableAffected = stmtInfo.secondName;
            } else {
                expectedTableAffected = tableNameForIndexName(stmtInfo.name);
            }
        } else {
            expectedTableAffected = stmtInfo.name;
        }
        // Note that we're assuming ifexists can't happen with "create"
        expectFailure = (expectedTableAffected == null) && !stmtInfo.ifexists;
    } else {
        expectFailure = true;
    }
    runDDLCommand(ddl);
    // If we expect to fail, but the statement above didn't bail...
    // (Shouldn't get here ever I think)
    if (expectFailure) {
        throw new HSQLParseException("Unable to plan statement due to VoltDB DDL pre-processing error");
    }
    // sanity checks for non-failure
    assert (stmtInfo != null);
    // get old and new XML representations for the affected table
    VoltXMLElement tableXMLNew = null, tableXMLOld = null;
    if (expectedTableAffected != null) {
        tableXMLNew = getXMLForTable(expectedTableAffected);
        tableXMLOld = lastSchema.get(expectedTableAffected);
    }
    // valid reasons for tableXMLNew to be null are DROP IF EXISTS and not much else
    if (tableXMLNew == null) {
        tableXMLNew = emptySchema;
    }
    // the old table can be null for CREATE TABLE or for IF EXISTS stuff
    if (tableXMLOld == null) {
        tableXMLOld = emptySchema;
    }
    VoltXMLDiff diff = VoltXMLElement.computeDiff(tableXMLOld, tableXMLNew);
    // now find any views that might be missing and make sure the diff reflects that
    // they're gone
    if (stmtInfo.cascade) {
        Set<String> finalTableNames = getTableNames();
        for (String tableName : existingTableNames) {
            if (!finalTableNames.contains(tableName)) {
                tableName = tableName.toLowerCase();
                tableXMLOld = lastSchema.get(tableName).children.get(0);
                lastSchema.remove(tableName);
                if (tableName.equals(expectedTableAffected)) {
                    continue;
                }
                diff.m_removedElements.add(tableXMLOld);
            }
        }
    }
    // this is a hack to allow the diff-apply-er to accept a diff that has no order
    diff.m_elementOrder.clear();
    // remember the current schema
    if (expectedTableAffected != null) {
        lastSchema.put(expectedTableAffected, tableXMLNew.duplicate());
    }
    return diff;
}
|
Modify the current schema with a SQL DDL command and get the diff which represents the changes .
| 866
| 20
|
155,659
|
/**
 * Executes a SQL DDL statement against the current schema.
 *
 * @param ddl the DDL text to execute
 * @throws HSQLParseException if HSQL reports an error for the statement
 */
public void runDDLCommand(String ddl) throws HSQLParseException {
    sessionProxy.clearLocalTables();
    final Result outcome = sessionProxy.executeDirectStatement(ddl);
    if (outcome.hasError()) {
        throw new HSQLParseException(outcome.getMainString());
    }
}
|
Modify the current schema with a SQL DDL command .
| 72
| 12
|
155,660
|
private void fixupInStatementExpressions ( VoltXMLElement expr ) throws HSQLParseException { if ( doesExpressionReallyMeanIn ( expr ) ) { inFixup ( expr ) ; // can't return because in with subquery can be nested } // recursive hunt for ( VoltXMLElement child : expr . children ) { fixupInStatementExpressions ( child ) ; } }
|
Recursively find all in - lists subquery row comparisons found in the XML and munge them into the simpler thing we want to pass to the AbstractParsedStmt .
| 84
| 37
|
155,661
|
/**
 * Munges an expression that really represents an IN-list into the simpler
 * "operation/in" form consumed by AbstractParsedStmt: the element ends up
 * with exactly two children, the row being tested and the IN-list (a
 * literal vector, a subquery, or a single value).
 */
private void inFixup(VoltXMLElement inElement) {
    // make this an in expression
    inElement.name = "operation";
    inElement.attributes.put("optype", "in");
    // classify the element's children by role
    VoltXMLElement rowElem = null;
    VoltXMLElement tableElem = null;
    VoltXMLElement subqueryElem = null;
    VoltXMLElement valueElem = null;
    for (VoltXMLElement child : inElement.children) {
        if (child.name.equals("row")) {
            rowElem = child;
        } else if (child.name.equals("table")) {
            tableElem = child;
        } else if (child.name.equals("tablesubquery")) {
            subqueryElem = child;
        } else if (child.name.equals("value")) {
            valueElem = child;
        }
    }
    VoltXMLElement inlist;
    if (tableElem != null) {
        // make the table expression an in-list
        inlist = new VoltXMLElement("vector");
        for (VoltXMLElement child : tableElem.children) {
            assert (child.name.equals("row"));
            assert (child.children.size() == 1);
            inlist.children.addAll(child.children);
        }
    } else if (subqueryElem != null) {
        inlist = subqueryElem;
    } else {
        assert valueElem != null;
        inlist = valueElem;
    }
    assert (rowElem != null);
    assert (inlist != null);
    // replace all children with exactly [row, inlist], in that order
    inElement.children.clear();
    // add the row
    inElement.children.add(rowElem);
    // add the inlist
    inElement.children.add(inlist);
}
|
Take an equality - test expression that represents in - list and munge it into the simpler thing we want to output to the AbstractParsedStmt for its AbstractExpression classes .
| 385
| 38
|
155,662
|
@ SuppressWarnings ( "unused" ) private void printTables ( ) { try { String schemaName = sessionProxy . getSchemaName ( null ) ; System . out . println ( "*** Tables For Schema: " + schemaName + " ***" ) ; } catch ( HsqlException caught ) { caught . printStackTrace ( ) ; } // load all the tables HashMappedList hsqlTables = getHSQLTables ( ) ; for ( int i = 0 ; i < hsqlTables . size ( ) ; i ++ ) { Table table = ( Table ) hsqlTables . get ( i ) ; System . out . println ( table . getName ( ) . name ) ; } }
|
Debug - only method that prints out the names of all tables in the current schema .
| 157
| 17
|
155,663
|
public VoltXMLElement getXMLForTable ( String tableName ) throws HSQLParseException { VoltXMLElement xml = emptySchema . duplicate ( ) ; // search all the tables XXX probably could do this non-linearly, // but i don't know about case-insensitivity yet HashMappedList hsqlTables = getHSQLTables ( ) ; for ( int i = 0 ; i < hsqlTables . size ( ) ; i ++ ) { Table table = ( Table ) hsqlTables . get ( i ) ; String candidateTableName = table . getName ( ) . name ; // found the table of interest if ( candidateTableName . equalsIgnoreCase ( tableName ) ) { VoltXMLElement vxmle = table . voltGetTableXML ( sessionProxy ) ; assert ( vxmle != null ) ; xml . children . add ( vxmle ) ; return xml ; } } return null ; }
|
Get a serialized XML representation of a particular table .
| 206
| 11
|
155,664
|
/**
 * Creates commit trackers and offset bookkeeping for any topic-partitions
 * this importer has not seen before; partitions that already have a tracker
 * are left untouched. The tracker and committed-offset maps are swapped in
 * as whole copies only when at least one new partition was added.
 *
 * @param partitions the partitions currently assigned to this importer
 */
private void calculateTrackers(Collection<TopicPartition> partitions) {
    // work on copies; published atomically at the end if anything changed
    Map<TopicPartition, CommitTracker> trackers = new HashMap<>();
    trackers.putAll(m_trackerMap.get());
    Map<TopicPartition, AtomicLong> lastCommittedOffSets = new HashMap<>();
    lastCommittedOffSets.putAll(m_lastCommittedOffSets.get());
    boolean newTopicPartition = false;
    for (TopicPartition partition : partitions) {
        if (m_trackerMap.get().get(partition) != null) {
            // already tracked
            continue;
        }
        newTopicPartition = true;
        long startOffset = -1L;
        CommitTracker commitTracker = null;
        // time-based commit policy uses the simple tracker; otherwise track gaps durably
        if (m_config.getCommitPolicy() == KafkaCommitPolicy.TIME && m_config.getTriggerValue() > 0) {
            commitTracker = new SimpleTracker();
        } else {
            commitTracker = new DurableTracker(KafkaConstants.IMPORT_GAP_LEAD,
                    partition.topic(), partition.partition(), m_config.getGroupId());
        }
        trackers.put(partition, commitTracker);
        try {
            // resume from the group's last committed offset, if any
            OffsetAndMetadata offsetAndMetaData = m_consumer.committed(partition);
            startOffset = offsetAndMetaData != null ? offsetAndMetaData.offset() : -1L;
            if (startOffset > -1L) {
                commitTracker.resetTo(startOffset);
            }
        } catch (KafkaException e) {
            LOGGER.error("Failed to read committed offsets for group "
                    + m_config.getGroupId() + partition + " " + e.getMessage());
        }
        lastCommittedOffSets.put(partition, new AtomicLong(startOffset));
        m_pauseOffsets.put(partition, new AtomicLong(-1));
        m_workTrackers.put(partition, new PendingWorkTracker());
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Starting offset for group:" + m_config.getGroupId() + ":"
                    + startOffset + " partition:" + partition);
        }
    }
    if (newTopicPartition) {
        m_trackerMap.set(trackers);
        m_lastCommittedOffSets.set(lastCommittedOffSets);
    }
}
|
add trackers for new topic - partition in this importer
| 525
| 12
|
155,665
|
/**
 * Repositions the consumer for each partition in the seek list to its last
 * committed offset so the next poll resumes from the correct place.
 * Partitions with no committed offset are skipped, as are repeated seeks
 * to an unchanged offset.
 */
private void seek(List<TopicPartition> seekList) {
    for (TopicPartition tp : seekList) {
        AtomicLong lastCommittedOffset = m_lastCommittedOffSets.get().get(tp);
        if (lastCommittedOffset != null && lastCommittedOffset.get() > -1L) {
            AtomicLong lastSeeked = m_lastSeekedOffSets.get(tp);
            //eliminate duplicate seek
            if (lastSeeked != null && lastSeeked.get() == lastCommittedOffset.get()) {
                continue;
            }
            m_consumer.seek(tp, lastCommittedOffset.longValue());
            // remember where we seeked to, so the duplicate check above works next time
            m_lastSeekedOffSets.put(tp, new AtomicLong(lastCommittedOffset.get()));
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Moves offset for group " + m_config.getGroupId()
                        + " -" + tp + " to " + lastCommittedOffset);
            }
        }
    }
}
|
Move offsets to correct positions for next poll
| 241
| 8
|
155,666
|
/**
 * Zero-pads the decimal representation of {@code value} on the left up to
 * {@code precision} characters. If the representation is longer than
 * precision, the leftmost {@code precision} digits are dropped (keeping
 * the rightmost ones). If {@code maxSize} is smaller than precision the
 * result is truncated on the right to maxSize characters. Negative values
 * are treated as positive.
 *
 * NOTE(review): Long.MIN_VALUE cannot be negated without overflow — assumed
 * never passed here; confirm against callers.
 */
public static String toZeroPaddedString(long value, int precision, int maxSize) {
    StringBuffer sb = new StringBuffer();
    if (value < 0) {
        value = -value;
    }
    String s = Long.toString(value);
    if (s.length() > precision) {
        s = s.substring(precision);
    }
    for (int i = s.length(); i < precision; i++) {
        // pad with '0' — the original appended ' ', contradicting the
        // method's documented zero-padding contract
        sb.append('0');
    }
    sb.append(s);
    if (maxSize < precision) {
        sb.setLength(maxSize);
    }
    return sb.toString();
}
|
If necessary adds zeros to the beginning of a value so that the total length matches the given precision otherwise trims the right digits . Then if maxSize is smaller than precision trims the right digits to maxSize . Negative values are treated as positive
| 143
| 50
|
155,667
|
/**
 * Returns a copy of {@code source} in which every non-alphanumeric
 * character is replaced by {@code substitute}; a digit in the first
 * position is also replaced. Letters are lower-cased. By sqlbob
 */
public static String toLowerSubset(String source, char substitute) {
    final int length = source.length();
    final StringBuffer result = new StringBuffer(length);
    for (int pos = 0; pos < length; pos++) {
        final char c = source.charAt(pos);
        if (!Character.isLetterOrDigit(c)) {
            result.append(substitute);
        } else if ((pos == 0) && Character.isDigit(c)) {
            // a leading digit is replaced too (identifier-friendly)
            result.append(substitute);
        } else {
            result.append(Character.toLowerCase(c));
        }
    }
    return result.toString();
}
|
Returns a string with non alphanumeric chars converted to the substitute character . A digit first character is also converted . By sqlbob
| 145
| 27
|
155,668
|
/**
 * Builds a bracketed, comma-separated list from the given array, e.g.
 * {@code [1,2,3]}; works with any array type via reflection.
 *
 * NOTE(review): the previous code emitted a space for the brackets and the
 * separator, contradicting the documented "bracketed CSV list" contract;
 * confirm the intended delimiter characters against callers.
 */
public static String arrayToString(Object array) {
    int len = Array.getLength(array);
    int last = len - 1;
    StringBuffer sb = new StringBuffer(2 * (len + 1));
    sb.append('[');
    for (int i = 0; i < len; i++) {
        sb.append(Array.get(array, i));
        if (i != last) {
            sb.append(',');
        }
    }
    sb.append(']');
    return sb.toString();
}
|
Builds a bracketed CSV list from the array
| 123
| 10
|
155,669
|
/**
 * Appends {@code s1 + separator + s2 + terminator} to the buffer, in that
 * order.
 */
public static void appendPair(StringBuffer b, String s1, String s2, String separator, String terminator) {
    b.append(s1).append(separator).append(s2).append(terminator);
}
|
Appends a pair of string to the string buffer using the separator between and terminator at the end
| 61
| 21
|
155,670
|
/**
 * Returns the length of {@code s} with trailing spaces excluded
 * (0 for an empty or all-space string).
 */
public static int rightTrimSize(String s) {
    int end = s.length();
    while (end > 0 && s.charAt(end - 1) == ' ') {
        end--;
    }
    return end;
}
|
Returns the size of substring that does not contain any trailing spaces
| 56
| 13
|
155,671
|
/**
 * Returns the index of the first non-space character at or after
 * {@code start}, or {@code s.length()} if only spaces remain.
 */
public static int skipSpaces(String s, int start) {
    final int limit = s.length();
    int pos = start;
    while (pos < limit && s.charAt(pos) == ' ') {
        pos++;
    }
    return pos;
}
|
Skips any spaces at or after start and returns the index of first non - space character ;
| 61
| 19
|
155,672
|
/**
 * Splits {@code s} on every occurrence of {@code separator}. Unlike
 * {@link String#split(String)}, the separator is treated literally (not as
 * a regex) and trailing empty strings are preserved; if the separator never
 * occurs, the whole string is returned as the only element.
 */
public static String[] split(String s, String separator) {
    // java.util.ArrayList replaces the raw-typed HsqlArrayList: same
    // iteration order and semantics, no unchecked cast needed
    java.util.List<String> list = new java.util.ArrayList<String>();
    int currindex = 0;
    for (boolean more = true; more; ) {
        int nextindex = s.indexOf(separator, currindex);
        if (nextindex == -1) {
            nextindex = s.length();
            more = false;
        }
        list.add(s.substring(currindex, nextindex));
        currindex = nextindex + separator.length();
    }
    return list.toArray(new String[list.size()]);
}
|
Splits the string into an array using the separator . If separator is not found in the string the whole string is returned in the array .
| 143
| 30
|
155,673
|
/**
 * Appends the export-stats columns to the schema, after the superclass
 * columns.
 *
 * WARNING: external tools (cluster.py, checkstats.py) depend on this exact
 * column order — update them if the order here changes.
 */
@Override
protected void populateColumnSchema(ArrayList<ColumnInfo> columns) {
    super.populateColumnSchema(columns);
    columns.add(new ColumnInfo(VoltSystemProcedure.CNAME_SITE_ID, VoltSystemProcedure.CTYPE_ID));
    columns.add(new ColumnInfo(Columns.PARTITION_ID, VoltType.BIGINT));
    columns.add(new ColumnInfo(Columns.SOURCE_NAME, VoltType.STRING));
    columns.add(new ColumnInfo(Columns.EXPORT_TARGET, VoltType.STRING));
    columns.add(new ColumnInfo(Columns.ACTIVE, VoltType.STRING));
    columns.add(new ColumnInfo(Columns.TUPLE_COUNT, VoltType.BIGINT));
    columns.add(new ColumnInfo(Columns.TUPLE_PENDING, VoltType.BIGINT));
    columns.add(new ColumnInfo(Columns.LAST_QUEUED_TIMESTAMP, VoltType.TIMESTAMP));
    columns.add(new ColumnInfo(Columns.LAST_ACKED_TIMESTAMP, VoltType.TIMESTAMP));
    columns.add(new ColumnInfo(Columns.AVERAGE_LATENCY, VoltType.BIGINT));
    columns.add(new ColumnInfo(Columns.MAX_LATENCY, VoltType.BIGINT));
    columns.add(new ColumnInfo(Columns.QUEUE_GAP, VoltType.BIGINT));
    columns.add(new ColumnInfo(Columns.STATUS, VoltType.STRING));
}
|
Check cluster . py and checkstats . py if order of the columns is changed
| 375
| 16
|
155,674
|
/**
 * Called when the SPI offers a new message: buffers the item, then attempts
 * to release any buffered reads up to the given handle.
 */
private void offerInternal(Mailbox mailbox, Item item, long handle) {
    m_bufferedReads.add(item);
    releaseBufferedReads(mailbox, handle);
}
|
SPI offers a new message .
| 40
| 7
|
155,675
|
/**
 * Returns the total bytes queued: the in-memory blocks' sizes plus the
 * on-disk size, minus the per-object header overhead reported by the
 * reader. Only used in tests; should be removed.
 *
 * @throws IOException if the persistent reader fails to report its size
 */
public long sizeInBytes() throws IOException {
    long memoryBlockUsage = 0;
    for (StreamBlock b : m_memoryDeque) {
        //Use only total size, but throw in the USO
        //to make book keeping consistent when flushed to disk
        //Also dont count persisted blocks.
        memoryBlockUsage += b.totalSize();
    }
    //Subtract USO from on disk size
    return memoryBlockUsage + m_reader.sizeInBytes() - (StreamBlock.HEADER_SIZE * m_reader.getNumObjects());
}
|
Only used in tests should be removed .
| 119
| 8
|
155,676
|
/**
 * Truncates the persistent deque so no rows with a sequence number greater than
 * {@code truncationSeqNo} remain, then closes and reopens the deque/reader.
 * Blocks entirely past the truncation point are discarded whole; a block that
 * straddles the point is cut mid-block by limiting the buffer at the first
 * excluded row and rewriting the tuple count in the block header.
 *
 * Precondition: all blocks must already be flushed to disk (asserts
 * m_memoryDeque is empty). Buffer is parsed little-endian; byte order is
 * restored before returning the container.
 *
 * NOTE(review): on partial truncation, `offset` rows are preserved but the
 * header tuple count is rewritten as (offset - 1) — this looks like an
 * off-by-one; confirm against the PBD segment header layout at the top of
 * this file.
 *
 * @param truncationSeqNo highest sequence number to keep
 * @throws IOException on deque parse/close/reopen failure
 */
public void truncateToSequenceNumber ( final long truncationSeqNo ) throws IOException { assert ( m_memoryDeque . isEmpty ( ) ) ; m_persistentDeque . parseAndTruncate ( new BinaryDequeTruncator ( ) { @ Override public TruncatorResponse parse ( BBContainer bbc ) { ByteBuffer b = bbc . b ( ) ; ByteOrder endianness = b . order ( ) ; b . order ( ByteOrder . LITTLE_ENDIAN ) ; try { final long startSequenceNumber = b . getLong ( ) ; // If after the truncation point is the first row in the block, the entire block is to be discarded if ( startSequenceNumber > truncationSeqNo ) { return PersistentBinaryDeque . fullTruncateResponse ( ) ; } final long committedSequenceNumber = b . getLong ( ) ; // committedSequenceNumber final int tupleCountPos = b . position ( ) ; final int tupleCount = b . getInt ( ) ; // There is nothing to do with this buffer final long lastSequenceNumber = startSequenceNumber + tupleCount - 1 ; if ( lastSequenceNumber <= truncationSeqNo ) { return null ; } b . getLong ( ) ; // uniqueId // Partial truncation int offset = 0 ; while ( b . hasRemaining ( ) ) { if ( startSequenceNumber + offset > truncationSeqNo ) { // The sequence number of this row is the greater than the truncation sequence number. // Don't want this row, but want to preserve all rows before it. // Move back before the row length prefix, txnId and header // Return everything in the block before the truncation point. // Indicate this is the end of the interesting data. b . limit ( b . position ( ) ) ; // update tuple count in the header b . putInt ( tupleCountPos , offset - 1 ) ; b . position ( 0 ) ; return new ByteBufferTruncatorResponse ( b ) ; } offset ++ ; // Not the row we are looking to truncate at. Skip past it (row length + row length field). final int rowLength = b . getInt ( ) ; b . position ( b . position ( ) + rowLength ) ; } return null ; } finally { b . order ( endianness ) ; } } } ) ; // close reopen reader m_persistentDeque . 
close ( ) ; CatalogContext catalogContext = VoltDB . instance ( ) . getCatalogContext ( ) ; Table streamTable = VoltDB . instance ( ) . getCatalogContext ( ) . database . getTables ( ) . get ( m_streamName ) ; StreamTableSchemaSerializer ds = new StreamTableSchemaSerializer ( streamTable , m_streamName , catalogContext . m_genId ) ; m_persistentDeque = new PersistentBinaryDeque ( m_nonce , ds , new VoltFile ( m_path ) , exportLog , ! DISABLE_COMPRESSION ) ; m_reader = m_persistentDeque . openForRead ( m_nonce ) ; // temporary debug stmt exportLog . info ( "After truncate, PBD size is " + ( m_reader . sizeInBytes ( ) - ( 8 * m_reader . getNumObjects ( ) ) ) ) ; }
|
See the PBD segment layout at the beginning of this file.
| 726
| 11
|
155,677
|
/**
 * Sets the bit at {@code pos}, growing the map as needed, and returns the
 * bit's previous value (0 or 1). Also advances limitPos past {@code pos}.
 *
 * @param pos zero-based bit position; bit 0 is the MSB of word 0
 * @return 0 if the bit was previously clear, 1 if it was already set
 */
public int set(int pos) {
    // Grow until the requested position fits.
    while (pos >= capacity) {
        doubleCapacity();
    }
    if (pos >= limitPos) {
        limitPos = pos + 1;
    }
    final int wordIndex = pos >> 5;
    // Bits are stored most-significant-first within each 32-bit word.
    final int bit = 0x80000000 >>> (pos & 0x1F);
    final int oldWord = map[wordIndex];
    map[wordIndex] = oldWord | bit;
    return (oldWord & bit) == 0 ? 0 : 1;
}
|
Sets pos and returns old value
| 103
| 7
|
155,678
|
/**
 * ANDs {@code count} bits taken from the high end of {@code source} into
 * {@code map}, starting at absolute bit position {@code pos} (bit 0 is the MSB
 * of map[0]). Bits outside the [pos, pos + count) window are left untouched.
 * Writes that would fall past the end of {@code map} are silently ignored,
 * matching the existing guard on the first byte.
 *
 * Assumes {@code count <= 8}; bits of {@code source} below the top
 * {@code count} are masked off before use.
 *
 * @param map    bit map modified in place
 * @param pos    starting bit position of the window
 * @param source byte whose most-significant {@code count} bits are the operand
 * @param count  number of bits to combine
 */
public static void and(byte[] map, int pos, byte source, int count) {
    int shift = pos & 0x07;
    // Source bits aligned into the first affected byte.
    int mask = (source & 0xff) >>> shift;
    int innermask = 0xff >> shift;
    int index = pos / 8;

    if (count < 8) {
        // Trim the window so only the top `count` bits participate.
        innermask = innermask >>> (8 - count);
        innermask = innermask << (8 - count);
    }

    mask &= innermask;
    innermask = ~innermask;

    if (index >= map.length) {
        return;
    }

    byte b = map[index];
    // Clear the window, then OR back (old contents AND source bits).
    map[index] = (byte) (b & innermask);
    b = (byte) (b & mask);
    map[index] = (byte) (map[index] | b);

    if (shift == 0) {
        return;
    }

    // The window straddles a byte boundary: handle the spill-over byte.
    shift = 8 - shift;

    if (count > shift) {
        if (index + 1 >= map.length) {
            // Fix: previously indexed past the end of map and threw
            // ArrayIndexOutOfBoundsException; ignore out-of-range bits the
            // same way the first-byte guard above does.
            return;
        }
        mask = ((source & 0xff) << 8) >>> shift;
        innermask = 0xff00 >>> shift;
        innermask = ~innermask;
        b = map[index + 1];
        map[index + 1] = (byte) (b & innermask);
        b = (byte) (b & mask);
        map[index + 1] = (byte) (map[index + 1] | b);
    }
}
|
AND count bits from source with map contents starting at pos
| 279
| 11
|
155,679
|
/**
 * ORs {@code count} bits taken from the high end of {@code source} into
 * {@code map}, starting at absolute bit position {@code pos} (bit 0 is the MSB
 * of map[0]). Writes that would fall past the end of {@code map} are silently
 * ignored, matching the existing guard on the first byte.
 *
 * Assumes {@code count <= 8} and that the bits of {@code source} below the top
 * {@code count} are zero — TODO confirm; unlike {@link #and}, no inner mask is
 * applied, so non-zero low bits of source would leak into the map.
 *
 * @param map    bit map modified in place
 * @param pos    starting bit position of the window
 * @param source byte whose most-significant {@code count} bits are the operand
 * @param count  number of bits to combine
 */
public static void or(byte[] map, int pos, byte source, int count) {
    int shift = pos & 0x07;
    // Source bits aligned into the first affected byte.
    int mask = (source & 0xff) >>> shift;
    int index = pos / 8;

    if (index >= map.length) {
        return;
    }

    map[index] = (byte) (map[index] | mask);

    if (shift == 0) {
        return;
    }

    // The window straddles a byte boundary: handle the spill-over byte.
    shift = 8 - shift;

    if (count > shift) {
        if (index + 1 >= map.length) {
            // Fix: previously indexed past the end of map and threw
            // ArrayIndexOutOfBoundsException; ignore out-of-range bits the
            // same way the first-byte guard above does.
            return;
        }
        mask = ((source & 0xff) << 8) >>> shift;
        map[index + 1] = (byte) (map[index + 1] | mask);
    }
}
|
OR count bits from source with map contents starting at pos
| 147
| 11
|
155,680
|
/**
 * Appends a key/value pair without maintaining sort order. Grows the table when
 * full unless it is fixed-size, in which case the add is refused. Clears the
 * sorted flag if the new entry breaks the current ordering of the active
 * search column (values when sortOnValues, keys otherwise).
 *
 * @param key   key to append
 * @param value value to append
 * @return true if the pair was stored, false if the fixed-size table was full
 */
public synchronized boolean addUnsorted(int key, int value) {
    if (count == capacity) {
        if (fixedSize) {
            return false;
        }
        doubleCapacity();
    }
    if (sorted && count != 0) {
        // Compare against the last entry in whichever column ordering is active.
        int lastStored = sortOnValues ? values[count - 1] : keys[count - 1];
        int incoming = sortOnValues ? value : key;
        if (incoming < lastStored) {
            sorted = false;
        }
    }
    hasChanged = true;
    keys[count] = key;
    values[count] = value;
    count++;
    return true;
}
|
Adds a pair into the table .
| 123
| 7
|
155,681
|
/**
 * Adds a pair only if no entry with the same value exists in the active search
 * column (values when sortOnValues, keys otherwise). Sorts the table first if
 * needed, locates the insertion slot via binaryEmptySlotSearch (which reports
 * -1 when the target already exists), shifts later rows up, and inserts.
 *
 * @param key   key to insert
 * @param value value to insert
 * @return true if inserted; false if the table was full and fixed-size, or the
 *         target already exists in the search column
 */
public synchronized boolean addUnique ( int key , int value ) { if ( count == capacity ) { if ( fixedSize ) { return false ; } else { doubleCapacity ( ) ; } } if ( ! sorted ) { fastQuickSort ( ) ; } targetSearchValue = sortOnValues ? value : key ; int i = binaryEmptySlotSearch ( ) ; if ( i == - 1 ) { return false ; } hasChanged = true ; if ( count != i ) { moveRows ( i , i + 1 , count - i ) ; } keys [ i ] = key ; values [ i ] = value ; count ++ ; return true ; }
|
Adds a pair ensuring no duplicate key xor value already exists in the current search target column .
| 136
| 19
|
155,682
|
/**
 * Returns the index of the lowest element equal to the current search target
 * (targetSearchValue, via {@link #compare}), or -1 if not present. Assumes the
 * table is sorted on the active column.
 *
 * @return index of the first match, or -1
 */
private int binaryFirstSearch() {
    int low = 0;
    int high = count;
    int found = count;
    while (low < high) {
        // Unsigned shift avoids int overflow of (low + high) for large counts.
        int mid = (low + high) >>> 1;
        int cmp = compare(mid);
        if (cmp < 0) {
            high = mid;
        } else if (cmp > 0) {
            low = mid + 1;
        } else {
            // Remember this match and keep searching the lower half for an earlier one.
            high = mid;
            found = mid;
        }
    }
    return found == count ? -1 : found;
}
|
Returns the index of the lowest element == the given search target or - 1
| 107
| 15
|
155,683
|
/**
 * Returns the index of the lowest element strictly greater than the current
 * search target, or -1 when no such element exists. Assumes the table is
 * sorted on the active column.
 *
 * @return index of the first greater element, or -1
 */
private int binaryGreaterSearch() {
    int low = 0;
    int high = count;
    while (low < high) {
        // Unsigned shift avoids int overflow of (low + high) for large counts.
        int mid = (low + high) >>> 1;
        if (compare(mid) < 0) {
            high = mid;
        } else {
            low = mid + 1;
        }
    }
    return low == count ? -1 : low;
}
|
Returns the index of the lowest element > the given search target
| 86
| 12
|
155,684
|
/**
 * Returns the index of the lowest element greater than or equal to the current
 * search target, or {@code count} when every element is smaller (i.e. the
 * insertion slot). Assumes the table is sorted on the active column.
 *
 * @return insertion index in [0, count]
 */
private int binarySlotSearch() {
    int low = 0;
    int high = count;
    while (low < high) {
        // Unsigned shift avoids int overflow of (low + high) for large counts.
        int mid = (low + high) >>> 1;
        if (compare(mid) <= 0) {
            high = mid;
        } else {
            low = mid + 1;
        }
    }
    return low;
}
|
Returns the index of the lowest element > = the given search target or count
| 78
| 15
|
155,685
|
/**
 * Returns the index of the lowest element greater than the current search
 * target (or {@code count} when all are smaller), or -1 if the target is
 * already present. Used to find a unique insertion slot. Assumes the table is
 * sorted on the active column.
 *
 * @return insertion index in [0, count], or -1 if the target exists
 */
private int binaryEmptySlotSearch() {
    int low = 0;
    int high = count;
    while (low < high) {
        // Unsigned shift avoids int overflow of (low + high) for large counts.
        int mid = (low + high) >>> 1;
        int cmp = compare(mid);
        if (cmp < 0) {
            high = mid;
        } else if (cmp > 0) {
            low = mid + 1;
        } else {
            // Target already present: no empty slot for a unique insert.
            return -1;
        }
    }
    return low;
}
|
Returns the index of the lowest element > the given search target or count or - 1 if target is found
| 92
| 21
|
155,686
|
/**
 * Compares the current search target against row {@code i} in the active
 * column (values when sortOnValues, keys otherwise).
 *
 * @param i row index to compare against
 * @return 1 if the target is greater, -1 if less, 0 if equal
 */
private int compare(int i) {
    final int cell = sortOnValues ? values[i] : keys[i];
    if (targetSearchValue > cell) {
        return 1;
    }
    if (targetSearchValue < cell) {
        return -1;
    }
    return 0;
}
|
Check if targeted column value in the row indexed i is less than the search target object .
| 91
| 18
|
155,687
|
/**
 * Returns true when row {@code i} orders strictly before row {@code j} in the
 * active column (values when sortOnValues, keys otherwise).
 *
 * @param i first row index
 * @param j second row index
 * @return true if row i is strictly less than row j
 */
private boolean lessThan(int i, int j) {
    return sortOnValues ? values[i] < values[j] : keys[i] < keys[j];
}
|
Check if row indexed i is less than row indexed j
| 62
| 11
|
155,688
|
public static void setFontSize ( String inFontSize ) { // weconsultants@users 20050215 - Changed for Compatbilty fix for JDK 1.3 // Convert Strng to float for deriveFont() call Float stageFloat = new Float ( inFontSize ) ; float fontSize = stageFloat . floatValue ( ) ; Font fonttTree = fOwner . tTree . getFont ( ) . deriveFont ( fontSize ) ; fOwner . tTree . setFont ( fonttTree ) ; Font fontTxtCommand = fOwner . txtCommand . getFont ( ) . deriveFont ( fontSize ) ; fOwner . txtCommand . setFont ( fontTxtCommand ) ; Font fontTxtResult = fOwner . txtResult . getFont ( ) . deriveFont ( fontSize ) ; fOwner . txtResult . setFont ( fontTxtResult ) ; }
|
Sets the font size of the tree, command and result text components from the given string.
| 189
| 12
|
155,689
|
public Host lookup ( final String hostName ) { final Map < String , Host > cache = this . refresh ( ) ; Host h = cache . get ( hostName ) ; if ( h == null ) { h = new Host ( ) ; } if ( h . patternsApplied ) { return h ; } for ( final Map . Entry < String , Host > e : cache . entrySet ( ) ) { if ( ! isHostPattern ( e . getKey ( ) ) ) { continue ; } if ( ! isHostMatch ( e . getKey ( ) , hostName ) ) { continue ; } //log.debug("Found host match in SSH config:" + e.getValue()); h . copyFrom ( e . getValue ( ) ) ; } if ( h . port == 0 ) { h . port = - 1 ; } h . patternsApplied = true ; return h ; }
|
Locate the configuration for a specific host request .
| 187
| 10
|
155,690
|
/**
 * Builds a single-threaded listening executor whose lone thread terminates
 * after {@code keepAlive} milliseconds of idleness; the next submitted task
 * lazily creates a fresh thread. No thread is allocated at construction time.
 *
 * @param name      thread name used by the factory
 * @param keepAlive idle timeout in milliseconds before the thread dies
 * @return a decorated single-thread executor with caching behavior
 */
public static ListeningExecutorService getCachedSingleThreadExecutor(String name, long keepAlive) {
    ThreadPoolExecutor executor = new ThreadPoolExecutor(
            0, 1,
            keepAlive, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(),
            CoreUtils.getThreadFactory(null, name, SMALL_STACK_SIZE, false, null));
    return MoreExecutors.listeningDecorator(executor);
}
|
Get a single thread executor that caches its thread meaning that the thread will terminate after keepAlive milliseconds . A new thread will be created the next time a task arrives and that will be kept around for keepAlive milliseconds . On creation no thread is allocated the first task creates a thread .
| 100
| 59
|
155,691
|
/**
 * Creates a single-threaded listening executor backed by a bounded queue, so
 * submissions are rejected once more than {@code capacity} tasks are pending.
 *
 * @param name     thread name used by the factory
 * @param capacity maximum number of queued (not yet running) tasks
 * @return a decorated bounded single-thread executor
 */
public static ListeningExecutorService getBoundedSingleThreadExecutor(String name, int capacity) {
    return MoreExecutors.listeningDecorator(
            new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
                    new LinkedBlockingQueue<Runnable>(capacity),
                    CoreUtils.getThreadFactory(name)));
}
|
Create a bounded single threaded executor that rejects requests if more than capacity requests are outstanding .
| 110
| 18
|
155,692
|
/**
 * Creates a pool of up to {@code maxPoolSize} threads fed by a synchronous
 * handoff queue: a task is accepted only when a thread is free to take it
 * immediately, otherwise submission fails with RejectedExecutionException.
 *
 * @param maxPoolSize   maximum concurrent worker threads
 * @param keepAliveTime idle time before a worker is reclaimed
 * @param unit          unit of keepAliveTime
 * @param tFactory      factory used to create worker threads
 * @return the configured ThreadPoolExecutor
 */
public static ThreadPoolExecutor getBoundedThreadPoolExecutor(int maxPoolSize, long keepAliveTime, TimeUnit unit, ThreadFactory tFactory) {
    SynchronousQueue<Runnable> handoff = new SynchronousQueue<Runnable>();
    return new ThreadPoolExecutor(0, maxPoolSize, keepAliveTime, unit, handoff, tFactory);
}
|
Create a bounded thread pool executor . The work queue is synchronous and can cause RejectedExecutionException if there is no available thread to take a new task .
| 73
| 34
|
155,693
|
/**
 * Creates an ExecutorService facade that places every task into the supplied
 * queue instead of running it; something else must drain the queue to execute
 * the work. Bridges ListenableFutures into code already built around a queue:
 * execute/submit wrap tasks as (Listenable)FutureTasks and offer them.
 *
 * Lifecycle methods are stubs: shutdown/shutdownNow and the invokeAll/invokeAny
 * family throw UnsupportedOperationException; isShutdown/isTerminated always
 * report false, and awaitTermination returns true immediately.
 *
 * NOTE(review): offer() results are ignored — assumes the queue is unbounded
 * or the caller tolerates silent drops; confirm with the queue implementation.
 *
 * @param taskQueue queue that receives all submitted work
 * @return an ExecutorService that enqueues rather than executes
 */
public static ExecutorService getQueueingExecutorService ( final Queue < Runnable > taskQueue ) { return new ExecutorService ( ) { @ Override public void execute ( Runnable command ) { taskQueue . offer ( command ) ; } @ Override public void shutdown ( ) { throw new UnsupportedOperationException ( ) ; } @ Override public List < Runnable > shutdownNow ( ) { throw new UnsupportedOperationException ( ) ; } @ Override public boolean isShutdown ( ) { return false ; } @ Override public boolean isTerminated ( ) { return false ; } @ Override public boolean awaitTermination ( long timeout , TimeUnit unit ) throws InterruptedException { return true ; } @ Override public < T > Future < T > submit ( Callable < T > task ) { Preconditions . checkNotNull ( task ) ; FutureTask < T > retval = new FutureTask < T > ( task ) ; taskQueue . offer ( retval ) ; return retval ; } @ Override public < T > Future < T > submit ( Runnable task , T result ) { Preconditions . checkNotNull ( task ) ; FutureTask < T > retval = new FutureTask < T > ( task , result ) ; taskQueue . offer ( retval ) ; return retval ; } @ Override public Future < ? > submit ( Runnable task ) { Preconditions . checkNotNull ( task ) ; ListenableFutureTask < Object > retval = ListenableFutureTask . create ( task , null ) ; taskQueue . offer ( retval ) ; return retval ; } @ Override public < T > List < Future < T > > invokeAll ( Collection < ? extends Callable < T > > tasks ) throws InterruptedException { throw new UnsupportedOperationException ( ) ; } @ Override public < T > List < Future < T > > invokeAll ( Collection < ? extends Callable < T > > tasks , long timeout , TimeUnit unit ) throws InterruptedException { throw new UnsupportedOperationException ( ) ; } @ Override public < T > T invokeAny ( Collection < ? extends Callable < T > > tasks ) throws InterruptedException , ExecutionException { throw new UnsupportedOperationException ( ) ; } @ Override public < T > T invokeAny ( Collection < ? 
extends Callable < T > > tasks , long timeout , TimeUnit unit ) throws InterruptedException , ExecutionException , TimeoutException { throw new UnsupportedOperationException ( ) ; } } ; }
|
Create an ExceutorService that places tasks in an existing task queue for execution . Used to create a bridge for using ListenableFutures in classes already built around a queue .
| 538
| 37
|
155,694
|
/**
 * Creates a ThreadFactory producing daemon threads, optionally inside a new
 * ThreadGroup (when groupName is non-null) parented to the calling thread's
 * group. Each created thread wraps the runnable so that any Throwable is
 * logged to the HOST VoltLogger, and m_threadLocalDeallocator runs in a
 * finally block when the runnable exits.
 *
 * Thread names are {@code name} plus " - <n>" with a per-factory counter when
 * incrementThreadNames is set. One entry is polled from coreList per thread;
 * the CPU-affinity call that consumed it is currently commented out, so the
 * value is polled but unused.
 *
 * @param name                 base thread name
 * @param groupName            thread group name, or null for no new group
 * @param stackSize            stack size passed to the Thread constructor
 * @param incrementThreadNames whether to append a per-thread counter to names
 * @param coreList             optional queue of core identifiers, one polled per thread
 * @return a factory for daemon threads with exception logging
 */
public static ThreadFactory getThreadFactory ( final String groupName , final String name , final int stackSize , final boolean incrementThreadNames , final Queue < String > coreList ) { ThreadGroup group = null ; if ( groupName != null ) { group = new ThreadGroup ( Thread . currentThread ( ) . getThreadGroup ( ) , groupName ) ; } final ThreadGroup finalGroup = group ; return new ThreadFactory ( ) { private final AtomicLong m_createdThreadCount = new AtomicLong ( 0 ) ; private final ThreadGroup m_group = finalGroup ; @ Override public synchronized Thread newThread ( final Runnable r ) { final String threadName = name + ( incrementThreadNames ? " - " + m_createdThreadCount . getAndIncrement ( ) : "" ) ; String coreTemp = null ; if ( coreList != null && ! coreList . isEmpty ( ) ) { coreTemp = coreList . poll ( ) ; } final String core = coreTemp ; Runnable runnable = new Runnable ( ) { @ Override public void run ( ) { if ( core != null ) { // Remove Affinity for now to make this dependency dissapear from the client. // Goal is to remove client dependency on this class in the medium term. //PosixJNAAffinity.INSTANCE.setAffinity(core); } try { r . run ( ) ; } catch ( Throwable t ) { new VoltLogger ( "HOST" ) . error ( "Exception thrown in thread " + threadName , t ) ; } finally { m_threadLocalDeallocator . run ( ) ; } } } ; Thread t = new Thread ( m_group , runnable , threadName , stackSize ) ; t . setDaemon ( true ) ; return t ; } } ; }
|
Creates a thread factory that creates threads within a thread group if the group name is given . The threads created will catch any unhandled exceptions and log them to the HOST logger .
| 387
| 37
|
155,695
|
/**
 * Returns a printable identity for the local address: the cached reverse-DNS
 * hostname when resolvable, otherwise the address text. Returns the empty
 * string when no local address is available from the supplier.
 *
 * @return hostname, address string, or "" when unavailable
 */
public static String getHostnameOrAddress() {
    final InetAddress localAddress = m_localAddressSupplier.get();
    return localAddress == null ? "" : ReverseDNSCache.hostnameOrAddress(localAddress);
}
|
Return the local hostname if it s resolvable . If not return the IPv4 address on the first interface we find if it exists . If not returns whatever address exists on the first interface .
| 52
| 40
|
155,696
|
/**
 * Convenience overload for retrying a task asynchronously: creates a
 * SettableFuture, delegates to the overload that drives the retry loop, and
 * returns the future so callers can observe the result or attempt to cancel.
 *
 * @param ses           scheduler used to delay retries
 * @param es            executor that runs the callable
 * @param callable      work to attempt
 * @param maxAttempts   maximum number of attempts
 * @param startInterval initial retry delay
 * @param startUnit     unit of startInterval
 * @param maxInterval   upper bound on the retry delay
 * @param maxUnit       unit of maxInterval
 * @return a settable future tracking the task's eventual outcome
 */
public static final <T> ListenableFuture<T> retryHelper(final ScheduledExecutorService ses, final ExecutorService es, final Callable<T> callable, final long maxAttempts, final long startInterval, final TimeUnit startUnit, final long maxInterval, final TimeUnit maxUnit) {
    final SettableFuture<T> result = SettableFuture.create();
    retryHelper(ses, es, callable, maxAttempts, startInterval, startUnit, maxInterval, maxUnit, result);
    return result;
}
|
A helper for retrying tasks asynchronously returns a settable future that can be used to attempt to cancel the task .
| 122
| 25
|
155,697
|
/**
 * Returns the map's entries as a list sorted ascending by value (natural
 * ordering), breaking ties between equal values by the keys' natural order.
 *
 * @param map source map; its entry set is copied, the map is not modified
 * @return a new list of entries in sorted order
 */
public static <K extends Comparable<? super K>, V extends Comparable<? super V>> List<Entry<K, V>> sortKeyValuePairByValue(Map<K, V> map) {
    List<Map.Entry<K, V>> sortedEntries = new ArrayList<Map.Entry<K, V>>(map.entrySet());
    Comparator<Map.Entry<K, V>> byValueThenKey = new Comparator<Map.Entry<K, V>>() {
        @Override
        public int compare(Entry<K, V> left, Entry<K, V> right) {
            if (left.getValue().equals(right.getValue())) {
                // Equal values: fall back to the keys' natural order.
                return left.getKey().compareTo(right.getKey());
            }
            return left.getValue().compareTo(right.getValue());
        }
    };
    Collections.sort(sortedEntries, byValueThenKey);
    return sortedEntries;
}
|
Utility method to sort the keys and values of a map by their value .
| 208
| 16
|
155,698
|
/**
 * Attaches {@code n} as the left or right child of {@code x} (per isleft) and
 * back-links the child's parent pointer to the (possibly replaced) node.
 *
 * @param store  persistent store the nodes live in
 * @param x      parent node
 * @param isleft true to set the left child, false for the right
 * @param n      child node, may be null
 * @return the node returned by setLeft/setRight (may differ from the input x)
 */
private static NodeAVL set(PersistentStore store, NodeAVL x, boolean isleft, NodeAVL n) {
    x = isleft ? x.setLeft(store, n) : x.setRight(store, n);
    if (n != null) {
        n.setParent(store, x);
    }
    return x;
}
|
Set a node as child of another
| 83
| 7
|
155,699
|
/**
 * Returns one child of {@code x}: the left child when isleft is true,
 * otherwise the right child.
 *
 * @param store  persistent store the nodes live in
 * @param x      parent node
 * @param isleft which child to fetch
 * @return the selected child node
 */
private static NodeAVL child(PersistentStore store, NodeAVL x, boolean isleft) {
    if (isleft) {
        return x.getLeft(store);
    }
    return x.getRight(store);
}
|
Returns either child node
| 43
| 4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.