idx
int64 0
165k
| question
stringlengths 73
4.15k
| target
stringlengths 5
918
| len_question
int64 21
890
| len_target
int64 3
255
|
|---|---|---|---|---|
154,600
|
/**
 * Retrieves the major version number of the underlying database.
 *
 * @return the first dot-separated component of {@code versionString} as an int
 * @throws SQLException if the connection is closed
 */
@Override
public int getDatabaseMajorVersion() throws SQLException {
    checkClosed();
    // Removed stray System.out.println debug output left over from development;
    // parseInt avoids the needless boxing of Integer.valueOf.
    return Integer.parseInt(versionString.split("\\.")[0]);
}
|
Retrieves the major version number of the underlying database .
| 64
| 12
|
154,601
|
/**
 * Retrieves a description of the system and user functions available in the
 * given catalog. Not supported by this driver.
 *
 * @throws SQLException always (after verifying the connection is open)
 */
@Override
public ResultSet getFunctions(String catalog, String schemaPattern,
        String functionNamePattern) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
|
Retrieves a description of the system and user functions available in the given catalog .
| 46
| 17
|
154,602
|
@ Override public ResultSet getPrimaryKeys ( String catalog , String schema , String table ) throws SQLException { assert ( table != null && ! table . isEmpty ( ) ) ; checkClosed ( ) ; this . sysCatalog . setString ( 1 , "PRIMARYKEYS" ) ; JDBC4ResultSet res = ( JDBC4ResultSet ) this . sysCatalog . executeQuery ( ) ; VoltTable vtable = res . getVoltTable ( ) . clone ( 0 ) ; // Filter the primary keys based on table name while ( res . next ( ) ) { if ( res . getString ( "TABLE_NAME" ) . equals ( table ) ) { vtable . addRow ( res . getRowData ( ) ) ; } } return new JDBC4ResultSet ( sysCatalog , vtable ) ; }
|
Retrieves a description of the given table's primary key columns .
| 179
| 14
|
154,603
|
/**
 * Retrieves the schema names available in this database. Always returns an
 * empty result set with the standard TABLE_SCHEM / TABLE_CATALOG columns.
 */
@Override
public ResultSet getSchemas() throws SQLException {
    checkClosed();
    VoltTable emptySchemas = new VoltTable(
            new ColumnInfo("TABLE_SCHEM", VoltType.STRING),
            new ColumnInfo("TABLE_CATALOG", VoltType.STRING));
    return new JDBC4ResultSet(this.sysCatalog, emptySchemas);
}
|
Retrieves the schema names available in this database .
| 99
| 11
|
154,604
|
@ Override public ResultSet getTablePrivileges ( String catalog , String schemaPattern , String tableNamePattern ) throws SQLException { checkClosed ( ) ; VoltTable vtable = new VoltTable ( new ColumnInfo ( "TABLE_CAT" , VoltType . STRING ) , new ColumnInfo ( "TABLE_SCHEM" , VoltType . STRING ) , new ColumnInfo ( "TABLE_NAME" , VoltType . STRING ) , new ColumnInfo ( "GRANTOR" , VoltType . STRING ) , new ColumnInfo ( "GRANTEE" , VoltType . STRING ) , new ColumnInfo ( "PRIVILEGE" , VoltType . STRING ) , new ColumnInfo ( "IS_GRANTABLE" , VoltType . STRING ) ) ; //NB: @SystemCatalog(?) will need additional support if we want to // populate the table. JDBC4ResultSet res = new JDBC4ResultSet ( this . sysCatalog , vtable ) ; return res ; }
|
Retrieves a description of the access rights for each table available in a catalog .
| 219
| 17
|
154,605
|
public static Pattern computeJavaPattern ( String sqlPattern ) { StringBuffer pattern_buff = new StringBuffer ( ) ; // Replace "_" with "." (match exactly 1 character) // Replace "%" with ".*" (match 0 or more characters) for ( int i = 0 ; i < sqlPattern . length ( ) ; i ++ ) { char c = sqlPattern . charAt ( i ) ; if ( c == ' ' ) { pattern_buff . append ( ' ' ) ; } else if ( c == ' ' ) { pattern_buff . append ( ".*" ) ; } else pattern_buff . append ( c ) ; } return Pattern . compile ( pattern_buff . toString ( ) ) ; }
|
Convert the users VoltDB SQL pattern into a regex pattern
| 151
| 12
|
154,606
|
@ Override public ResultSet getTables ( String catalog , String schemaPattern , String tableNamePattern , String [ ] types ) throws SQLException { checkClosed ( ) ; this . sysCatalog . setString ( 1 , "TABLES" ) ; JDBC4ResultSet res = ( JDBC4ResultSet ) this . sysCatalog . executeQuery ( ) ; VoltTable vtable = res . getVoltTable ( ) . clone ( 0 ) ; List < String > typeStrings = null ; if ( types != null ) { typeStrings = Arrays . asList ( types ) ; } // If no pattern is specified, default to matching any/all. if ( tableNamePattern == null || tableNamePattern . length ( ) == 0 ) { tableNamePattern = "%" ; } Pattern table_pattern = computeJavaPattern ( tableNamePattern ) ; // Filter tables based on type and pattern while ( res . next ( ) ) { if ( typeStrings == null || typeStrings . contains ( res . getString ( "TABLE_TYPE" ) ) ) { Matcher table_matcher = table_pattern . matcher ( res . getString ( "TABLE_NAME" ) ) ; if ( table_matcher . matches ( ) ) { vtable . addRow ( res . getRowData ( ) ) ; } } } return new JDBC4ResultSet ( this . sysCatalog , vtable ) ; }
|
Retrieves a description of the tables available in the given catalog .
| 305
| 14
|
154,607
|
/**
 * Retrieves the table types available in this database, one row per entry in
 * {@code tableTypes}.
 */
@Override
public ResultSet getTableTypes() throws SQLException {
    checkClosed();
    VoltTable typeTable = new VoltTable(new ColumnInfo("TABLE_TYPE", VoltType.STRING));
    for (String tableType : tableTypes) {
        typeTable.addRow(tableType);
    }
    return new JDBC4ResultSet(this.sysCatalog, typeTable);
}
|
Retrieves the table types available in this database .
| 96
| 11
|
154,608
|
/**
 * Retrieves a description of all the data types supported by this database,
 * straight from the TYPEINFO system catalog.
 */
@Override
public ResultSet getTypeInfo() throws SQLException {
    checkClosed();
    this.sysCatalog.setString(1, "TYPEINFO");
    return this.sysCatalog.executeQuery();
}
|
Retrieves a description of all the data types supported by this database .
| 57
| 15
|
154,609
|
/**
 * Retrieves a description of a table's columns that are automatically updated
 * when any value in a row is updated. Not supported by this driver.
 *
 * @throws SQLException always (after verifying the connection is open)
 */
@Override
public ResultSet getVersionColumns(String catalog, String schema, String table)
        throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
|
Retrieves a description of a table's columns that are automatically updated when any value in a row is updated .
| 44
| 23
|
154,610
|
/**
 * Retrieves whether this database supports the JDBC scalar function CONVERT
 * for conversions between the JDBC types fromType and toType.
 *
 * Any supported type converts to VARCHAR; the numeric types also convert
 * freely among themselves.
 */
@Override
public boolean supportsConvert(int fromType, int toType) throws SQLException {
    checkClosed();
    switch (fromType) {
        // These types convert only to VARCHAR / VoltType.STRING.
        case java.sql.Types.VARCHAR:
        case java.sql.Types.VARBINARY:
        case java.sql.Types.TIMESTAMP:
        case java.sql.Types.OTHER:
            return toType == java.sql.Types.VARCHAR;
        // Numeric types convert to VARCHAR or to any other numeric type.
        case java.sql.Types.TINYINT:
        case java.sql.Types.SMALLINT:
        case java.sql.Types.INTEGER:
        case java.sql.Types.BIGINT:
        case java.sql.Types.FLOAT:
        case java.sql.Types.DECIMAL:
            switch (toType) {
                case java.sql.Types.VARCHAR:
                case java.sql.Types.TINYINT:
                case java.sql.Types.SMALLINT:
                case java.sql.Types.INTEGER:
                case java.sql.Types.BIGINT:
                case java.sql.Types.FLOAT:
                case java.sql.Types.DECIMAL:
                    return true;
                default:
                    return false;
            }
        default:
            return false;
    }
}
|
Retrieves whether this database supports the JDBC scalar function CONVERT for conversions between the JDBC types fromType and toType .
| 287
| 28
|
154,611
|
/**
 * Retrieves whether this database supports the given result set type.
 * Only TYPE_SCROLL_INSENSITIVE is supported.
 */
@Override
public boolean supportsResultSetType(int type) throws SQLException {
    checkClosed();
    return type == ResultSet.TYPE_SCROLL_INSENSITIVE;
}
|
Retrieves whether this database supports the given result set type .
| 50
| 13
|
154,612
|
/**
 * Returns true if {@code url} represents an in-process connection type
 * (file, resource, or memory database).
 *
 * The original compared Strings with {@code ==}, which only works when the
 * caller passes the interned constant itself; value equality is a safe
 * superset of that behavior. Constant-first calls also make this null-safe.
 *
 * @param url the database type token to test (may be null)
 */
public static boolean isInProcessDatabaseType(String url) {
    return S_FILE.equals(url) || S_RES.equals(url) || S_MEM.equals(url);
}
|
Returns true if type represents an in - process connection to database .
| 43
| 13
|
154,613
|
public T nextReady ( long systemCurrentTimeMillis ) { if ( delayed . size ( ) == 0 ) { return null ; } // no ready objects if ( delayed . firstKey ( ) > systemCurrentTimeMillis ) { return null ; } Entry < Long , Object [ ] > entry = delayed . pollFirstEntry ( ) ; Object [ ] values = entry . getValue ( ) ; @ SuppressWarnings ( "unchecked" ) T value = ( T ) values [ 0 ] ; // if this map entry had multiple values, put all but one // of them back if ( values . length > 1 ) { int prevLength = values . length ; values = Arrays . copyOfRange ( values , 1 , values . length ) ; assert ( values . length == prevLength - 1 ) ; delayed . put ( entry . getKey ( ) , values ) ; } m_size -- ; return value ; }
|
Return the next object that is safe for delivery or null if there are no safe objects to deliver .
| 193
| 20
|
154,614
|
private static byte [ ] readCatalog ( String catalogUrl ) throws IOException { assert ( catalogUrl != null ) ; final int MAX_CATALOG_SIZE = 40 * 1024 * 1024 ; // 40mb InputStream fin = null ; try { URL url = new URL ( catalogUrl ) ; fin = url . openStream ( ) ; } catch ( MalformedURLException ex ) { // Invalid URL. Try as a file. fin = new FileInputStream ( catalogUrl ) ; } byte [ ] buffer = new byte [ MAX_CATALOG_SIZE ] ; int readBytes = 0 ; int totalBytes = 0 ; try { while ( readBytes >= 0 ) { totalBytes += readBytes ; readBytes = fin . read ( buffer , totalBytes , buffer . length - totalBytes - 1 ) ; } } finally { fin . close ( ) ; } return Arrays . copyOf ( buffer , totalBytes ) ; }
|
Read catalog bytes from URL
| 197
| 5
|
154,615
|
/**
 * Release any resources held by this SyslogAppender, sending the layout
 * footer (if one is configured and the header check already ran) before
 * closing the writer.
 */
synchronized public void close() {
    closed = true;
    if (sqw == null) {
        return;
    }
    try {
        if (layoutHeaderChecked && layout != null && layout.getFooter() != null) {
            sendLayoutMessage(layout.getFooter());
        }
        sqw.close();
    } catch (java.io.IOException ex) {
        // The writer is discarded below regardless of the failure.
    }
    sqw = null;
}
|
Release any resources held by this SyslogAppender .
| 91
| 12
|
154,616
|
/**
 * Returns the integer value corresponding to the named syslog facility, or
 * -1 if it couldn't be recognized. Matching is case-insensitive and ignores
 * surrounding whitespace.
 *
 * @param facilityName the facility name, may be null
 */
public static int getFacility(String facilityName) {
    if (facilityName != null) {
        facilityName = facilityName.trim();
    }
    // Parallel name/code tables replace the long if-else-if chain.
    final String[] names = {
        "KERN", "USER", "MAIL", "DAEMON", "AUTH", "SYSLOG", "LPR", "NEWS",
        "UUCP", "CRON", "AUTHPRIV", "FTP", "LOCAL0", "LOCAL1", "LOCAL2",
        "LOCAL3", "LOCAL4", "LOCAL5", "LOCAL6", "LOCAL7"
    };
    final int[] codes = {
        LOG_KERN, LOG_USER, LOG_MAIL, LOG_DAEMON, LOG_AUTH, LOG_SYSLOG,
        LOG_LPR, LOG_NEWS, LOG_UUCP, LOG_CRON, LOG_AUTHPRIV, LOG_FTP,
        LOG_LOCAL0, LOG_LOCAL1, LOG_LOCAL2, LOG_LOCAL3, LOG_LOCAL4,
        LOG_LOCAL5, LOG_LOCAL6, LOG_LOCAL7
    };
    for (int i = 0; i < names.length; i++) {
        if (names[i].equalsIgnoreCase(facilityName)) {
            return codes[i];
        }
    }
    return -1;
}
|
Returns the integer value corresponding to the named syslog facility or - 1 if it couldn't be recognized .
| 558
| 21
|
154,617
|
/**
 * This method returns immediately as options are activated when they are set.
 * Resolves the local hostname eagerly when headers are enabled, and sends the
 * layout header if one is configured.
 */
public void activateOptions() {
    if (header) {
        getLocalHostname(); // warm the hostname cache before first use
    }
    String layoutHeader = (layout == null) ? null : layout.getHeader();
    if (layoutHeader != null) {
        sendLayoutMessage(layoutHeader);
    }
    layoutHeaderChecked = true;
}
|
This method returns immediately as options are activated when they are set .
| 57
| 13
|
154,618
|
private String getPacketHeader ( final long timeStamp ) { if ( header ) { StringBuffer buf = new StringBuffer ( dateFormat . format ( new Date ( timeStamp ) ) ) ; // RFC 3164 says leading space, not leading zero on days 1-9 if ( buf . charAt ( 4 ) == ' ' ) { buf . setCharAt ( 4 , ' ' ) ; } buf . append ( getLocalHostname ( ) ) ; buf . append ( ' ' ) ; return buf . toString ( ) ; } return "" ; }
|
Gets HEADER portion of packet .
| 118
| 8
|
154,619
|
/**
 * Send a layout header or footer string as a syslog packet, prefixing the
 * packet header and facility string when configured.
 *
 * @param msg the layout header/footer text
 */
private void sendLayoutMessage(final String msg) {
    if (sqw == null) {
        return;
    }
    String packet = msg;
    String hdr = getPacketHeader(new Date().getTime());
    if (facilityPrinting || hdr.length() > 0) {
        StringBuffer buf = new StringBuffer(hdr);
        if (facilityPrinting) {
            buf.append(facilityStr);
        }
        buf.append(msg);
        packet = buf.toString();
    }
    sqw.setLevel(6);
    sqw.write(packet);
}
|
Set header or footer of layout .
| 124
| 8
|
154,620
|
/**
 * Retrieves this object's SQLXML value as a gzipped array of bytes, possibly
 * by terminating any in-progress write operations and converting accumulated
 * intermediate data.
 *
 * @return the gzipped bytes; never null on success
 * @throws SQLException if the value is not readable (no data and no
 *         fully-written output stream) or the conversion fails
 */
protected byte[] getGZipData() throws SQLException {
    byte[] bytes = gZipData();
    if (bytes != null) {
        return bytes; // already materialized
    }
    // Conversion requires a stream that exists, has been closed (fully
    // written), and has not been freed. NOTE(review): the '||' ordering means
    // a null stream short-circuits before isClosed()/isFreed() are called —
    // confirm this matches the stream's lifecycle contract.
    if ((this.outputStream == null) || !this.outputStream.isClosed()
            || this.outputStream.isFreed()) {
        throw Exceptions.notReadable();
    }
    try {
        // Snapshot the accumulated stream bytes as this object's value.
        setGZipData(this.outputStream.toByteArray());
        return gZipData();
    } catch (IOException ex) {
        throw Exceptions.notReadable();
    } finally {
        // The stream is released whether or not the conversion succeeded.
        this.freeOutputStream();
    }
}
|
Retrieves this object s SQLXML value as a gzipped array of bytes possibly by terminating any in - progress write operations and converting accumulated intermediate data .
| 141
| 33
|
154,621
|
/**
 * Closes this object and releases the resources that it holds: both streams
 * are freed, readability/writability are revoked, and the cached gzip data
 * is dropped.
 */
protected synchronized void close() {
    closed = true;
    setReadable(false);
    setWritable(false);
    freeOutputStream();
    freeInputStream();
    gzdata = null;
}
|
closes this object and releases the resources that it holds .
| 48
| 12
|
154,622
|
/**
 * Retrieves a new Result for setting the XML value designated by this SQLXML
 * instance, dispatching on the requested Result subtype. A null resultClass
 * yields a StreamResult.
 *
 * NOTE(review): a null resultClass reaches isAssignableFrom(null) on the
 * first test, which throws NullPointerException before the explicit null
 * check below is evaluated — confirm callers never pass null, or that this
 * is the intended failure mode.
 *
 * @param resultClass the requested Result subtype, or null for StreamResult
 * @throws SQLException if the class is unsupported (including JAXBResult)
 */
protected <T extends Result> T createResult(Class<T> resultClass) throws SQLException {
    checkWritable();
    // Setting a new value supersedes any previous one: the object becomes
    // readable and further writes are disallowed until reset.
    setWritable(false);
    setReadable(true);
    if (JAXBResult.class.isAssignableFrom(resultClass)) {
        // Must go first presently, since JAXBResult extends SAXResult
        // (purely as an implmentation detail) and it's not possible
        // to instantiate a valid JAXBResult with a Zero-Args
        // constructor(or any subclass thereof, due to the finality of
        // its private UnmarshallerHandler)
        // FALL THROUGH... will throw an exception
    } else if ((resultClass == null) || StreamResult.class.isAssignableFrom(resultClass)) {
        return createStreamResult(resultClass);
    } else if (DOMResult.class.isAssignableFrom(resultClass)) {
        return createDOMResult(resultClass);
    } else if (SAXResult.class.isAssignableFrom(resultClass)) {
        return createSAXResult(resultClass);
    } else if (StAXResult.class.isAssignableFrom(resultClass)) {
        return createStAXResult(resultClass);
    }
    // Reached for JAXBResult and any other unsupported Result type.
    throw Util.invalidArgument("resultClass: " + resultClass);
}
|
Retrieves a new Result for setting the XML value designated by this SQLXML instance .
| 289
| 19
|
154,623
|
/**
 * Retrieves a new SAXResult for setting the XML value designated by this
 * SQLXML instance. The SAX events are bridged onto an XMLStreamWriter backed
 * by this object's StAX result.
 *
 * The four identical catch blocks are collapsed into one Java 7 multi-catch
 * (the file already uses multi-catch elsewhere).
 *
 * @param resultClass a SAXResult subtype with a no-arg constructor, or null
 *        for a plain SAXResult
 * @throws SQLException if the result class cannot be instantiated
 */
@SuppressWarnings("unchecked")
protected <T extends Result> T createSAXResult(Class<T> resultClass) throws SQLException {
    SAXResult result = null;
    try {
        result = (resultClass == null) ? new SAXResult()
                                       : (SAXResult) resultClass.newInstance();
    } catch (SecurityException | InstantiationException
            | IllegalAccessException | ClassCastException ex) {
        // Identical handling for every instantiation failure.
        throw Exceptions.resultInstantiation(ex);
    }
    StAXResult staxResult = createStAXResult(null);
    XMLStreamWriter xmlWriter = staxResult.getXMLStreamWriter();
    // Adapt SAX callbacks into StAX writer calls.
    SAX2XMLStreamWriter handler = new SAX2XMLStreamWriter(xmlWriter);
    result.setHandler(handler);
    return (T) result;
}
|
Retrieves a new SAXResult for setting the XML value designated by this SQLXML instance .
| 225
| 21
|
154,624
|
/**
 * Returns the (shared, empty) binding list when this expression matches the
 * indexed expression exactly; otherwise there is no binding possible,
 * indicated by a null return.
 */
@Override
public List<AbstractExpression> bindingToIndexedExpression(AbstractExpression expr) {
    return equals(expr) ? s_reusableImmutableEmptyBinding : null;
}
|
Otherwise there is no binding possible indicated by a null return .
| 47
| 12
|
154,625
|
public static Client getClient ( ClientConfig config , String [ ] servers , int port ) throws Exception { config . setTopologyChangeAware ( true ) ; // Set client to be topology-aware final Client client = ClientFactory . createClient ( config ) ; for ( String server : servers ) { // Try connecting servers one by one until we have a success try { client . createConnection ( server . trim ( ) , port ) ; break ; } catch ( IOException e ) { // Only swallow the exceptions from Java network or connection problems // Unresolved hostname exceptions will be thrown } } if ( client . getConnectedHostList ( ) . isEmpty ( ) ) { throw new Exception ( "Unable to connect to any servers." ) ; } return client ; }
|
Get connection to servers in cluster .
| 162
| 7
|
154,626
|
/**
 * Register the response adapter for a partition. Synchronized in case
 * multiple partitions are added concurrently; the immutable map is rebuilt
 * copy-on-write style so readers never need a lock.
 */
public synchronized void addAdapter(int pid, InternalClientResponseAdapter adapter) {
    m_adapters = ImmutableMap.<Integer, InternalClientResponseAdapter>builder()
            .putAll(m_adapters)
            .put(pid, adapter)
            .build();
}
|
Synchronized in case multiple partitions are added concurrently .
| 72
| 11
|
154,627
|
/**
 * Returns true if a table with the given name exists in the server catalog.
 */
public boolean hasTable(String name) {
    return getCatalogContext().tables.get(name) != null;
}
|
Returns true if a table with the given name exists in the server catalog .
| 33
| 15
|
154,628
|
/**
 * Invoke a stored procedure on behalf of a streaming/import caller.
 * Use a null backPressurePredicate for no back pressure.
 *
 * Every failure path rate-limit-logs, bumps the failure counter, and
 * returns false; callers get no exception.
 *
 * @param backPressurePredicate may be null to disable back pressure
 * @return true if the transaction was created, false on any failure
 */
public boolean callProcedure(InternalConnectionContext caller,
        Function<Integer, Boolean> backPressurePredicate,
        InternalConnectionStatsCollector statsCollector,
        ProcedureCallback procCallback, String proc, Object... fieldList) {
    // Resolve the procedure against the current catalog.
    Procedure catProc = InvocationDispatcher.getProcedureFromName(proc, getCatalogContext());
    if (catProc == null) {
        String fmt = "Cannot invoke procedure %s from streaming interface %s. Procedure not found.";
        m_logger.rateLimitedLog(SUPPRESS_INTERVAL, Level.ERROR, null, fmt, proc, caller);
        m_failedCount.incrementAndGet();
        return false;
    }
    StoredProcedureInvocation task = new StoredProcedureInvocation();
    task.setProcName(proc);
    task.setParams(fieldList);
    // Round-trip the invocation through serialization (command-log format).
    try {
        task = MiscUtils.roundTripForCL(task);
    } catch (Exception e) {
        String fmt = "Cannot invoke procedure %s from streaming interface %s. failed to create task.";
        m_logger.rateLimitedLog(SUPPRESS_INTERVAL, Level.ERROR, null, fmt, proc, caller);
        m_failedCount.incrementAndGet();
        return false;
    }
    // Determine the target partition(s) for this invocation.
    int[] partitions = null;
    try {
        partitions = InvocationDispatcher.getPartitionsForProcedure(catProc, task);
    } catch (Exception e) {
        String fmt = "Can not invoke procedure %s from streaming interface %s. Partition not found.";
        m_logger.rateLimitedLog(SUPPRESS_INTERVAL, Level.ERROR, e, fmt, proc, caller);
        m_failedCount.incrementAndGet();
        return false;
    }
    // Multi-partition if targeted at the MP initiator or spanning partitions.
    boolean mp = (partitions[0] == MpInitiator.MP_INIT_PID) || (partitions.length > 1);
    final InternalClientResponseAdapter adapter =
            mp ? m_adapters.get(MpInitiator.MP_INIT_PID) : m_adapters.get(partitions[0]);
    InternalAdapterTaskAttributes kattrs =
            new InternalAdapterTaskAttributes(caller, adapter.connectionId());
    // Streaming invocations run as the importer user.
    final AuthUser user = getCatalogContext().authSystem.getImporterUser();
    if (!adapter.createTransaction(kattrs, proc, catProc, procCallback, statsCollector,
            task, user, partitions, false, backPressurePredicate)) {
        m_failedCount.incrementAndGet();
        return false;
    }
    m_submitSuccessCount.incrementAndGet();
    return true;
}
|
Use null backPressurePredicate for no back pressure
| 573
| 11
|
154,629
|
/**
 * Add a service to be notified if this node becomes the global leader.
 * If this node is already the leader, the service is promoted immediately.
 */
synchronized void registerService(Promotable service) {
    m_services.add(service);
    if (!m_isLeader) {
        return;
    }
    try {
        service.acceptPromotion();
    } catch (Exception e) {
        VoltDB.crashLocalVoltDB("Unable to promote global service.", true, e);
    }
}
|
Add a service to be notified if this node becomes the global leader
| 73
| 13
|
154,630
|
/**
 * For CASE WHEN and its special cases, section 9.3 of the SQL standard on
 * type aggregation is implemented in three passes over the CASEWHEN chain:
 * resolve child types, aggregate a common result type, then back-fill that
 * type into any unresolved nodes.
 *
 * @throws HsqlException (X_42567) if no common type can be determined
 */
void resolveTypesForCaseWhen(Session session) {
    if (dataType != null) {
        return; // already resolved
    }
    // Pass 1: resolve the condition and both result branches of each link.
    Expression expr = this;
    while (expr.opType == OpTypes.CASEWHEN) {
        expr.nodes[LEFT].resolveTypes(session, expr);
        if (expr.nodes[LEFT].isParam) {
            // An unresolved parameter in condition position must be boolean.
            expr.nodes[LEFT].dataType = Type.SQL_BOOLEAN;
        }
        // NOTE(review): the parent passed here is this.nodes[RIGHT], not
        // expr.nodes[RIGHT] — confirm that is intended for nested links.
        expr.nodes[RIGHT].nodes[LEFT].resolveTypes(session, nodes[RIGHT]);
        expr.nodes[RIGHT].nodes[RIGHT].resolveTypes(session, nodes[RIGHT]);
        expr = expr.nodes[RIGHT].nodes[RIGHT]; // follow the ELSE chain
    }
    // Pass 2: aggregate a common data type across all result branches.
    expr = this;
    while (expr.opType == OpTypes.CASEWHEN) {
        dataType = Type.getAggregateType(expr.nodes[RIGHT].nodes[LEFT].dataType, dataType);
        dataType = Type.getAggregateType(expr.nodes[RIGHT].nodes[RIGHT].dataType, dataType);
        expr = expr.nodes[RIGHT].nodes[RIGHT];
    }
    // Pass 3: back-fill the aggregated type into untyped nodes.
    expr = this;
    while (expr.opType == OpTypes.CASEWHEN) {
        if (expr.nodes[RIGHT].nodes[LEFT].dataType == null) {
            expr.nodes[RIGHT].nodes[LEFT].dataType = dataType;
        }
        if (expr.nodes[RIGHT].nodes[RIGHT].dataType == null) {
            expr.nodes[RIGHT].nodes[RIGHT].dataType = dataType;
        }
        if (expr.nodes[RIGHT].dataType == null) {
            expr.nodes[RIGHT].dataType = dataType;
        }
        expr = expr.nodes[RIGHT].nodes[RIGHT];
    }
    if (dataType == null) {
        throw Error.error(ErrorCode.X_42567);
    }
}
|
For CASE WHEN and its special cases section 9 . 3 of the SQL standard on type aggregation is implemented .
| 418
| 21
|
154,631
|
public static GeographyPointValue fromWKT ( String param ) { if ( param == null ) { throw new IllegalArgumentException ( "Null well known text argument to GeographyPointValue constructor." ) ; } Matcher m = wktPattern . matcher ( param ) ; if ( m . find ( ) ) { // Add 0.0 to avoid -0.0. double longitude = toDouble ( m . group ( 1 ) , m . group ( 2 ) ) + 0.0 ; double latitude = toDouble ( m . group ( 3 ) , m . group ( 4 ) ) + 0.0 ; if ( Math . abs ( latitude ) > 90.0 ) { throw new IllegalArgumentException ( String . format ( "Latitude \"%f\" out of bounds." , latitude ) ) ; } if ( Math . abs ( longitude ) > 180.0 ) { throw new IllegalArgumentException ( String . format ( "Longitude \"%f\" out of bounds." , longitude ) ) ; } return new GeographyPointValue ( longitude , latitude ) ; } else { throw new IllegalArgumentException ( "Cannot construct GeographyPointValue value from \"" + param + "\"" ) ; } }
|
Create a GeographyPointValue from a well - known text string .
| 261
| 14
|
154,632
|
String formatLngLat ( ) { DecimalFormat df = new DecimalFormat ( "##0.0###########" ) ; // Explicitly test for differences less than 1.0e-12 and // force them to be zero. Otherwise you may find a case // where two points differ in the less significant bits, but // they format as the same number. double lng = ( Math . abs ( m_longitude ) < EPSILON ) ? 0 : m_longitude ; double lat = ( Math . abs ( m_latitude ) < EPSILON ) ? 0 : m_latitude ; return df . format ( lng ) + " " + df . format ( lat ) ; }
|
Format the coordinates for this point . Use 12 digits of precision after the decimal point .
| 149
| 17
|
154,633
|
public static GeographyPointValue unflattenFromBuffer ( ByteBuffer inBuffer , int offset ) { double lng = inBuffer . getDouble ( offset ) ; double lat = inBuffer . getDouble ( offset + BYTES_IN_A_COORD ) ; if ( lat == 360.0 && lng == 360.0 ) { // This is a null point. return null ; } return new GeographyPointValue ( lng , lat ) ; }
|
Deserializes a point from a ByteBuffer at an absolute offset .
| 97
| 14
|
154,634
|
private static double normalize ( double v , double range ) { double a = v - Math . floor ( ( v + ( range / 2 ) ) / range ) * range ; // Make sure that a and v have the same sign // when abs(v) = 180. if ( Math . abs ( a ) == 180.0 && ( a * v ) < 0 ) { a *= - 1 ; } // The addition of 0.0 is to avoid negative // zero, which just confuses things. return a + 0.0 ; }
|
by subtracting multiples of 360 .
| 114
| 8
|
154,635
|
/**
 * Return a point scaled by the given alpha value. The +0.0 terms avoid
 * producing negative zero coordinates.
 *
 * @deprecated retained for compatibility
 */
@Deprecated
public GeographyPointValue mul(double alpha) {
    double scaledLng = getLongitude() * alpha + 0.0;
    double scaledLat = getLatitude() * alpha + 0.0;
    return GeographyPointValue.normalizeLngLat(scaledLng, scaledLat);
}
|
Return a point scaled by the given alpha value .
| 52
| 10
|
154,636
|
@ Deprecated public GeographyPointValue rotate ( double phi , GeographyPointValue center ) { double sinphi = Math . sin ( 2 * Math . PI * phi / 360.0 ) ; double cosphi = Math . cos ( 2 * Math . PI * phi / 360.0 ) ; // Translate to the center. double longitude = getLongitude ( ) - center . getLongitude ( ) ; double latitude = getLatitude ( ) - center . getLatitude ( ) ; // Rotate and translate back. return GeographyPointValue . normalizeLngLat ( ( cosphi * longitude - sinphi * latitude ) + center . getLongitude ( ) , ( sinphi * longitude + cosphi * latitude ) + center . getLatitude ( ) ) ; }
|
Return a new point which is this point rotated by the angle phi around a given center point .
| 170
| 20
|
154,637
|
public static void createPersistentZKNodes ( ZooKeeper zk ) { LinkedList < ZKUtil . StringCallback > callbacks = new LinkedList < ZKUtil . StringCallback > ( ) ; for ( int i = 0 ; i < VoltZK . ZK_HIERARCHY . length ; i ++ ) { ZKUtil . StringCallback cb = new ZKUtil . StringCallback ( ) ; callbacks . add ( cb ) ; zk . create ( VoltZK . ZK_HIERARCHY [ i ] , null , Ids . OPEN_ACL_UNSAFE , CreateMode . PERSISTENT , cb , null ) ; } for ( ZKUtil . StringCallback cb : callbacks ) { try { cb . get ( ) ; } catch ( org . apache . zookeeper_voltpatches . KeeperException . NodeExistsException e ) { // this is an expected race. } catch ( Exception e ) { VoltDB . crashLocalVoltDB ( e . getMessage ( ) , true , e ) ; } } }
|
Race to create the persistent nodes .
| 243
| 7
|
154,638
|
/**
 * Helper method for parsing mailbox node JSON strings into
 * MailboxNodeContent objects (HSId plus optional partitionId).
 *
 * @throws JSONException if a string is not valid JSON or lacks "HSId"
 */
public static List<MailboxNodeContent> parseMailboxContents(List<String> jsons)
        throws JSONException {
    ArrayList<MailboxNodeContent> objects =
            new ArrayList<MailboxNodeContent>(jsons.size());
    for (String json : jsons) {
        JSONObject jsObj = new JSONObject(json);
        long HSId = jsObj.getLong("HSId");
        // partitionId is optional in the serialized form.
        Integer partitionId = jsObj.has("partitionId")
                ? jsObj.getInt("partitionId") : null;
        objects.add(new MailboxNodeContent(HSId, partitionId));
    }
    return objects;
}
|
Helper method for parsing mailbox node contents into Java objects .
| 163
| 11
|
154,639
|
/**
 * Save MigratePartitionLeader information for error handling.
 *
 * @return true if a fresh node was created; false if the node already
 *         existed and was updated in place instead
 */
public static boolean createMigratePartitionLeaderInfo(ZooKeeper zk,
        MigratePartitionLeaderInfo info) {
    try {
        zk.create(migrate_partition_leader_info, info.toBytes(),
                Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        return true;
    } catch (KeeperException e) {
        if (e.code() == KeeperException.Code.NODEEXISTS) {
            // Node exists: overwrite its data instead (any version).
            try {
                zk.setData(migrate_partition_leader_info, info.toBytes(), -1);
            } catch (KeeperException | InterruptedException | JSONException e1) {
                // best-effort update; failure is deliberately ignored
            }
            return false;
        }
        org.voltdb.VoltDB.crashLocalVoltDB(
                "Unable to create MigratePartitionLeader Indicator", true, e);
    } catch (InterruptedException | JSONException e) {
        org.voltdb.VoltDB.crashLocalVoltDB(
                "Unable to create MigratePartitionLeader Indicator", true, e);
    }
    return true;
}
|
Save MigratePartitionLeader information for error handling
| 232
| 10
|
154,640
|
/**
 * Get MigratePartitionLeader information from ZooKeeper.
 *
 * @return the parsed info, or null if the node is absent, empty, or any
 *         read/parse error occurs (errors are deliberately swallowed)
 */
public static MigratePartitionLeaderInfo getMigratePartitionLeaderInfo(ZooKeeper zk) {
    try {
        byte[] data = zk.getData(migrate_partition_leader_info, null, null);
        if (data != null) {
            return new MigratePartitionLeaderInfo(data);
        }
    } catch (KeeperException | InterruptedException | JSONException e) {
        // best-effort read: fall through to null
    }
    return null;
}
|
get MigratePartitionLeader information
| 100
| 7
|
154,641
|
/**
 * For compatibility, convert a datetime character-string literal to a
 * datetime value for comparison against a datetime-typed operand.
 *
 * @return true if a conversion was performed
 */
private boolean convertDateTimeLiteral(Session session, Expression a, Expression b) {
    // Arrange for 'a' to be the datetime-typed side.
    if (!a.dataType.isDateTimeType()) {
        if (!b.dataType.isDateTimeType()) {
            return false; // neither side is a datetime
        }
        Expression swap = a;
        a = b;
        b = swap;
    }
    if (a.dataType.isDateTimeTypeWithZone()) {
        return false; // zoned types are not converted
    }
    // Only a character-string VALUE literal is converted in place.
    if (b.opType == OpTypes.VALUE && b.dataType.isCharacterType()) {
        b.valueData = a.dataType.castToType(session, b.valueData, b.dataType);
        b.dataType = a.dataType;
        return true;
    }
    return false;
}
|
for compatibility convert a datetime character string to a datetime value for comparison
| 162
| 15
|
154,642
|
/**
 * Converts an OR containing an AND to an AND, distributing recursively:
 * (a AND b) OR c  becomes  (a OR c) AND (b OR c).
 * The transformation mutates this node's opType and children in place.
 */
void distributeOr() {
    if (opType != OpTypes.OR) {
        return;
    }
    if (nodes[LEFT].opType == OpTypes.AND) {
        // (a AND b) OR c  =>  this becomes AND of (a OR c) and (b OR c).
        opType = OpTypes.AND;
        // temp = (b OR c)
        Expression temp = new ExpressionLogical(OpTypes.OR,
                nodes[LEFT].nodes[RIGHT], nodes[RIGHT]);
        // left child is rewritten in place into (a OR c)
        nodes[LEFT].opType = OpTypes.OR;
        nodes[LEFT].nodes[RIGHT] = nodes[RIGHT];
        nodes[RIGHT] = temp;
    } else if (nodes[RIGHT].opType == OpTypes.AND) {
        // Mirror case: swap children so the AND is on the left, then retry.
        Expression temp = nodes[LEFT];
        nodes[LEFT] = nodes[RIGHT];
        nodes[RIGHT] = temp;
        distributeOr();
        return;
    }
    // Recurse into both children (each is a logical expression here).
    ((ExpressionLogical) nodes[LEFT]).distributeOr();
    ((ExpressionLogical) nodes[RIGHT]).distributeOr();
}
|
Converts an OR containing an AND to an AND
| 201
| 10
|
154,643
|
boolean isSimpleBound ( ) { if ( opType == OpTypes . IS_NULL ) { return true ; } if ( nodes [ RIGHT ] != null ) { if ( nodes [ RIGHT ] . opType == OpTypes . VALUE ) { // also true for all parameters return true ; } if ( nodes [ RIGHT ] . opType == OpTypes . SQL_FUNCTION ) { if ( ( ( FunctionSQL ) nodes [ RIGHT ] ) . isValueFunction ( ) ) { return true ; } } } return false ; }
|
Called only on comparison expressions after reordering which have a COLUMN left leaf
| 112
| 17
|
154,644
|
/**
 * Swap the comparison with its complement: the operands are exchanged and
 * the operator is mirrored (e.g. &gt;= becomes &lt;=). EQUAL and NOT_DISTINCT
 * are their own mirrors.
 *
 * @throws RuntimeException for any non-comparison opType
 */
void swapCondition() {
    int mirrored;
    switch (opType) {
        case OpTypes.GREATER_EQUAL:
            mirrored = OpTypes.SMALLER_EQUAL;
            break;
        case OpTypes.SMALLER_EQUAL:
            mirrored = OpTypes.GREATER_EQUAL;
            break;
        case OpTypes.SMALLER:
            mirrored = OpTypes.GREATER;
            break;
        case OpTypes.GREATER:
            mirrored = OpTypes.SMALLER;
            break;
        case OpTypes.NOT_DISTINCT:
        case OpTypes.EQUAL:
            mirrored = opType; // symmetric operators are unchanged
            break;
        default:
            throw Error.runtimeError(ErrorCode.U_S0500, "Expression.swapCondition");
    }
    opType = mirrored;
    Expression temp = nodes[LEFT];
    nodes[LEFT] = nodes[RIGHT];
    nodes[RIGHT] = temp;
}
|
Swap the condition with its complement
| 204
| 7
|
154,645
|
/**
 * If exactly one child is integral, try to convert the other (e.g. a
 * VARBINARY literal) to a BIGINT in place.
 *
 * @return true if a conversion was performed
 */
private boolean voltConvertBinaryIntegerLiteral(Session session, Expression lhs,
        Expression rhs) {
    final Expression nonIntegralExpr;
    final int whichChild;
    if (lhs.dataType.isIntegralType()) {
        // Left is the integer; the right child is the conversion candidate.
        nonIntegralExpr = rhs;
        whichChild = RIGHT;
    } else if (rhs.dataType.isIntegralType()) {
        nonIntegralExpr = lhs;
        whichChild = LEFT;
    } else {
        return false; // neither side is integral
    }
    return ExpressionValue.voltMutateToBigintType(nonIntegralExpr, this, whichChild);
}
|
If one child is an integer and the other is a VARBINARY literal try to convert the literal to an integer .
| 130
| 25
|
154,646
|
/**
 * Basic delete with no logging or referential checks: the row is removed
 * from every index (last to first) and then from the store.
 */
public final void delete(Row row) {
    for (int i = indexList.length - 1; i >= 0; i--) {
        indexList[i].delete(this, row);
    }
    remove(row.getPos());
}
|
Basic delete with no logging or referential checks .
| 54
| 11
|
154,647
|
/**
 * Orders Tasks by their next scheduled time; required to back the priority
 * queue for scheduled tasks.
 *
 * @param a a Task (cast unchecked, as in the original)
 * @param b a Task
 * @return negative/zero/positive as a is scheduled before/with/after b
 */
public int compare(final Object a, final Object b) {
    final long awhen = ((Task) a).getNextScheduled();
    final long bwhen = ((Task) b).getNextScheduled();
    // Long.compare replaces the hand-rolled nested-ternary three-way compare.
    return Long.compare(awhen, bwhen);
}
|
Required to back the priority queue for scheduled tasks .
| 82
| 10
|
154,648
|
/**
 * Causes the specified Runnable to be executed once in the background after
 * the specified delay (millis).
 *
 * @throws IllegalArgumentException if runnable is null
 * @return an opaque handle for the scheduled task
 */
public Object scheduleAfter(final long delay, final Runnable runnable)
        throws IllegalArgumentException {
    if (runnable == null) {
        throw new IllegalArgumentException("runnable == null");
    }
    // period 0 / non-relative => one-shot task.
    return this.addTask(now() + delay, runnable, 0, false);
}
|
Causes the specified Runnable to be executed once in the background after the specified delay .
| 71
| 19
|
154,649
|
/**
 * Causes the specified Runnable to be executed once in the background
 * at the specified time.
 *
 * @param date absolute wall-clock time; must not be null
 * @param runnable the work to run; must not be null
 * @return an opaque task handle
 * @throws IllegalArgumentException if date or runnable is null
 */
public Object scheduleAt(final Date date, final Runnable runnable) throws IllegalArgumentException {
    // Validate arguments up front.
    if (date == null) {
        throw new IllegalArgumentException("date == null");
    }

    if (runnable == null) {
        throw new IllegalArgumentException("runnable == null");
    }

    // One-shot (period 0) task anchored at the given wall-clock time.
    return this.addTask(date.getTime(), runnable, 0, false);
}
|
Causes the specified Runnable to be executed once in the background at the specified time .
| 95
| 19
|
154,650
|
/**
 * Causes the specified Runnable to be executed periodically in the
 * background, starting at the specified time.
 *
 * @param date absolute start time; must not be null
 * @param period repeat period in milliseconds; must be positive
 * @param runnable the work to run; must not be null
 * @param relative true for fixed-rate, false for fixed-delay scheduling
 * @return an opaque task handle
 * @throws IllegalArgumentException on any invalid argument
 */
public Object schedulePeriodicallyAt(final Date date, final long period, final Runnable runnable, final boolean relative) throws IllegalArgumentException {
    // Validate arguments up front.
    if (date == null) {
        throw new IllegalArgumentException("date == null");
    }

    if (period <= 0) {
        throw new IllegalArgumentException("period <= 0");
    }

    if (runnable == null) {
        throw new IllegalArgumentException("runnable == null");
    }

    // Repeating task anchored at the given wall-clock start time.
    return addTask(date.getTime(), runnable, period, relative);
}
|
Causes the specified Runnable to be executed periodically in the background starting at the specified time .
| 127
| 20
|
154,651
|
/**
 * Causes the specified Runnable to be executed periodically in the
 * background, starting after the specified delay.
 *
 * @param delay initial delay in milliseconds from now
 * @param period repeat period in milliseconds; must be positive
 * @param runnable the work to run; must not be null
 * @param relative true for fixed-rate, false for fixed-delay scheduling
 * @return an opaque task handle
 * @throws IllegalArgumentException on any invalid argument
 */
public Object schedulePeriodicallyAfter(final long delay, final long period, final Runnable runnable, final boolean relative) throws IllegalArgumentException {
    // Validate arguments up front.
    if (period <= 0) {
        throw new IllegalArgumentException("period <= 0");
    }

    if (runnable == null) {
        throw new IllegalArgumentException("runnable == null");
    }

    // Repeating task starting 'delay' milliseconds from now.
    final long first = now() + delay;

    return addTask(first, runnable, period, relative);
}
|
Causes the specified Runnable to be executed periodically in the background starting after the specified delay .
| 103
| 20
|
154,652
|
/**
 * Shuts down this timer immediately: sets the shutdown flag, interrupts the
 * runner thread (breaking any wait it is in), and cancels all queued tasks.
 * Idempotent -- a second call is a no-op once the flag is set.
 */
public synchronized void shutdownImmediately() {
    if (!this.isShutdown) {
        // Capture the runner reference, then flag shutdown BEFORE
        // interrupting, so the runner observes the flag when it wakes.
        final Thread runner = this.taskRunnerThread;

        this.isShutdown = true;

        if (runner != null && runner.isAlive()) {
            runner.interrupt();
        }

        this.taskQueue.cancelAllTasks();
    }
}
|
Shuts down this timer immediately interrupting the wait state associated with the current head of the task queue or the wait state internal to the currently executing task if any such state is currently in effect .
| 73
| 39
|
154,653
|
/**
 * Retrieves whether the specified argument references a task scheduled
 * periodically using fixed-rate scheduling.
 *
 * @param task a task handle previously returned by a schedule method
 * @return true only for a repeating (period &gt; 0), relative task
 */
public static boolean isFixedRate(final Object task) {
    if (!(task instanceof Task)) {
        return false;
    }

    final Task t = (Task) task;

    // Fixed rate == repeating AND scheduled relative to ideal times.
    return t.relative && t.period > 0;
}
|
Retrieves whether the specified argument references a task scheduled periodically using fixed rate scheduling .
| 53
| 17
|
154,654
|
/**
 * Retrieves whether the specified argument references a task scheduled
 * periodically using fixed-delay scheduling.
 *
 * @param task a task handle previously returned by a schedule method
 * @return true only for a repeating (period &gt; 0), non-relative task
 */
public static boolean isFixedDelay(final Object task) {
    if (!(task instanceof Task)) {
        return false;
    }

    final Task t = (Task) task;

    // Fixed delay == repeating AND NOT scheduled relative to ideal times.
    return !t.relative && t.period > 0;
}
|
Retrieves whether the specified argument references a task scheduled periodically using fixed delay scheduling .
| 55
| 17
|
154,655
|
/**
 * Retrieves the last time the referenced task was executed, as a Date.
 * Returns null if the task has never been executed or the argument is
 * not a task handle.
 */
public static Date getLastScheduled(Object task) {
    if (!(task instanceof Task)) {
        return null;
    }

    final long when = ((Task) task).getLastScheduled();

    // Zero is the "never executed" sentinel.
    if (when == 0) {
        return null;
    }

    return new Date(when);
}
|
Retrieves the last time the referenced task was executed as a Date object . If the task has never been executed null is returned .
| 69
| 27
|
154,656
|
/**
 * Retrieves the next time the referenced task is due to execute, as a Date.
 * Returns null if the task is cancelled or the argument is not a task handle.
 */
public static Date getNextScheduled(Object task) {
    if (!(task instanceof Task)) {
        return null;
    }

    final Task t = (Task) task;

    // A cancelled task has no next execution time (0 sentinel).
    final long when = t.isCancelled() ? 0 : t.getNextScheduled();

    return (when == 0) ? null : new Date(when);
}
|
Retrieves the next time the referenced task is due to be executed as a Date object . If the referenced task is cancelled null is returned .
| 79
| 29
|
154,657
|
protected Task addTask ( final long first , final Runnable runnable , final long period , boolean relative ) { if ( this . isShutdown ) { throw new IllegalStateException ( "shutdown" ) ; } final Task task = new Task ( first , runnable , period , relative ) ; // sychronized this . taskQueue . addTask ( task ) ; // sychronized this . restart ( ) ; return task ; }
|
Adds to the task queue a new Task object encapsulating the supplied Runnable and scheduling arguments .
| 94
| 20
|
154,658
|
/**
 * Retrieves the next task to execute, blocking (parked on the queue) until
 * one is due. Repeating tasks are rescheduled and re-queued before being
 * returned. Returns null if this timer is shut down, the current thread is
 * interrupted, or there are no queued tasks.
 */
protected Task nextTask() {
    try {
        // NOTE(review): the guard is "!isShutdown || Thread.interrupted()";
        // Thread.interrupted() also CLEARS the interrupt flag -- confirm
        // this interaction with the shutdown path is intentional.
        while (!this.isShutdown || Thread.interrupted()) {
            long now;
            long next;
            long wait;
            Task task;

            // synchronized to ensure removeTask
            // applies only to the peeked task,
            // when the computed wait <= 0
            synchronized (this.taskQueue) {
                task = this.taskQueue.peekTask();

                if (task == null) {
                    // queue is empty
                    break;
                }

                now  = System.currentTimeMillis();
                next = task.next;
                wait = (next - now);

                if (wait > 0) {
                    // release ownership of taskQueue monitor and await
                    // notification of task addition or cancellation,
                    // at most until the time when the peeked task is
                    // next supposed to execute
                    this.taskQueue.park(wait);

                    continue;           // to top of loop
                } else {
                    this.taskQueue.removeTask();
                }
            }

            long period = task.period;

            if (period > 0) {           // repeated task
                if (task.relative) {    // using fixed rate scheduling
                    final long late = (now - next);

                    if (late > period) {
                        // ensure that really late tasks don't
                        // completely saturate the head of the
                        // task queue
                        period = 0;     /** @todo : is -1, -2 ... fairer? */
                    } else if (late > 0) {
                        // compensate for scheduling overruns
                        period -= late;
                    }
                }

                // re-queue the repeating task at its next due time
                task.updateSchedule(now, now + period);
                this.taskQueue.addTask(task);
            }

            return task;
        }
    } catch (InterruptedException e) {
        //e.printStackTrace();
    }

    // shut down, interrupted, or queue empty
    return null;
}
|
Retrieves the next task to execute or null if this timer is shutdown the current thread is interrupted or there are no queued tasks .
| 350
| 28
|
154,659
|
/**
 * Applies or releases the Java-side undo actions that accompany an EE
 * transaction. On undo, actions run in reverse order of execution and the
 * log is cleared; on release, actions are released in original order.
 */
private static void handleUndoLog(List<UndoAction> undoLog, boolean undo) {
    if (undoLog == null) {
        return;
    }

    if (undo) {
        // Roll back in reverse order of execution, then drop the log.
        for (UndoAction action : Lists.reverse(undoLog)) {
            action.undo();
        }
        undoLog.clear();
    } else {
        for (UndoAction action : undoLog) {
            action.release();
        }
    }
}
|
Java - level related work that is also needed to roll back
| 96
| 12
|
154,660
|
/**
 * Updates the system settings: swaps in the new catalog context and
 * reloads procedures and user-defined functions from it.
 *
 * @return always true
 */
public boolean updateSettings(CatalogContext context) {
    m_context = context;
    // here you could bring the timeout settings
    m_loadedProcedures.loadProcedures(m_context);
    m_ee.loadFunctions(m_context);
    return true;
}
|
Update the system settings
| 58
| 4
|
154,661
|
/**
 * For the specified list of table ids, returns the number of mispartitioned
 * rows per table, using the provided hashinator configuration.
 *
 * @return one mispartitioned-row count per requested table, in request order
 */
@Override
public long[] validatePartitioning(long[] tableIds, byte[] hashinatorConfig) {
    // Parameter layout: count:int, tableIds:long[count], hashinator config
    // bytes. NOTE(review): the size reserves an extra 4 bytes but no config
    // length is written -- confirm the native side's expected layout.
    ByteBuffer paramBuffer = m_ee.getParamBufferForExecuteTask(4 + (8 * tableIds.length) + 4 + hashinatorConfig.length);
    paramBuffer.putInt(tableIds.length);
    for (long tableId : tableIds) {
        paramBuffer.putLong(tableId);
    }
    paramBuffer.put(hashinatorConfig);

    ByteBuffer resultBuffer = ByteBuffer.wrap(m_ee.executeTask(TaskType.VALIDATE_PARTITIONING, paramBuffer));

    // Unpack one long per table from the EE's response.
    long mispartitionedRows[] = new long[tableIds.length];
    for (int ii = 0; ii < tableIds.length; ii++) {
        mispartitionedRows[ii] = resultBuffer.getLong();
    }
    return mispartitionedRows;
}
|
For the specified list of table ids return the number of mispartitioned rows using the provided hashinator config
| 210
| 23
|
154,662
|
/**
 * Generates an in-stream DR event, which pushes an event buffer to the
 * topend. Quiesces the EE up to the last committed SP handle first.
 */
public void generateDREvent(EventType type, long txnId, long uniqueId, long lastCommittedSpHandle, long spHandle, byte[] payloads) {
    // Flush pending work up to the last committed SP handle before the event.
    m_ee.quiesce(lastCommittedSpHandle);

    // Parameter layout: type:int, uniqueId/lastCommittedSpHandle/spHandle/
    // txnId/undoToken (longs), payload length:int, payload bytes.
    ByteBuffer paramBuffer = m_ee.getParamBufferForExecuteTask(32 + 16 + payloads.length);
    paramBuffer.putInt(type.ordinal());
    paramBuffer.putLong(uniqueId);
    paramBuffer.putLong(lastCommittedSpHandle);
    paramBuffer.putLong(spHandle);
    // adding txnId and undoToken to make generateDREvent undoable
    paramBuffer.putLong(txnId);
    paramBuffer.putLong(getNextUndoToken(m_currentTxnId));
    paramBuffer.putInt(payloads.length);
    paramBuffer.put(payloads);
    m_ee.executeTask(TaskType.GENERATE_DR_EVENT, paramBuffer);
}
|
Generate an in - stream DR event which pushes an event buffer to the topend
| 220
| 16
|
154,663
|
/**
 * Have all survivors supplied a full repair log?
 *
 * @return true only if every known replica's repair log is complete
 */
public boolean areRepairLogsComplete() {
    // Iterate values() directly -- the keys were unused in the old
    // entrySet() loop.
    for (ReplicaRepairStruct struct : m_replicaRepairStructs.values()) {
        if (!struct.logsComplete()) {
            return false;
        }
    }
    return true;
}
|
Have all survivors supplied a full repair log?
| 65
| 9
|
154,664
|
/**
 * Sends the repair-log union (the missed messages) to all surviving
 * replicas, then publishes the promotion result.
 */
public void repairSurvivors() {
    // cancel() and repair() must be synchronized by the caller (the deliver lock,
    // currently). If cancelled and the last repair message arrives, don't send
    // out corrections!
    if (this.m_promotionResult.isCancelled()) {
        repairLogger.debug(m_whoami + "skipping repair message creation for cancelled Term.");
        return;
    }

    if (repairLogger.isDebugEnabled()) {
        repairLogger.debug(m_whoami + "received all repair logs and is repairing surviving replicas.");
    }
    for (Iv2RepairLogResponseMessage li : m_repairLogUnion) {
        // send the repair log union to all the survivors. SPIs will ignore
        // CompleteTransactionMessages for transactions which have already
        // completed, so this has the effect of making sure that any holes
        // in the repair log are filled without explicitly having to
        // discover and track them.
        VoltMessage repairMsg = createRepairMessage(li);
        if (repairLogger.isDebugEnabled()) {
            repairLogger.debug(m_whoami + "repairing: " + CoreUtils.hsIdCollectionToString(m_survivors) + " with: " + TxnEgo.txnIdToString(li.getTxnId()) + " " + repairMsg);
        }
        if (repairMsg != null) {
            m_mailbox.repairReplicasWith(m_survivors, repairMsg);
        }
    }
    // Publish the promotion result so waiters can proceed.
    m_promotionResult.set(new RepairResult(m_maxSeenTxnId));
}
|
Send missed - messages to survivors . Exciting!
| 352
| 10
|
154,665
|
/**
 * Adds a repair-log response to the union. Completion messages are
 * preferred over fragment tasks and erase entries they make stale.
 */
void addToRepairLog(Iv2RepairLogResponseMessage msg) {
    // don't add the null payload from the first message ack to the repair log
    if (msg.getPayload() == null) {
        return;
    }
    // MP repair log has at most two messages, complete message for prior transaction
    // and fragment message for current transaction, don't add message before prior completion
    if (msg.getTxnId() <= m_maxSeenCompleteTxnId) {
        return;
    }
    // Find an existing entry for the SAME transaction, if any.
    Iv2RepairLogResponseMessage prev = m_repairLogUnion.floor(msg);
    if (prev != null && (prev.getTxnId() != msg.getTxnId())) {
        prev = null;
    }
    if (msg.getPayload() instanceof CompleteTransactionMessage) {
        // prefer complete messages to fragment tasks. Completion message also erases prior staled messages
        m_repairLogUnion.removeIf((p) -> p.getTxnId() <= msg.getTxnId());
        m_repairLogUnion.add(msg);
        m_maxSeenCompleteTxnId = msg.getTxnId();
    } else if (prev == null) {
        // Fragment: only add when no entry for this transaction exists yet.
        m_repairLogUnion.add(msg);
    }
}
|
Replace old messages with complete transaction messages .
| 276
| 8
|
154,666
|
static String getSchemaPath ( String projectFilePath , String path ) throws IOException { File file = null ; if ( path . contains ( ".jar!" ) ) { String ddlText = null ; ddlText = VoltCompilerUtils . readFileFromJarfile ( path ) ; file = VoltProjectBuilder . writeStringToTempFile ( ddlText ) ; } else { file = new File ( path ) ; } if ( ! file . isAbsolute ( ) ) { // Resolve schemaPath relative to either the database definition xml file // or the working directory. if ( projectFilePath != null ) { file = new File ( new File ( projectFilePath ) . getParent ( ) , path ) ; } else { file = new File ( path ) ; } } return file . getPath ( ) ; }
|
Get the path of a schema file optionally relative to a project . xml file s path .
| 174
| 18
|
154,667
|
/**
 * Loads all UDFs recorded in the catalog: deregisters runners whose
 * functions no longer exist, then instantiates and registers a runner for
 * every catalog function. Crashes the local node if a UDF class cannot be
 * loaded; throws if a loaded class cannot be instantiated.
 */
public void loadFunctions(CatalogContext catalogContext) {
    final CatalogMap<Function> catalogFunctions = catalogContext.database.getFunctions();
    // Remove obsolete tokens
    for (UserDefinedFunctionRunner runner : m_udfs.values()) {
        // The function that the current UserDefinedFunctionRunner is referring to
        // does not exist in the catalog anymore, we need to remove its token.
        if (catalogFunctions.get(runner.m_functionName) == null) {
            FunctionForVoltDB.deregisterUserDefinedFunction(runner.m_functionName);
        }
    }
    // Build new UDF runners
    ImmutableMap.Builder<Integer, UserDefinedFunctionRunner> builder =
            ImmutableMap.<Integer, UserDefinedFunctionRunner>builder();
    for (final Function catalogFunction : catalogFunctions) {
        final String className = catalogFunction.getClassname();
        Class<?> funcClass = null;
        try {
            funcClass = catalogContext.classForProcedureOrUDF(className);
        } catch (final ClassNotFoundException e) {
            // Different error text for classes that look like VoltDB built-ins.
            if (className.startsWith("org.voltdb.")) {
                String msg = String.format(ORGVOLTDB_FUNCCNAME_ERROR_FMT, className);
                VoltDB.crashLocalVoltDB(msg, false, null);
            } else {
                String msg = String.format(UNABLETOLOAD_ERROR_FMT, className);
                VoltDB.crashLocalVoltDB(msg, false, null);
            }
        }
        Object funcInstance = null;
        try {
            funcInstance = funcClass.newInstance();
        } catch (InstantiationException | IllegalAccessException e) {
            throw new RuntimeException(String.format("Error instantiating function \"%s\"", className), e);
        }
        assert (funcInstance != null);
        builder.put(catalogFunction.getFunctionid(), new UserDefinedFunctionRunner(catalogFunction, funcInstance));
    }
    loadBuiltInJavaFunctions(builder);
    // Swap in the complete new runner map in one assignment.
    m_udfs = builder.build();
}
|
Load all the UDFs recorded in the catalog . Instantiate and register them in the system .
| 456
| 20
|
154,668
|
/**
 * Reads the whole named file, joining its lines with single spaces (a
 * trailing space follows the last line).
 *
 * @param file path of the file to read
 * @return the joined contents, or the exception message on I/O failure
 *         (legacy contract preserved)
 */
static String readFile(String file) {
    // try-with-resources fixes the reader leak the old code had when
    // readLine() threw; StringBuilder replaces the synchronized
    // StringBuffer; the dead 'count' variable is gone.
    try (BufferedReader read = new BufferedReader(new FileReader(file))) {
        StringBuilder b = new StringBuilder();
        String s;
        while ((s = read.readLine()) != null) {
            b.append(s);
            b.append(' ');
        }
        return b.toString();
    } catch (IOException e) {
        return e.getMessage();
    }
}
|
Redid this file to remove sizing requirements and to make it faster . Sped it up 10 - fold .
| 128
| 20
|
154,669
|
static String [ ] getServersFromURL ( String url ) { // get everything between the prefix and the ? String prefix = URL_PREFIX + "//" ; int end = url . length ( ) ; if ( url . indexOf ( "?" ) > 0 ) { end = url . indexOf ( "?" ) ; } String servstring = url . substring ( prefix . length ( ) , end ) ; return servstring . split ( "," ) ; }
|
Static so it's unit - testable ; yes , lazy me .
| 100
| 11
|
154,670
|
/**
 * Initializes the export generation from disk: pairs persisted PBD data
 * files with their advertisement (.ad) files, creates data sources from
 * them, deletes orphans, and creates ack mailboxes for on-disk partitions
 * that are no longer hosted locally.
 */
private void initializeGenerationFromDisk(final CatalogMap<Connector> connectors, final ExportDataProcessor processor, File[] files, List<Pair<Integer, Integer>> localPartitionsToSites, long genId) {
    List<Integer> onDiskPartitions = new ArrayList<Integer>();

    // Names of all currently exported tables (views excluded).
    NavigableSet<Table> streams = CatalogUtil.getExportTablesExcludeViewOnly(connectors);
    Set<String> exportedTables = new HashSet<>();
    for (Table stream : streams) {
        exportedTables.add(stream.getTypeName());
    }
    /*
     * Find all the data files. Once one is found, extract the nonce
     * and check for any advertisements related to the data files. If
     * there are orphaned advertisements, delete them.
     */
    Map<String, File> dataFiles = new HashMap<>();
    for (File data : files) {
        if (data.getName().endsWith(".pbd")) {
            PbdSegmentName pbdName = PbdSegmentName.parseFile(exportLog, data);
            if (pbdName.m_nonce != null) {
                String nonce = pbdName.m_nonce;
                String streamName = getStreamNameFromNonce(nonce);
                if (exportedTables.contains(streamName)) {
                    dataFiles.put(nonce, data);
                } else {
                    // ENG-15740, stream can be dropped while node is offline, delete .pbd files
                    // if stream is no longer in catalog
                    data.delete();
                }
            } else if (pbdName.m_result == Result.NOT_PBD) {
                exportLog.warn(data.getAbsolutePath() + " is not a PBD file.");
            } else if (pbdName.m_result == Result.INVALID_NAME) {
                exportLog.warn(data.getAbsolutePath() + " doesn't have valid PBD name.");
            }
        }
    }
    // Pair each advertisement with its data file and create a data source.
    for (File ad : files) {
        if (ad.getName().endsWith(".ad")) {
            String nonce = getNonceFromAdFile(ad);
            File dataFile = dataFiles.get(nonce);
            if (dataFile != null) {
                try {
                    addDataSource(ad, localPartitionsToSites, onDiskPartitions, processor, genId);
                } catch (IOException e) {
                    VoltDB.crashLocalVoltDB("Error intializing export datasource " + ad, true, e);
                }
            } else {
                //Delete ads that have no data
                ad.delete();
            }
        }
    }
    // Count unique partitions only
    Set<Integer> allLocalPartitions = localPartitionsToSites.stream().map(p -> p.getFirst()).collect(Collectors.toSet());
    Set<Integer> onDIskPartitionsSet = new HashSet<Integer>(onDiskPartitions);
    // Keep only on-disk partitions that are not hosted locally any more.
    onDIskPartitionsSet.removeAll(allLocalPartitions);
    // One export mailbox per node, since we only keep one generation
    if (!onDIskPartitionsSet.isEmpty()) {
        createAckMailboxesIfNeeded(onDIskPartitionsSet);
    }
}
|
Initialize generation from disk creating data sources from the PBD files .
| 720
| 14
|
154,671
|
/**
 * Initializes the export generation from the catalog: creates data sources
 * for every exported stream (even on disabled connectors), updates stream
 * statuses, removes sources whose tables are gone, and sets up ack
 * mailboxes when sources were actually created.
 */
void initializeGenerationFromCatalog(CatalogContext catalogContext, final CatalogMap<Connector> connectors, final ExportDataProcessor processor, int hostId, List<Pair<Integer, Integer>> localPartitionsToSites, boolean isCatalogUpdate) {
    // Update catalog version so that datasources use this version when propagating acks
    m_catalogVersion = catalogContext.catalogVersion;
    if (exportLog.isDebugEnabled()) {
        exportLog.debug("Updating to catalog version : " + m_catalogVersion);
    }
    // Collect table names of existing datasources
    Set<String> currentTables = new HashSet<>();
    synchronized (m_dataSourcesByPartition) {
        for (Iterator<Map<String, ExportDataSource>> it = m_dataSourcesByPartition.values().iterator(); it.hasNext();) {
            Map<String, ExportDataSource> sources = it.next();
            currentTables.addAll(sources.keySet());
        }
    }
    if (exportLog.isDebugEnabled()) {
        exportLog.debug("Current tables: " + currentTables);
    }
    // Now create datasources based on the catalog (if already present will not be re-created).
    // Note that we create sources on disabled connectors.
    Set<Integer> partitionsInUse = localPartitionsToSites.stream().map(p -> p.getFirst()).collect(Collectors.toSet());
    boolean createdSources = false;
    NavigableSet<Table> streams = CatalogUtil.getExportTablesExcludeViewOnly(connectors);
    Set<String> exportedTables = new HashSet<>();
    for (Table stream : streams) {
        addDataSources(stream, hostId, localPartitionsToSites, partitionsInUse, processor, catalogContext.m_genId, isCatalogUpdate);
        exportedTables.add(stream.getTypeName());
        createdSources = true;
    }
    // Mark dropped streams / revive re-added ones.
    updateStreamStatus(exportedTables);
    // Remove datasources that are not exported anymore
    for (String table : exportedTables) {
        currentTables.remove(table);
    }
    if (!currentTables.isEmpty()) {
        removeDataSources(currentTables);
    }
    //Only populate partitions in use if export is actually happening
    createAckMailboxesIfNeeded(createdSources ? partitionsInUse : new HashSet<Integer>());
}
|
Initialize generation from catalog .
| 536
| 6
|
154,672
|
/**
 * Marks data sources whose tables are no longer exported as DROPPED, and
 * reactivates previously dropped sources whose tables reappeared in the
 * exported set.
 */
private void updateStreamStatus(Set<String> exportedTables) {
    synchronized (m_dataSourcesByPartition) {
        for (Map<String, ExportDataSource> sources : m_dataSourcesByPartition.values()) {
            for (Map.Entry<String, ExportDataSource> entry : sources.entrySet()) {
                final ExportDataSource src = entry.getValue();

                if (!exportedTables.contains(entry.getKey())) {
                    // Table left the exported set: drop its source.
                    src.setStatus(ExportDataSource.StreamStatus.DROPPED);
                } else if (src.getStatus() == ExportDataSource.StreamStatus.DROPPED) {
                    // Table is exported again: revive the source.
                    src.setStatus(ExportDataSource.StreamStatus.ACTIVE);
                }
            }
        }
    }
}
|
Mark a DataSource as dropped if its not present in the connectors .
| 187
| 14
|
154,673
|
/**
 * Auto-replies a TAKE_MASTERSHIP response for a stream that no longer
 * exists, so the requester is not left waiting for an answer.
 */
private void sendDummyTakeMastershipResponse(long sourceHsid, long requestId, int partitionId, byte[] signatureBytes) {
    // msg type(1) + partition:int(4) + length:int(4) + signaturesBytes.length
    // requestId(8)
    int msgLen = 1 + 4 + 4 + signatureBytes.length + 8;
    ByteBuffer buf = ByteBuffer.allocate(msgLen);
    buf.put(ExportManager.TAKE_MASTERSHIP_RESPONSE);
    buf.putInt(partitionId);
    buf.putInt(signatureBytes.length);
    buf.put(signatureBytes);
    buf.putLong(requestId);
    BinaryPayloadMessage bpm = new BinaryPayloadMessage(new byte[0], buf.array());
    m_mbox.send(sourceHsid, bpm);
    if (exportLog.isDebugEnabled()) {
        exportLog.debug("Partition " + partitionId + " mailbox hsid (" + CoreUtils.hsIdToString(m_mbox.getHSId()) + ") send dummy TAKE_MASTERSHIP_RESPONSE message(" + requestId + ") to " + CoreUtils.hsIdToString(sourceHsid));
    }
}
|
Automatically reply with a response when the requested stream no longer exists
| 291
| 12
|
154,674
|
/**
 * Refreshes the ack mailboxes of all data sources on a partition and, when
 * new replica HSIds are supplied, re-forwards acks and re-queries gap
 * candidates for them. Accessed by multiple threads; per-partition state is
 * guarded by the m_dataSourcesByPartition monitor.
 */
public void updateAckMailboxes(int partition, Set<Long> newHSIds) {
    ImmutableList<Long> replicaHSIds = m_replicasHSIds.get(partition);
    synchronized (m_dataSourcesByPartition) {
        Map<String, ExportDataSource> partitionMap = m_dataSourcesByPartition.get(partition);
        if (partitionMap == null) {
            // No data sources on this partition.
            return;
        }
        for (ExportDataSource eds : partitionMap.values()) {
            eds.updateAckMailboxes(Pair.of(m_mbox, replicaHSIds));
            if (newHSIds != null && !newHSIds.isEmpty()) {
                // In case of newly joined or rejoined streams miss any RELEASE_BUFFER event,
                // master stream resends the event when the export mailbox is aware of new streams.
                eds.forwardAckToNewJoinedReplicas(newHSIds);
                // After rejoin, new data source may contain the data which current master doesn't have,
                // only on master stream if it is blocked by the gap
                eds.queryForBestCandidate();
            }
        }
    }
}
|
Access by multiple threads
| 252
| 4
|
154,675
|
/**
 * Adds (or refreshes) data sources for one catalog table on every local
 * partition. New table/partition pairs get a fresh ExportDataSource;
 * existing ones are re-associated with the new processor's export client.
 */
private void addDataSources(Table table, int hostId, List<Pair<Integer, Integer>> localPartitionsToSites, Set<Integer> partitionsInUse, final ExportDataProcessor processor, final long genId, boolean isCatalogUpdate) {
    for (Pair<Integer, Integer> partitionAndSiteId : localPartitionsToSites) {
        /*
         * IOException can occur if there is a problem
         * with the persistent aspects of the datasource storage
         */
        int partition = partitionAndSiteId.getFirst();
        int siteId = partitionAndSiteId.getSecond();
        synchronized (m_dataSourcesByPartition) {
            try {
                Map<String, ExportDataSource> dataSourcesForPartition = m_dataSourcesByPartition.get(partition);
                if (dataSourcesForPartition == null) {
                    dataSourcesForPartition = new HashMap<String, ExportDataSource>();
                    m_dataSourcesByPartition.put(partition, dataSourcesForPartition);
                }
                final String key = table.getTypeName();
                if (!dataSourcesForPartition.containsKey(key)) {
                    // New table/partition pair: create a fresh data source.
                    ExportDataSource exportDataSource = new ExportDataSource(this, processor, "database", key, partition, siteId, genId, table.getColumns(), table.getPartitioncolumn(), m_directory.getPath());
                    int migrateBatchSize = CatalogUtil.getPersistentMigrateBatchSize(key);
                    exportDataSource.setupMigrateRowsDeleter(migrateBatchSize);
                    if (exportLog.isDebugEnabled()) {
                        exportLog.debug("Creating ExportDataSource for table in catalog " + key + " partition " + partition + " site " + siteId);
                    }
                    dataSourcesForPartition.put(key, exportDataSource);
                    if (isCatalogUpdate) {
                        exportDataSource.updateCatalog(table, genId);
                    }
                } else {
                    // Associate any existing EDS to the export client in the new processor
                    ExportDataSource eds = dataSourcesForPartition.get(key);
                    ExportClientBase client = processor.getExportClient(key);
                    if (client != null) {
                        // Associate to an existing export client
                        eds.setClient(client);
                        eds.setRunEveryWhere(client.isRunEverywhere());
                    } else {
                        // Reset to no export client
                        eds.setClient(null);
                        eds.setRunEveryWhere(false);
                    }
                    // Mark in catalog only if partition is in use
                    eds.markInCatalog(partitionsInUse.contains(partition));
                    if (isCatalogUpdate) {
                        eds.updateCatalog(table, genId);
                    }
                }
            } catch (IOException e) {
                VoltDB.crashLocalVoltDB("Error creating datasources for table " + table.getTypeName() + " host id " + hostId, true, e);
            }
        }
    }
}
|
Add datasources for a catalog table in all partitions
| 640
| 10
|
154,676
|
/**
 * Callback: an export data source on an unused partition reports it has
 * drained. Removes it from the bookkeeping maps (and the mailbox, when the
 * partition is empty) and then deletes its storage outside the lock.
 */
@Override
public void onSourceDrained(int partitionId, String tableName) {
    ExportDataSource source;
    synchronized (m_dataSourcesByPartition) {
        Map<String, ExportDataSource> sources = m_dataSourcesByPartition.get(partitionId);
        if (sources == null) {
            // Only log if the partition is not already being torn down.
            if (!m_removingPartitions.contains(partitionId)) {
                exportLog.error("Could not find export data sources for partition " + partitionId + ". The export cleanup stream is being discarded.");
            }
            return;
        }
        source = sources.get(tableName);
        if (source == null) {
            exportLog.warn("Could not find export data source for signature " + partitionId + " name " + tableName + ". The export cleanup stream is being discarded.");
            return;
        }
        // Remove source and partition entry if empty
        sources.remove(tableName);
        if (sources.isEmpty()) {
            m_dataSourcesByPartition.remove(partitionId);
            removeMailbox(partitionId);
        }
    }
    //Do closing outside the synchronized block. Do not wait on future since
    // we're invoked from the source's executor thread.
    exportLog.info("Drained on unused partition " + partitionId + ": " + source);
    source.closeAndDelete();
}
|
The Export Data Source reports it is drained on an unused partition .
| 282
| 13
|
154,677
|
public void add ( int index , Object element ) { // reporter.updateCounter++; if ( index > elementCount ) { throw new IndexOutOfBoundsException ( "Index out of bounds: " + index + ">" + elementCount ) ; } if ( index < 0 ) { throw new IndexOutOfBoundsException ( "Index out of bounds: " + index + " < 0" ) ; } if ( elementCount >= elementData . length ) { increaseCapacity ( ) ; } for ( int i = elementCount ; i > index ; i -- ) { elementData [ i ] = elementData [ i - 1 ] ; } elementData [ index ] = element ; elementCount ++ ; }
|
Inserts an element at the given index
| 147
| 8
|
154,678
|
public boolean add ( Object element ) { // reporter.updateCounter++; if ( elementCount >= elementData . length ) { increaseCapacity ( ) ; } elementData [ elementCount ] = element ; elementCount ++ ; return true ; }
|
Appends an element to the end of the list
| 49
| 10
|
154,679
|
/**
 * Gets the element at the given position.
 *
 * @throws IndexOutOfBoundsException if index is negative or not less than
 *         the current element count
 */
public Object get(int index) {
    // Reject negatives first, then anything past the logical end.
    if (index < 0) {
        throw new IndexOutOfBoundsException("Index out of bounds: " + index + " < 0");
    }

    if (index >= elementCount) {
        throw new IndexOutOfBoundsException("Index out of bounds: " + index + " >= " + elementCount);
    }

    return elementData[index];
}
|
Gets the element at given position
| 85
| 7
|
154,680
|
/**
 * Removes and returns the element at the given position, shifting
 * subsequent elements one slot to the left.
 *
 * @throws IndexOutOfBoundsException if index is negative or not less than
 *         the current element count
 */
public Object remove(int index) {
    if (index >= elementCount) {
        throw new IndexOutOfBoundsException("Index out of bounds: " + index + " >= " + elementCount);
    }

    if (index < 0) {
        throw new IndexOutOfBoundsException("Index out of bounds: " + index + " < 0");
    }

    Object removedObj = elementData[index];

    // System.arraycopy replaces the hand-rolled shift loop -- same effect,
    // single optimized bulk copy.
    System.arraycopy(elementData, index + 1, elementData, index, elementCount - index - 1);

    elementCount--;

    if (elementCount == 0) {
        clear();
    } else {
        // Null the vacated tail slot so the old reference can be collected.
        elementData[elementCount] = null;
    }

    return removedObj;
}
|
Removes and returns the element at given position
| 154
| 9
|
154,681
|
/**
 * Replaces the element at the given position.
 *
 * @return the element previously stored at that position
 * @throws IndexOutOfBoundsException if index is negative or not less than
 *         the current element count
 */
public Object set(int index, Object element) {
    // Reject negatives first, then anything past the logical end.
    if (index < 0) {
        throw new IndexOutOfBoundsException("Index out of bounds: " + index + " < 0");
    }

    if (index >= elementCount) {
        throw new IndexOutOfBoundsException("Index out of bounds: " + index + " >= " + elementCount);
    }

    final Object previous = elementData[index];

    elementData[index] = element;

    return previous;
}
|
Replaces the element at given position
| 103
| 7
|
154,682
|
/**
 * Equality check that actually compares two buffers element by element.
 *
 * @return true if both arguments are the same reference (including both
 *         null) or contain the same bytes; false otherwise -- including
 *         when exactly one argument is null (robustness fix: the old code
 *         threw NullPointerException in that case)
 */
public static boolean bufEquals(byte onearray[], byte twoarray[]) {
    // Identity covers the both-null case too.
    if (onearray == twoarray) {
        return true;
    }

    if (onearray == null || twoarray == null) {
        return false;
    }

    if (onearray.length != twoarray.length) {
        return false;
    }

    for (int idx = 0; idx < onearray.length; idx++) {
        if (onearray[idx] != twoarray[idx]) {
            return false;
        }
    }

    return true;
}
|
equals function that actually compares two buffers .
| 108
| 9
|
154,683
|
/**
 * Gets a JDBC Connection using the data of this RCData object, with the
 * specified override elements taking precedence over the stored values.
 *
 * @param curDriverIn overriding JDBC driver class name, or null
 * @param curCharsetIn overriding charset, or null
 * @param curTrustStoreIn overriding trust-store path, or null
 * @throws ClassNotFoundException if the driver class cannot be loaded
 * @throws MalformedURLException if URL/user/password variable expansion fails
 * @throws SQLException on connection failure
 */
public Connection getConnection(String curDriverIn, String curCharsetIn, String curTrustStoreIn) throws ClassNotFoundException, MalformedURLException, SQLException {
    // Local vars to satisfy compiler warnings
    String curDriver = curDriverIn;
    String curCharset = curCharsetIn;
    String curTrustStore = curTrustStoreIn;
    Properties sysProps = System.getProperties();
    if (curDriver == null) {
        // If explicit driver not specified
        curDriver = ((driver == null) ? DEFAULT_JDBC_DRIVER : driver);
    }
    if (curCharset == null && charset != null) {
        curCharset = charset;
    }
    if (curTrustStore == null && truststore != null) {
        curTrustStore = truststore;
    }
    // Publish (or clear) charset and trust store via system properties.
    if (curCharset == null) {
        sysProps.remove("sqlfile.charset");
    } else {
        sysProps.put("sqlfile.charset", curCharset);
    }
    if (curTrustStore == null) {
        sysProps.remove("javax.net.ssl.trustStore");
    } else {
        sysProps.put("javax.net.ssl.trustStore", curTrustStore);
    }
    // Expand system-property references in URL, user name and password;
    // expansion errors surface as MalformedURLException.
    String urlString = null;
    try {
        urlString = expandSysPropVars(url);
    } catch (IllegalArgumentException iae) {
        throw new MalformedURLException(iae.getMessage() + " for URL '" + url + "'");
    }
    String userString = null;
    if (username != null) try {
        userString = expandSysPropVars(username);
    } catch (IllegalArgumentException iae) {
        throw new MalformedURLException(iae.getMessage() + " for user name '" + username + "'");
    }
    String passwordString = null;
    if (password != null) try {
        passwordString = expandSysPropVars(password);
    } catch (IllegalArgumentException iae) {
        throw new MalformedURLException(iae.getMessage() + " for password");
    }
    Class.forName(curDriver);
    // This is not necessary for jdbc:odbc or if class loaded by a
    // service resource file. Consider checking for that.
    Connection c = (userString == null) ? DriverManager.getConnection(urlString) : DriverManager.getConnection(urlString, userString, passwordString);
    if (ti != null) RCData.setTI(c, ti);
    // Would like to verify the setting made by checking
    // c.getTransactionIsolation(). Unfortunately, the spec allows for
    // databases to substitute levels according to some rules, and it's
    // impossible to know what to expect since custom levels are permitted.
    // Debug:
    // System.err.println("TI set to " + ti + "\nPOST: "
    // + SqlTool.tiToString(c.getTransactionIsolation()));
    return c;
}
|
Gets a JDBC Connection using the data of this RCData object with specified override elements
| 659
| 18
|
154,684
|
/**
 * Returns the symbolic name for a numerical java.sql.Connection
 * transaction-isolation level; unknown values get a descriptive fallback.
 */
static public String tiToString(int ti) {
    if (ti == Connection.TRANSACTION_READ_UNCOMMITTED) {
        return "TRANSACTION_READ_UNCOMMITTED";
    }
    if (ti == Connection.TRANSACTION_READ_COMMITTED) {
        return "TRANSACTION_READ_COMMITTED";
    }
    if (ti == Connection.TRANSACTION_REPEATABLE_READ) {
        return "TRANSACTION_REPEATABLE_READ";
    }
    if (ti == Connection.TRANSACTION_SERIALIZABLE) {
        return "TRANSACTION_SERIALIZABLE";
    }
    if (ti == Connection.TRANSACTION_NONE) {
        return "TRANSACTION_NONE";
    }

    // Custom / vendor-specific level.
    return "Custom Transaction Isolation numerical value: " + ti;
}
|
Return String for numerical java . sql . Connection Transaction level .
| 156
| 12
|
154,685
|
/**
 * For OPS actions, generate a dummy response to the distributed work to
 * avoid startup initialization dependencies.  Startup can take a long
 * time and we don't want to prevent other agents from making progress.
 *
 * @param obj the JSON ops request received from the requesting agent
 * @throws Exception if the response cannot be built or sent
 */
protected void handleJSONMessageAsDummy(JSONObject obj) throws Exception {
    hostLog.info("Generating dummy response for ops request " + obj);
    // Passing null results makes sendOpsResponse send a payload containing
    // only the requestId, i.e. "no data from this node".
    sendOpsResponse(null, obj, OPS_DUMMY);
}
|
For OPS actions generate a dummy response to the distributed work to avoid startup initialization dependencies . Startup can take a long time and we don't want to prevent other agents from making progress
| 50
| 35
|
154,686
|
/**
 * Perform the action associated with this agent using the provided
 * ParameterSet.  This is the entry point to the OPS system.  The work is
 * submitted to this agent's executor, so the caller returns immediately;
 * any failure is reported back to the client asynchronously.
 *
 * @param c            connection to the requesting client
 * @param clientHandle client-supplied handle echoed back in the response
 * @param selector     which OPS action to perform
 * @param params       parameters for the action
 * @throws Exception declared for caller compatibility; errors raised by
 *         the actual work are handled inside the submitted task
 */
public void performOpsAction(final Connection c, final long clientHandle,
        final OpsSelector selector, final ParameterSet params) throws Exception {
    m_es.submit(new Runnable() {
        @Override
        public void run() {
            try {
                collectStatsImpl(c, clientHandle, selector, params);
            } catch (Exception e) {
                hostLog.warn("Exception while attempting to collect stats", e);
                // ENG-14639, prevent clients like sqlcmd from hanging on exception
                sendErrorResponse(c, ClientResponse.OPERATIONAL_FAILURE,
                        "Failed to get statistics (" + e.getMessage() + ").",
                        clientHandle);
            }
        }
    });
}
|
Perform the action associated with this agent using the provided ParameterSet . This is the entry point to the OPS system .
| 151
| 25
|
154,687
|
/**
 * For OPS actions which run on every node, distribute the necessary
 * parameters to this agent's peers on the other cluster nodes.
 * Additionally, pre-checks for excessive outstanding requests and
 * initializes the tracking and timeout of the new request.  Subclasses
 * of OpsAgent should use this when they need this service.
 *
 * @param newRequest tracking state for the request being distributed
 * @param obj        JSON work description; requestId and returnAddress
 *                   are filled in here (selector/subselector/interval
 *                   are expected to be set already by the caller)
 * @throws Exception on JSON, compression, or messaging failures
 */
protected void distributeOpsWork(PendingOpsRequest newRequest, JSONObject obj) throws Exception {
    if (m_pendingRequests.size() > MAX_IN_FLIGHT_REQUESTS) {
        /*
         * Defensively check for an expired request not caught
         * by timeout check. Should never happen.
         */
        Iterator<Entry<Long, PendingOpsRequest>> iter = m_pendingRequests.entrySet().iterator();
        final long now = System.currentTimeMillis();
        boolean foundExpiredRequest = false;
        while (iter.hasNext()) {
            PendingOpsRequest por = iter.next().getValue();
            if (now - por.startTime > OPS_COLLECTION_TIMEOUT * 2) {
                iter.remove();
                foundExpiredRequest = true;
            }
        }
        // Only reject the new request if no stale entries could be evicted.
        if (!foundExpiredRequest) {
            sendErrorResponse(newRequest.c, ClientResponse.GRACEFUL_FAILURE,
                    "Too many pending stat requests", newRequest.clientData);
            return;
        }
    }
    final long requestId = m_nextRequestId++;
    m_pendingRequests.put(requestId, newRequest);
    // Schedule a timeout so that a lost peer response cannot strand the
    // request (and its client) forever.
    newRequest.timer = m_es.schedule(new Runnable() {
        @Override
        public void run() {
            checkForRequestTimeout(requestId);
        }
    }, OPS_COLLECTION_TIMEOUT, TimeUnit.MILLISECONDS);

    // selector, subselector, interval filled in by parse...
    obj.put("requestId", requestId);
    obj.put("returnAddress", m_mailbox.getHSId());
    // Send the compressed JSON request to the same site id on every live
    // host, counting each send as one expected response.
    int siteId = CoreUtils.getSiteIdFromHSId(m_mailbox.getHSId());
    byte payloadBytes[] = CompressionService.compressBytes(obj.toString(4).getBytes("UTF-8"));
    for (int hostId : m_messenger.getLiveHostIds()) {
        long agentHsId = CoreUtils.getHSIdFromHostAndSite(hostId, siteId);
        newRequest.expectedOpsResponses++;
        BinaryPayloadMessage bpm = new BinaryPayloadMessage(new byte[] {JSON_PAYLOAD}, payloadBytes);
        m_mailbox.send(agentHsId, bpm);
    }
}
|
For OPS actions which run on every node this method will distribute the necessary parameters to its peers on the other cluster nodes . Additionally it will pre - check for excessive outstanding requests and initialize the tracking and timeout of the new request . Subclasses of OpsAgent should use this when they need this service .
| 524
| 59
|
154,688
|
/**
 * Send the final response stored in the PendingOpsRequest to the client
 * which initiated the action.  Will be called automagically after
 * aggregating cluster-wide responses, but may be called directly by
 * subclasses if necessary.
 *
 * @param request the completed request whose aggregate tables (if any)
 *                are returned to the client
 */
protected void sendClientResponse(PendingOpsRequest request) {
    byte statusCode = ClientResponse.SUCCESS;
    String statusString = null;
    /*
     * It is possible not to receive a table response if a feature is not enabled
     */
    // All of the null/empty table handling/detecting/generation sucks. Just making it
    // work for now, not making it pretty. --izzy
    VoltTable responseTables[] = request.aggregateTables;
    if (responseTables == null || responseTables.length == 0) {
        // No data at all => graceful failure with an explanatory message.
        responseTables = new VoltTable[0];
        statusCode = ClientResponse.GRACEFUL_FAILURE;
        statusString = "Requested info \"" + request.subselector
                + "\" is not yet available or not supported in the current configuration.";
    }
    ClientResponseImpl response = new ClientResponseImpl(statusCode,
            ClientResponse.UNINITIALIZED_APP_STATUS_CODE, null,
            responseTables, statusString);
    response.setClientHandle(request.clientData);
    // Serialize with a leading 4-byte length prefix (message framing),
    // then hand the buffer to the client connection's write stream.
    ByteBuffer buf = ByteBuffer.allocate(response.getSerializedSize() + 4);
    buf.putInt(buf.capacity() - 4);
    response.flattenToBuffer(buf).flip();
    request.c.writeStream().enqueue(buf);
}
|
Send the final response stored in the PendingOpsRequest to the client which initiated the action . Will be called automagically after aggregating cluster - wide responses but may be called directly by subclasses if necessary .
| 286
| 42
|
154,689
|
/**
 * Return the results of distributed work to the original requesting
 * agent.  Used by subclasses to respond after they've done their local
 * work.
 *
 * Compressed payload layout: requestId (8 bytes), then for each result
 * table a 4-byte length prefix followed by the serialized table bytes.
 * A null results array produces a payload containing only the requestId,
 * signalling that the stat is unsupported or not yet available here.
 *
 * @param results     this node's result tables, or null if none
 * @param obj         original JSON request (carries requestId and
 *                    returnAddress)
 * @param payloadType message-type byte placed in the payload metadata
 * @throws Exception on JSON, compression, or messaging failures
 */
private void sendOpsResponse(VoltTable[] results, JSONObject obj, byte payloadType) throws Exception {
    long requestId = obj.getLong("requestId");
    long returnAddress = obj.getLong("returnAddress");
    // Send a response with no data since the stats is not supported or not yet available
    if (results == null) {
        ByteBuffer responseBuffer = ByteBuffer.allocate(8);
        responseBuffer.putLong(requestId);
        byte responseBytes[] = CompressionService.compressBytes(responseBuffer.array());
        BinaryPayloadMessage bpm = new BinaryPayloadMessage(new byte[] {payloadType}, responseBytes);
        m_mailbox.send(returnAddress, bpm);
        return;
    }
    // First pass: rewind each table's buffer and total up the bytes so
    // the response buffer can be sized exactly.
    ByteBuffer[] bufs = new ByteBuffer[results.length];
    int statbytes = 0;
    for (int i = 0; i < results.length; i++) {
        bufs[i] = results[i].getBuffer();
        bufs[i].position(0);
        statbytes += bufs[i].remaining();
    }
    ByteBuffer responseBuffer = ByteBuffer.allocate(
            8 + // requestId
            4 * results.length + // length prefix for each stats table
            + statbytes);
    responseBuffer.putLong(requestId);
    // Second pass: write each table as length-prefixed bytes.
    for (ByteBuffer buf : bufs) {
        responseBuffer.putInt(buf.remaining());
        responseBuffer.put(buf);
    }
    byte responseBytes[] = CompressionService.compressBytes(responseBuffer.array());
    BinaryPayloadMessage bpm = new BinaryPayloadMessage(new byte[] {payloadType}, responseBytes);
    m_mailbox.send(returnAddress, bpm);
}
|
Return the results of distributed work to the original requesting agent . Used by subclasses to respond after they've done their local work .
| 381
| 26
|
154,690
|
/**
 * Add all statement dependences, both ways: record on the function which
 * statement depends on it, and record on the statement which function it
 * depends on.
 *
 * @param function    the user-defined function referenced by the statement
 * @param catalogStmt the statement that references the function
 */
private static void addUDFDependences(Function function, Statement catalogStmt) {
    // The statement's parent in the catalog tree is its procedure.
    Procedure procedure = (Procedure) catalogStmt.getParent();
    addFunctionDependence(function, procedure, catalogStmt);
    addStatementDependence(function, catalogStmt);
}
|
Add all statement dependences both ways .
| 61
| 8
|
154,691
|
private static void addFunctionDependence ( Function function , Procedure procedure , Statement catalogStmt ) { String funcDeps = function . getStmtdependers ( ) ; Set < String > stmtSet = new TreeSet <> ( ) ; for ( String stmtName : funcDeps . split ( "," ) ) { if ( ! stmtName . isEmpty ( ) ) { stmtSet . add ( stmtName ) ; } } String statementName = procedure . getTypeName ( ) + ":" + catalogStmt . getTypeName ( ) ; if ( stmtSet . contains ( statementName ) ) { return ; } stmtSet . add ( statementName ) ; StringBuilder sb = new StringBuilder ( ) ; // We will add this procedure:statement pair. So make sure we have // an initial comma. Note that an empty set must be represented // by an empty string. We represent the set {pp:ss, qq:tt}, // where "pp" and "qq" are procedures and "ss" and "tt" are // statements in their procedures respectively, with // the string ",pp:ss,qq:tt,". If we search for "pp:ss" we will // never find "ppp:sss" by accident. // // Do to this, when we add something to string we start with a single // comma, and then add "qq:tt," at the end. sb . append ( "," ) ; for ( String stmtName : stmtSet ) { sb . append ( stmtName + "," ) ; } function . setStmtdependers ( sb . toString ( ) ) ; }
|
Add a dependence to a function of a statement . The function's dependence string is altered by this function .
| 357
| 21
|
154,692
|
/**
 * Record that {@code catalogStmt} depends on {@code function}, from the
 * statement's side.  The statement's dependee string is altered by this
 * function.
 *
 * Dependees are stored as a comma-delimited string with sentinel commas
 * on both ends (",f,g,"); an empty set is the empty string.
 *
 * @param function    the function the statement depends on
 * @param catalogStmt the statement gaining a dependee
 */
private static void addStatementDependence(Function function, Statement catalogStmt) {
    String functionName = function.getTypeName();
    // Parse the existing dependee string; sentinel commas yield empty
    // fragments, which are skipped.
    Set<String> dependees = new TreeSet<>();
    for (String name : catalogStmt.getFunctiondependees().split(",")) {
        if (!name.isEmpty()) {
            dependees.add(name);
        }
    }
    // Already recorded: leave the stored string untouched.
    if (!dependees.add(functionName)) {
        return;
    }
    // Re-serialize with the sentinel commas; the set is non-empty here.
    catalogStmt.setFunctiondependees("," + String.join(",", dependees) + ",");
}
|
Add a dependence of a statement on a function . The statement's dependence string is altered by this function .
| 186
| 21
|
154,693
|
static boolean fragmentReferencesPersistentTable ( AbstractPlanNode node ) { if ( node == null ) return false ; // these nodes can read/modify persistent tables if ( node instanceof AbstractScanPlanNode ) return true ; if ( node instanceof InsertPlanNode ) return true ; if ( node instanceof DeletePlanNode ) return true ; if ( node instanceof UpdatePlanNode ) return true ; // recursively check out children for ( int i = 0 ; i < node . getChildCount ( ) ; i ++ ) { AbstractPlanNode child = node . getChild ( i ) ; if ( fragmentReferencesPersistentTable ( child ) ) return true ; } // if nothing found, return false return false ; }
|
Check through a plan graph and return true if it ever touches a persistent table .
| 149
| 16
|
154,694
|
/**
 * Generate small deletion queries using the count-select-delete pattern.
 * The generated procedure contains three statements, indexed "0"-"2":
 * a count of eligible rows, a batched delete, and a value-at-offset
 * lookup (presumably the batch cutoff value — mirrors the migrate
 * variant's statement "1").
 *
 * @param catTable the catalog table to delete from
 * @param procName name of the generated procedure
 * @param col      the column driving the comparison
 * @param comp     the comparison operation against the parameter
 * @return the newly created catalog Procedure
 */
public static Procedure compileNibbleDeleteProcedure(Table catTable, String procName,
        Column col, ComparisonOperation comp) {
    Procedure proc = addProcedure(catTable, procName);
    // Statement 0: count the rows eligible for deletion.
    addStatement(catTable, proc, genSelectSqlForNibbleDelete(catTable, col, comp), "0");
    // Statement 1: delete a batch of eligible rows.
    addStatement(catTable, proc, genDeleteSqlForNibbleDelete(catTable, col, comp), "1");
    // Statement 2: fetch the column value at a given offset.
    addStatement(catTable, proc, genValueAtOffsetSqlForNibbleDelete(catTable, col, comp), "2");
    return proc;
}
|
Generate small deletion queries by using count - select - delete pattern .
| 176
| 14
|
154,695
|
public static Procedure compileMigrateProcedure ( Table table , String procName , Column column , ComparisonOperation comparison ) { Procedure proc = addProcedure ( table , procName ) ; // Select count(*) StringBuilder sb = new StringBuilder ( ) ; sb . append ( "SELECT COUNT(*) FROM " + table . getTypeName ( ) ) ; sb . append ( " WHERE not migrating AND " + column . getName ( ) + " " + comparison . toString ( ) + " ?;" ) ; addStatement ( table , proc , sb . toString ( ) , "0" ) ; // Get cutoff value sb . setLength ( 0 ) ; sb . append ( "SELECT " + column . getName ( ) + " FROM " + table . getTypeName ( ) ) ; sb . append ( " WHERE not migrating ORDER BY " + column . getName ( ) ) ; if ( comparison == ComparisonOperation . LTE || comparison == ComparisonOperation . LT ) { sb . append ( " ASC OFFSET ? LIMIT 1;" ) ; } else { sb . append ( " DESC OFFSET ? LIMIT 1;" ) ; } addStatement ( table , proc , sb . toString ( ) , "1" ) ; // Migrate sb . setLength ( 0 ) ; sb . append ( "MIGRATE FROM " + table . getTypeName ( ) ) ; sb . append ( " WHERE not migrating AND " + column . getName ( ) + " " + comparison . toString ( ) + " ?;" ) ; addStatement ( table , proc , sb . toString ( ) , "2" ) ; return proc ; }
|
Generate migrate queries by using count - select - migrate pattern .
| 363
| 13
|
154,696
|
/**
 * Returns a constrained view of the specified collection, using the
 * specified constraint.  Any operations that add new elements to the
 * collection will call the provided constraint.  However, this method
 * does not verify that existing elements satisfy the constraint.
 *
 * @param collection the collection to wrap
 * @param constraint the constraint applied to newly added elements
 * @return a constraint-checking view of {@code collection}
 */
public static <E> Collection<E> constrainedCollection(
        Collection<E> collection, Constraint<? super E> constraint) {
    return new ConstrainedCollection<E>(collection, constraint);
}
|
Returns a constrained view of the specified collection using the specified constraint . Any operations that add new elements to the collection will call the provided constraint . However this method does not verify that existing elements satisfy the constraint .
| 45
| 41
|
154,697
|
/**
 * Returns a constrained view of the specified set, using the specified
 * constraint.  Any operations that add new elements to the set will call
 * the provided constraint.  However, this method does not verify that
 * existing elements satisfy the constraint.
 *
 * @param set        the set to wrap
 * @param constraint the constraint applied to newly added elements
 * @return a constraint-checking view of {@code set}
 */
public static <E> Set<E> constrainedSet(Set<E> set, Constraint<? super E> constraint) {
    return new ConstrainedSet<E>(set, constraint);
}
|
Returns a constrained view of the specified set using the specified constraint . Any operations that add new elements to the set will call the provided constraint . However this method does not verify that existing elements satisfy the constraint .
| 45
| 41
|
154,698
|
/**
 * Returns a constrained view of the specified sorted set, using the
 * specified constraint.  Any operations that add new elements to the
 * sorted set will call the provided constraint.  However, this method
 * does not verify that existing elements satisfy the constraint.
 *
 * @param sortedSet  the sorted set to wrap
 * @param constraint the constraint applied to newly added elements
 * @return a constraint-checking view of {@code sortedSet}
 */
public static <E> SortedSet<E> constrainedSortedSet(
        SortedSet<E> sortedSet, Constraint<? super E> constraint) {
    return new ConstrainedSortedSet<E>(sortedSet, constraint);
}
|
Returns a constrained view of the specified sorted set using the specified constraint . Any operations that add new elements to the sorted set will call the provided constraint . However this method does not verify that existing elements satisfy the constraint .
| 55
| 43
|
154,699
|
/**
 * Returns a constrained view of the specified list, using the specified
 * constraint.  Any operations that add new elements to the list will
 * call the provided constraint.  However, this method does not verify
 * that existing elements satisfy the constraint.
 *
 * @param list       the list to wrap
 * @param constraint the constraint applied to newly added elements
 * @return a constraint-checking view of {@code list}
 */
public static <E> List<E> constrainedList(List<E> list, Constraint<? super E> constraint) {
    if (list instanceof RandomAccess) {
        // Random-access lists get the dedicated wrapper — presumably it
        // also implements RandomAccess to preserve the marker; confirm
        // against the wrapper class.
        return new ConstrainedRandomAccessList<E>(list, constraint);
    }
    return new ConstrainedList<E>(list, constraint);
}
|
Returns a constrained view of the specified list using the specified constraint . Any operations that add new elements to the list will call the provided constraint . However this method does not verify that existing elements satisfy the constraint .
| 69
| 41
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.