idx
int64
0
41.2k
question
stringlengths
74
4.04k
target
stringlengths
7
750
32,100
void addValue ( String columnName , String value ) { addValue ( columnNames . indexOf ( columnName ) , value ) ; }
Add a value to the first column with the given name
32,101
void addValue ( int columnIndex , String value ) { if ( ( columnIndex < 0 ) || ( columnIndex >= data . size ( ) ) ) { throw new IllegalArgumentException ( ) ; } List < String > stringList = data . get ( columnIndex ) ; stringList . add ( value ) ; }
Add a value to the nth column
32,102
List < String > generate ( ) { List < String > lines = Lists . newArrayList ( ) ; StringBuilder workStr = new StringBuilder ( ) ; List < AtomicInteger > columnWidths = getColumnWidths ( ) ; List < Iterator < String > > dataIterators = getDataIterators ( ) ; Iterator < AtomicInteger > columnWidthIterator = columnWidths . iterator ( ) ; for ( String columnName : columnNames ) { int thisWidth = columnWidthIterator . next ( ) . intValue ( ) ; printValue ( workStr , columnName , thisWidth ) ; } pushLine ( lines , workStr ) ; boolean done = false ; while ( ! done ) { boolean hadValue = false ; Iterator < Iterator < String > > rowIterator = dataIterators . iterator ( ) ; for ( AtomicInteger width : columnWidths ) { Iterator < String > thisDataIterator = rowIterator . next ( ) ; if ( thisDataIterator . hasNext ( ) ) { hadValue = true ; String value = thisDataIterator . next ( ) ; printValue ( workStr , value , width . intValue ( ) ) ; } else { printValue ( workStr , "" , width . intValue ( ) ) ; } } pushLine ( lines , workStr ) ; if ( ! hadValue ) { done = true ; } } return lines ; }
Generate the output as a list of string lines
32,103
public static void start ( Class < ? > mainClass , final String [ ] args ) { try { LifecycleInjector . bootstrap ( mainClass , new AbstractModule ( ) { protected void configure ( ) { bind ( String [ ] . class ) . annotatedWith ( Main . class ) . toInstance ( args ) ; } } ) ; } catch ( Exception e ) { throw new ProvisionException ( "Error instantiating main class" , e ) ; } }
Utility method to start the CommonsCli using a main class and command line arguments
32,104
public void output ( Logger log ) { Map < String , Entry > entries = config . getSortedEntries ( ) ; if ( entries . isEmpty ( ) ) { return ; } ColumnPrinter printer = build ( entries ) ; log . debug ( "Configuration Details" ) ; for ( String line : printer . generate ( ) ) { log . debug ( line ) ; } }
Write the documentation table to a logger
32,105
public void output ( PrintWriter out ) { Map < String , Entry > entries = config . getSortedEntries ( ) ; if ( entries . isEmpty ( ) ) { return ; } ColumnPrinter printer = build ( entries ) ; out . println ( "Configuration Details" ) ; printer . print ( out ) ; }
Output documentation table to a PrintWriter
32,106
private ColumnPrinter build ( Map < String , Entry > entries ) { ColumnPrinter printer = new ColumnPrinter ( ) ; printer . addColumn ( "PROPERTY" ) ; printer . addColumn ( "FIELD" ) ; printer . addColumn ( "DEFAULT" ) ; printer . addColumn ( "VALUE" ) ; printer . addColumn ( "DESCRIPTION" ) ; Map < String , Entry > sortedEntries = Maps . newTreeMap ( ) ; sortedEntries . putAll ( entries ) ; for ( Entry entry : sortedEntries . values ( ) ) { printer . addValue ( 0 , entry . configurationName ) ; printer . addValue ( 1 , entry . field . getDeclaringClass ( ) . getName ( ) + "#" + entry . field . getName ( ) ) ; printer . addValue ( 2 , entry . defaultValue ) ; printer . addValue ( 3 , entry . has ? entry . value : "" ) ; printer . addValue ( 4 , entry . documentation ) ; } return printer ; }
Construct a ColumnPrinter using the entries
32,107
public static List < ConfigurationKeyPart > parse ( String raw , Map < String , String > contextOverrides ) { List < ConfigurationKeyPart > parts = Lists . newArrayList ( ) ; int caret = 0 ; for ( ; ; ) { int startIndex = raw . indexOf ( "${" , caret ) ; if ( startIndex < 0 ) { break ; } int endIndex = raw . indexOf ( "}" , startIndex ) ; if ( endIndex < 0 ) { break ; } if ( startIndex > caret ) { parts . add ( new ConfigurationKeyPart ( raw . substring ( caret , startIndex ) , false ) ) ; } startIndex += 2 ; if ( startIndex < endIndex ) { String name = raw . substring ( startIndex , endIndex ) ; if ( contextOverrides != null && contextOverrides . containsKey ( name ) ) { parts . add ( new ConfigurationKeyPart ( contextOverrides . get ( name ) , false ) ) ; } else { parts . add ( new ConfigurationKeyPart ( name , true ) ) ; } } caret = endIndex + 1 ; } if ( caret < raw . length ( ) ) { parts . add ( new ConfigurationKeyPart ( raw . substring ( caret ) , false ) ) ; } if ( parts . size ( ) == 0 ) { parts . add ( new ConfigurationKeyPart ( "" , false ) ) ; } return parts ; }
Parse a key into parts
32,108
public void add ( Object ... objects ) throws Exception { for ( Object obj : objects ) { add ( obj ) ; } }
Add the objects to the container . Their assets will be loaded post construct methods called etc .
32,109
public void add ( Object obj ) throws Exception { add ( obj , null , new LifecycleMethods ( obj . getClass ( ) ) ) ; }
Add the object to the container . Its assets will be loaded post construct methods called etc .
32,110
public LifecycleState getState ( Object obj ) { LifecycleStateWrapper lifecycleState = objectStates . get ( obj ) ; if ( lifecycleState == null ) { return hasStarted ( ) ? LifecycleState . ACTIVE : LifecycleState . LATENT ; } else { synchronized ( lifecycleState ) { return lifecycleState . get ( ) ; } } }
Return the current state of the given object or LATENT if unknown
32,111
public String getKey ( Map < String , String > variableValues ) { StringBuilder key = new StringBuilder ( ) ; for ( ConfigurationKeyPart p : parts ) { if ( p . isVariable ( ) ) { String value = variableValues . get ( p . getValue ( ) ) ; if ( value == null ) { log . warn ( "No value found for variable: " + p . getValue ( ) ) ; value = "" ; } key . append ( value ) ; } else { key . append ( p . getValue ( ) ) ; } } return key . toString ( ) ; }
Return the final key applying variables as needed
32,112
public Collection < String > getVariableNames ( ) { ImmutableSet . Builder < String > builder = ImmutableSet . builder ( ) ; for ( ConfigurationKeyPart p : parts ) { if ( p . isVariable ( ) ) { builder . add ( p . getValue ( ) ) ; } } return builder . build ( ) ; }
Return the names of the variables specified in the key if any
32,113
public Injector createChildInjector ( Collection < Module > modules ) { Injector childInjector ; Collection < Module > localModules = modules ; for ( ModuleTransformer transformer : transformers ) { localModules = transformer . call ( localModules ) ; } if ( mode == LifecycleInjectorMode . REAL_CHILD_INJECTORS ) { childInjector = injector . createChildInjector ( localModules ) ; } else { childInjector = createSimulatedChildInjector ( localModules ) ; } for ( PostInjectorAction action : actions ) { action . call ( childInjector ) ; } return childInjector ; }
Create an injector that is a child of the bootstrap bindings only
32,114
public Injector createInjector ( Collection < Module > additionalModules ) { List < Module > localModules = Lists . newArrayList ( ) ; localModules . add ( new LifecycleListenerModule ( ) ) ; localModules . add ( new AbstractModule ( ) { public void configure ( ) { if ( requireExplicitBindings ) { binder ( ) . requireExplicitBindings ( ) ; } } } ) ; if ( additionalModules != null ) { localModules . addAll ( additionalModules ) ; } localModules . addAll ( modules ) ; if ( ! ignoreAllClasses ) { Collection < Class < ? > > localIgnoreClasses = Sets . newHashSet ( ignoreClasses ) ; localModules . add ( new InternalAutoBindModule ( injector , scanner , localIgnoreClasses ) ) ; } return createChildInjector ( localModules ) ; }
Create the main injector
32,115
public void stop ( ) { stopWrites ( ) ; stopReads ( ) ; if ( timerRef != null && timerRef . get ( ) != null ) { timerRef . get ( ) . shutdownNow ( ) ; timerRef . set ( null ) ; } ndBenchMonitor . resetStats ( ) ; }
FUNCTIONALITY FOR STOPPING THE WORKERS
32,116
public String nonPipelineRead ( String key ) throws Exception { String res = jedisClient . get ( ) . get ( key ) ; if ( res != null ) { if ( res . isEmpty ( ) ) { throw new Exception ( "Data retrieved is not ok " ) ; } } else { return CacheMiss ; } return ResultOK ; }
This is the non pipelined version of the reads
32,117
public String pipelineRead ( String key , int max_pipe_keys , int min_pipe_keys ) throws Exception { int pipe_keys = randomGenerator . nextInt ( max_pipe_keys ) ; pipe_keys = Math . max ( min_pipe_keys , pipe_keys ) ; DynoJedisPipeline pipeline = this . jedisClient . get ( ) . pipelined ( ) ; Map < String , Response < String > > responses = new HashMap < > ( ) ; for ( int n = 0 ; n < pipe_keys ; ++ n ) { String nth_key = key + "_" + n ; Response < String > resp = pipeline . get ( key ) ; responses . put ( nth_key , resp ) ; } pipeline . sync ( ) ; for ( int n = 0 ; n < pipe_keys ; ++ n ) { String nth_key = key + "_" + n ; Response < String > resp = responses . get ( nth_key ) ; if ( resp == null || resp . get ( ) == null ) { logger . info ( "Cache Miss on pipelined read: key:" + key ) ; return null ; } else { if ( resp . get ( ) . startsWith ( "ERR" ) ) { throw new Exception ( String . format ( "DynoJedisPipeline: error %s" , resp . get ( ) ) ) ; } if ( ! isValidResponse ( key , resp . get ( ) ) ) { throw new Exception ( String . format ( "DynoJedisPipeline: pipeline read: value %s does not contain key %s" , resp . get ( ) , key ) ) ; } } } return "OK" ; }
This is the pipelined version of the reads
32,118
public String pipelineReadHGETALL ( String key , String hm_key_prefix ) throws Exception { DynoJedisPipeline pipeline = jedisClient . get ( ) . pipelined ( ) ; Response < Map < byte [ ] , byte [ ] > > resp = pipeline . hgetAll ( ( hm_key_prefix + key ) . getBytes ( ) ) ; pipeline . sync ( ) ; if ( resp == null || resp . get ( ) == null ) { logger . info ( "Cache Miss: key:" + key ) ; return null ; } else { StringBuilder sb = new StringBuilder ( ) ; for ( byte [ ] bytes : resp . get ( ) . keySet ( ) ) { if ( sb . length ( ) > 0 ) { sb . append ( "," ) ; } sb . append ( new String ( bytes ) ) ; } return "HGETALL:" + sb . toString ( ) ; } }
This is the pipelined HGETALL
32,119
public String nonPipelineZRANGE ( String key , int max_score ) { StringBuilder sb = new StringBuilder ( ) ; Set < String > returnEntries = this . jedisClient . get ( ) . zrange ( key , 0 , - 1 ) ; if ( returnEntries . isEmpty ( ) ) { logger . error ( "The number of entries in the sorted set are less than the number of entries written" ) ; return null ; } returnEntries . forEach ( sb :: append ) ; return sb . toString ( ) ; }
Exercising ZRANGE to receive all keys between 0 and MAX_SCORE
32,120
public String nonpipelineWrite ( String key , DataGenerator dataGenerator ) { String value = key + "__" + dataGenerator . getRandomValue ( ) + "__" + key ; String result = this . jedisClient . get ( ) . set ( key , value ) ; if ( ! "OK" . equals ( result ) ) { logger . error ( "SET_ERROR: GOT " + result + " for SET operation" ) ; throw new RuntimeException ( String . format ( "DynoJedis: value %s for SET operation is NOT VALID" , value , key ) ) ; } return result ; }
a simple write without a pipeline
32,121
public String pipelineWrite ( String key , DataGenerator dataGenerator , int max_pipe_keys , int min_pipe_keys ) throws Exception { int pipe_keys = randomGenerator . nextInt ( max_pipe_keys ) ; pipe_keys = Math . max ( min_pipe_keys , pipe_keys ) ; DynoJedisPipeline pipeline = this . jedisClient . get ( ) . pipelined ( ) ; Map < String , Response < String > > responses = new HashMap < > ( ) ; StringBuilder sb = new StringBuilder ( ) ; for ( int n = 0 ; n < pipe_keys ; ++ n ) { String nth_key = key + "_" + n ; sb . append ( nth_key ) ; Response < String > resp = pipeline . set ( key , key + dataGenerator . getRandomValue ( ) + key ) ; responses . put ( nth_key , resp ) ; } pipeline . sync ( ) ; return sb . toString ( ) ; }
pipelined version of the write
32,122
public String pipelineWriteHMSET ( String key , DataGenerator dataGenerator , String hm_key_prefix ) { Map < String , String > map = new HashMap < > ( ) ; String hmKey = hm_key_prefix + key ; map . put ( ( hmKey + "__1" ) , ( key + "__" + dataGenerator . getRandomValue ( ) + "__" + key ) ) ; map . put ( ( hmKey + "__2" ) , ( key + "__" + dataGenerator . getRandomValue ( ) + "__" + key ) ) ; DynoJedisPipeline pipeline = jedisClient . get ( ) . pipelined ( ) ; pipeline . hmset ( hmKey , map ) ; pipeline . expire ( hmKey , 3600 ) ; pipeline . sync ( ) ; return "HMSET:" + hmKey ; }
writes with a pipelined HMSET
32,123
public String nonPipelineZADD ( String key , DataGenerator dataGenerator , String z_key_prefix , int max_score ) throws Exception { String zKey = z_key_prefix + key ; int success = 0 ; long returnOp = 0 ; for ( int i = 0 ; i < max_score ; i ++ ) { returnOp = jedisClient . get ( ) . zadd ( zKey , i , dataGenerator . getRandomValue ( ) + "__" + zKey ) ; success += returnOp ; } if ( success != max_score - 1 ) { return null ; } return "OK" ; }
This adds MAX_SCORE of elements in a sorted set
32,124
public synchronized void init ( DataGenerator dataGenerator ) throws Exception { if ( esConfig . getRestClientPort ( ) == 443 && ! esConfig . isHttps ( ) ) { throw new IllegalArgumentException ( "You must set the configuration property 'https' to true if you use the https default port" ) ; } Integer indexRollsPerHour = esConfig . getIndexRollsPerDay ( ) ; if ( indexRollsPerHour < 0 || indexRollsPerHour > MAX_INDEX_ROLLS_PER_HOUR ) { throw new IllegalArgumentException ( "The configuration property 'indexRollsPerHour' must be > 0 and <= " + MAX_INDEX_ROLLS_PER_HOUR ) ; } if ( indexRollsPerHour > 0 && 60 % indexRollsPerHour != 0 ) { throw new IllegalArgumentException ( "The configuration property 'indexRollsPerHour' must evenly divide 60" ) ; } if ( esConfig . getBulkWriteBatchSize ( ) < 0 ) { throw new IllegalArgumentException ( "bulkWriteBatchSize can't be negative'" ) ; } RestClientBuilder . RequestConfigCallback callback = requestConfigBuilder -> { requestConfigBuilder . setConnectTimeout ( esConfig . getConnectTimeoutSeconds ( ) * 1000 ) ; requestConfigBuilder . setConnectionRequestTimeout ( esConfig . getConnectionRequestTimeoutSeconds ( ) * 1000 ) ; requestConfigBuilder . setSocketTimeout ( esConfig . getSocketTimeoutSeconds ( ) * 1000 ) ; return requestConfigBuilder ; } ; List < HttpHost > endpoints = getEndpoints ( discoverer , esConfig ) ; HttpHost [ ] hosts = endpoints . toArray ( new HttpHost [ 0 ] ) ; restClient = RestClient . builder ( hosts ) . setMaxRetryTimeoutMillis ( esConfig . getMaxRetryTimeoutSeconds ( ) * 1000 ) . setRequestConfigCallback ( callback ) . build ( ) ; String hostname = endpoints . get ( 0 ) . getHostName ( ) ; ES_HOST_PORT = String . format ( "%s://%s:%s" , getScheme ( ) , hostname , esConfig . getRestClientPort ( ) ) ; ES_INDEX_TYPE_RESOURCE_PATH = String . format ( "/%s/%s" , esConfig . getIndexName ( ) , DEFAULT_DOC_TYPE ) ; CONNECTION_INFO = "Cluster: " + this . 
getClusterOrHostName ( ) + "\n" + "Test Index URL: " + ES_HOST_PORT + ES_INDEX_TYPE_RESOURCE_PATH ; writer = new EsWriter ( esConfig . getIndexName ( ) , DEFAULT_DOC_TYPE , esConfig . getBulkWriteBatchSize ( ) > 0 , indexRollsPerHour , esConfig . getBulkWriteBatchSize ( ) , esConfig . isRandomizeStrings ( ) ? dataGenerator : new FakeWordDictionaryBasedDataGenerator ( dataGenerator , coreConfig . getDataSize ( ) ) ) ; if ( coreConfig . isAutoTuneEnabled ( ) ) { this . autoTuner = new EsAutoTuner ( coreConfig . getAutoTuneRampPeriodMillisecs ( ) , coreConfig . getAutoTuneIncrementIntervalMillisecs ( ) , coreConfig . getWriteRateLimit ( ) , coreConfig . getAutoTuneFinalWriteRate ( ) , coreConfig . getAutoTuneWriteFailureRatioThreshold ( ) ) ; } else { this . autoTuner = null ; } this . randomizeKeys = randomizeKeys ; logger . info ( "ES_REST plugin initialized: " + CONNECTION_INFO ) ; }
Initialize key data structures for plugin using synchronized to ensure other threads are guaranteed visibility of end result of initializing said structures .
32,125
public static String humanReadableByteCount ( final long bytes ) { final int base = 1024 ; if ( bytes < base ) { return bytes + " " + BINARY_UNITS [ 0 ] ; } final int exponent = ( int ) ( Math . log ( bytes ) / Math . log ( base ) ) ; final String unit = BINARY_UNITS [ exponent ] ; return String . format ( "%.1f %s" , bytes / Math . pow ( base , exponent ) , unit ) ; }
FileUtils . byteCountToDisplaySize rounds down the size hence using this for more precision .
32,126
static String constructIndexName ( String indexName , int indexRollsPerDay , Date date ) { if ( indexRollsPerDay > 0 ) { ZonedDateTime zdt = ZonedDateTime . ofInstant ( date . toInstant ( ) , ZoneId . of ( "UTC" ) ) ; int minutesPerRoll = 1440 / indexRollsPerDay ; int minutesElapsedSinceStartOfDay = zdt . getHour ( ) * 60 + zdt . getMinute ( ) ; int nthRoll = minutesElapsedSinceStartOfDay / minutesPerRoll ; String retval = String . format ( "%s-%s.%04d" , indexName , zdt . format ( DateTimeFormatter . ofPattern ( "yyyy-MM-dd" ) ) , nthRoll ) ; logger . debug ( "constructIndexName from rolls per day = {} gives: {}" , indexRollsPerDay , retval ) ; return retval ; } else { return indexName ; } }
methods below are package scoped to facilitate unit testing
32,127
public List < String > readBulk ( final List < String > keys ) throws Exception { List < String > responses = new ArrayList < > ( keys . size ( ) ) ; JanusGraphTransaction transaction = useJanusgraphTransaction ? graph . newTransaction ( ) : null ; try { for ( String key : keys ) { String response = readSingleInternal ( key , transaction ) ; responses . add ( response ) ; } } finally { if ( transaction != null ) transaction . close ( ) ; } return responses ; }
Perform a bulk read operation
32,128
public List < String > writeBulk ( final List < String > keys ) throws Exception { List < String > responses = new ArrayList < > ( keys . size ( ) ) ; for ( String key : keys ) { String response = writeSingle ( key ) ; responses . add ( response ) ; } return responses ; }
Perform a bulk write operation
32,129
public String getNDelimitedStrings ( int n ) { return IntStream . range ( 0 , config . getColsPerRow ( ) ) . mapToObj ( i -> "'" + dataGenerator . getRandomValue ( ) + "'" ) . collect ( Collectors . joining ( "," ) ) ; }
Assumes delimiter to be comma since that covers all the use cases for now . Will parameterize if use cases differ on delimiter .
32,130
protected void doBindView ( View itemView , Context context , Cursor cursor ) { try { @ SuppressWarnings ( "unchecked" ) ViewType itemViewType = ( ViewType ) itemView ; bindView ( itemViewType , context , cursorToObject ( cursor ) ) ; } catch ( SQLException e ) { throw new RuntimeException ( e ) ; } }
This is here to make sure that the user really wants to override it .
32,131
public T getTypedItem ( int position ) { try { return cursorToObject ( ( Cursor ) super . getItem ( position ) ) ; } catch ( SQLException e ) { throw new RuntimeException ( e ) ; } }
Returns a T object at the current position .
32,132
protected T cursorToObject ( Cursor cursor ) throws SQLException { return preparedQuery . mapRow ( new AndroidDatabaseResults ( cursor , null , true ) ) ; }
Map a single row to our cursor object .
32,133
public void changeCursor ( Cursor cursor , PreparedQuery < T > preparedQuery ) { setPreparedQuery ( preparedQuery ) ; super . changeCursor ( cursor ) ; }
Change the cursor associated with the prepared query .
32,134
static int execSql ( SQLiteDatabase db , String label , String finalSql , Object [ ] argArray ) throws SQLException { try { db . execSQL ( finalSql , argArray ) ; } catch ( android . database . SQLException e ) { throw SqlExceptionUtil . create ( "Problems executing " + label + " Android statement: " + finalSql , e ) ; } int result ; SQLiteStatement stmt = null ; try { stmt = db . compileStatement ( "SELECT CHANGES()" ) ; result = ( int ) stmt . simpleQueryForLong ( ) ; } catch ( android . database . SQLException e ) { result = 1 ; } finally { if ( stmt != null ) { stmt . close ( ) ; } } logger . trace ( "executing statement {} changed {} rows: {}" , label , result , finalSql ) ; return result ; }
Execute some SQL on the database and return the number of rows changed .
32,135
public static void writeConfigFile ( String fileName , boolean sortClasses ) throws SQLException , IOException { List < Class < ? > > classList = new ArrayList < Class < ? > > ( ) ; findAnnotatedClasses ( classList , new File ( "." ) , 0 ) ; writeConfigFile ( fileName , classList . toArray ( new Class [ classList . size ( ) ] ) , sortClasses ) ; }
Finds the annotated classes in the current directory or below and writes a configuration file to the file - name in the raw folder .
32,136
public static void writeConfigFile ( File configFile , boolean sortClasses ) throws SQLException , IOException { writeConfigFile ( configFile , new File ( "." ) , sortClasses ) ; }
Finds the annotated classes in the current directory or below and writes a configuration file .
32,137
protected static File findRawDir ( File dir ) { for ( int i = 0 ; dir != null && i < 20 ; i ++ ) { File rawDir = findResRawDir ( dir ) ; if ( rawDir != null ) { return rawDir ; } dir = dir . getParentFile ( ) ; } return null ; }
Look for the resource - directory in the current directory or the directories above . Then look for the raw - directory underneath the resource - directory .
32,138
private static String getPackageOfClass ( File file ) throws IOException { BufferedReader reader = new BufferedReader ( new FileReader ( file ) ) ; try { while ( true ) { String line = reader . readLine ( ) ; if ( line == null ) { return null ; } if ( line . contains ( "package" ) ) { String [ ] parts = line . split ( "[ \t;]" ) ; if ( parts . length > 1 && parts [ 0 ] . equals ( "package" ) ) { return parts [ 1 ] ; } } } } finally { reader . close ( ) ; } }
Returns the package name of a file that has one of the annotations we are looking for .
32,139
private static File findResRawDir ( File dir ) { for ( File file : dir . listFiles ( ) ) { if ( file . getName ( ) . equals ( RESOURCE_DIR_NAME ) && file . isDirectory ( ) ) { File [ ] rawFiles = file . listFiles ( new FileFilter ( ) { public boolean accept ( File file ) { return file . getName ( ) . equals ( RAW_DIR_NAME ) && file . isDirectory ( ) ; } } ) ; if ( rawFiles . length == 1 ) { return rawFiles [ 0 ] ; } } } return null ; }
Look for the resource directory with raw beneath it .
32,140
private static void innerSetHelperClass ( Class < ? extends OrmLiteSqliteOpenHelper > openHelperClass ) { if ( openHelperClass == null ) { throw new IllegalStateException ( "Helper class was trying to be reset to null" ) ; } else if ( helperClass == null ) { helperClass = openHelperClass ; } else if ( helperClass != openHelperClass ) { throw new IllegalStateException ( "Helper class was " + helperClass + " but is trying to be reset to " + openHelperClass ) ; } }
Set the helper class and make sure we are not changing it to another class .
32,141
private static OrmLiteSqliteOpenHelper constructHelper ( Context context , Class < ? extends OrmLiteSqliteOpenHelper > openHelperClass ) { Constructor < ? > constructor ; try { constructor = openHelperClass . getConstructor ( Context . class ) ; } catch ( Exception e ) { throw new IllegalStateException ( "Could not find public constructor that has a single (Context) argument for helper class " + openHelperClass , e ) ; } try { return ( OrmLiteSqliteOpenHelper ) constructor . newInstance ( context ) ; } catch ( Exception e ) { throw new IllegalStateException ( "Could not construct instance of helper class " + openHelperClass , e ) ; } }
Call the constructor on our helper class .
32,142
private static Class < ? extends OrmLiteSqliteOpenHelper > lookupHelperClass ( Context context , Class < ? > componentClass ) { Resources resources = context . getResources ( ) ; int resourceId = resources . getIdentifier ( HELPER_CLASS_RESOURCE_NAME , "string" , context . getPackageName ( ) ) ; if ( resourceId != 0 ) { String className = resources . getString ( resourceId ) ; try { @ SuppressWarnings ( "unchecked" ) Class < ? extends OrmLiteSqliteOpenHelper > castClass = ( Class < ? extends OrmLiteSqliteOpenHelper > ) Class . forName ( className ) ; return castClass ; } catch ( Exception e ) { throw new IllegalStateException ( "Could not create helper instance for class " + className , e ) ; } } for ( Class < ? > componentClassWalk = componentClass ; componentClassWalk != null ; componentClassWalk = componentClassWalk . getSuperclass ( ) ) { Type superType = componentClassWalk . getGenericSuperclass ( ) ; if ( superType == null || ! ( superType instanceof ParameterizedType ) ) { continue ; } Type [ ] types = ( ( ParameterizedType ) superType ) . getActualTypeArguments ( ) ; if ( types == null || types . length == 0 ) { continue ; } for ( Type type : types ) { if ( ! ( type instanceof Class ) ) { continue ; } Class < ? > clazz = ( Class < ? > ) type ; if ( OrmLiteSqliteOpenHelper . class . isAssignableFrom ( clazz ) ) { @ SuppressWarnings ( "unchecked" ) Class < ? extends OrmLiteSqliteOpenHelper > castOpenHelperClass = ( Class < ? extends OrmLiteSqliteOpenHelper > ) clazz ; return castOpenHelperClass ; } } } throw new IllegalStateException ( "Could not find OpenHelperClass because none of the generic parameters of class " + componentClass + " extends OrmLiteSqliteOpenHelper. You should use getHelper(Context, Class) instead." ) ; }
Lookup the helper class either from the resource string or by looking for a generic parameter .
32,143
public static < T > DatabaseTableConfig < T > fromClass ( ConnectionSource connectionSource , Class < T > clazz ) throws SQLException { DatabaseType databaseType = connectionSource . getDatabaseType ( ) ; String tableName = DatabaseTableConfig . extractTableName ( databaseType , clazz ) ; List < DatabaseFieldConfig > fieldConfigs = new ArrayList < DatabaseFieldConfig > ( ) ; for ( Class < ? > classWalk = clazz ; classWalk != null ; classWalk = classWalk . getSuperclass ( ) ) { for ( Field field : classWalk . getDeclaredFields ( ) ) { DatabaseFieldConfig config = configFromField ( databaseType , tableName , field ) ; if ( config != null && config . isPersisted ( ) ) { fieldConfigs . add ( config ) ; } } } if ( fieldConfigs . size ( ) == 0 ) { return null ; } else { return new DatabaseTableConfig < T > ( clazz , tableName , fieldConfigs ) ; } }
Build our list table config from a class using some annotation fu around .
32,144
private static int [ ] lookupClasses ( ) { Class < ? > annotationMemberArrayClazz ; try { annotationFactoryClazz = Class . forName ( "org.apache.harmony.lang.annotation.AnnotationFactory" ) ; annotationMemberClazz = Class . forName ( "org.apache.harmony.lang.annotation.AnnotationMember" ) ; annotationMemberArrayClazz = Class . forName ( "[Lorg.apache.harmony.lang.annotation.AnnotationMember;" ) ; } catch ( ClassNotFoundException e ) { return null ; } Field fieldField ; try { elementsField = annotationFactoryClazz . getDeclaredField ( "elements" ) ; elementsField . setAccessible ( true ) ; nameField = annotationMemberClazz . getDeclaredField ( "name" ) ; nameField . setAccessible ( true ) ; valueField = annotationMemberClazz . getDeclaredField ( "value" ) ; valueField . setAccessible ( true ) ; fieldField = DatabaseFieldSample . class . getDeclaredField ( "field" ) ; } catch ( SecurityException e ) { return null ; } catch ( NoSuchFieldException e ) { return null ; } DatabaseField databaseField = fieldField . getAnnotation ( DatabaseField . class ) ; InvocationHandler proxy = Proxy . getInvocationHandler ( databaseField ) ; if ( proxy . getClass ( ) != annotationFactoryClazz ) { return null ; } try { Object elements = elementsField . get ( proxy ) ; if ( elements == null || elements . getClass ( ) != annotationMemberArrayClazz ) { return null ; } Object [ ] elementArray = ( Object [ ] ) elements ; int [ ] configNums = new int [ elementArray . length ] ; for ( int i = 0 ; i < elementArray . length ; i ++ ) { String name = ( String ) nameField . get ( elementArray [ i ] ) ; configNums [ i ] = configFieldNameToNum ( name ) ; } return configNums ; } catch ( IllegalAccessException e ) { return null ; } }
This does all of the class reflection fu to find our classes find the order of field names and construct our array of ConfigField entries that correspond to the AnnotationMember array .
32,145
private static int configFieldNameToNum ( String configName ) { if ( configName . equals ( "columnName" ) ) { return COLUMN_NAME ; } else if ( configName . equals ( "dataType" ) ) { return DATA_TYPE ; } else if ( configName . equals ( "defaultValue" ) ) { return DEFAULT_VALUE ; } else if ( configName . equals ( "width" ) ) { return WIDTH ; } else if ( configName . equals ( "canBeNull" ) ) { return CAN_BE_NULL ; } else if ( configName . equals ( "id" ) ) { return ID ; } else if ( configName . equals ( "generatedId" ) ) { return GENERATED_ID ; } else if ( configName . equals ( "generatedIdSequence" ) ) { return GENERATED_ID_SEQUENCE ; } else if ( configName . equals ( "foreign" ) ) { return FOREIGN ; } else if ( configName . equals ( "useGetSet" ) ) { return USE_GET_SET ; } else if ( configName . equals ( "unknownEnumName" ) ) { return UNKNOWN_ENUM_NAME ; } else if ( configName . equals ( "throwIfNull" ) ) { return THROW_IF_NULL ; } else if ( configName . equals ( "persisted" ) ) { return PERSISTED ; } else if ( configName . equals ( "format" ) ) { return FORMAT ; } else if ( configName . equals ( "unique" ) ) { return UNIQUE ; } else if ( configName . equals ( "uniqueCombo" ) ) { return UNIQUE_COMBO ; } else if ( configName . equals ( "index" ) ) { return INDEX ; } else if ( configName . equals ( "uniqueIndex" ) ) { return UNIQUE_INDEX ; } else if ( configName . equals ( "indexName" ) ) { return INDEX_NAME ; } else if ( configName . equals ( "uniqueIndexName" ) ) { return UNIQUE_INDEX_NAME ; } else if ( configName . equals ( "foreignAutoRefresh" ) ) { return FOREIGN_AUTO_REFRESH ; } else if ( configName . equals ( "maxForeignAutoRefreshLevel" ) ) { return MAX_FOREIGN_AUTO_REFRESH_LEVEL ; } else if ( configName . equals ( "persisterClass" ) ) { return PERSISTER_CLASS ; } else if ( configName . equals ( "allowGeneratedIdInsert" ) ) { return ALLOW_GENERATED_ID_INSERT ; } else if ( configName . 
equals ( "columnDefinition" ) ) { return COLUMN_DEFINITON ; } else if ( configName . equals ( "fullColumnDefinition" ) ) { return FULL_COLUMN_DEFINITON ; } else if ( configName . equals ( "foreignAutoCreate" ) ) { return FOREIGN_AUTO_CREATE ; } else if ( configName . equals ( "version" ) ) { return VERSION ; } else if ( configName . equals ( "foreignColumnName" ) ) { return FOREIGN_COLUMN_NAME ; } else if ( configName . equals ( "readOnly" ) ) { return READ_ONLY ; } else { throw new IllegalStateException ( "Could not find support for DatabaseField " + configName ) ; } }
Convert the name of the DatabaseField config field to its numeric constant .
32,146
private static DatabaseFieldConfig buildConfig ( DatabaseField databaseField , String tableName , Field field ) throws Exception { InvocationHandler proxy = Proxy . getInvocationHandler ( databaseField ) ; if ( proxy . getClass ( ) != annotationFactoryClazz ) { return null ; } Object elementsObject = elementsField . get ( proxy ) ; if ( elementsObject == null ) { return null ; } DatabaseFieldConfig config = new DatabaseFieldConfig ( field . getName ( ) ) ; Object [ ] objs = ( Object [ ] ) elementsObject ; for ( int i = 0 ; i < configFieldNums . length ; i ++ ) { Object value = valueField . get ( objs [ i ] ) ; if ( value != null ) { assignConfigField ( configFieldNums [ i ] , config , field , value ) ; } } return config ; }
Instead of calling the annotation methods directly we peer inside the proxy and investigate the array of AnnotationMember objects stored by the AnnotationFactory .
32,147
public H getHelper ( ) { if ( helper == null ) { if ( ! created ) { throw new IllegalStateException ( "A call has not been made to onCreate() yet so the helper is null" ) ; } else if ( destroyed ) { throw new IllegalStateException ( "A call to onDestroy has already been made and the helper cannot be used after that point" ) ; } else { throw new IllegalStateException ( "Helper is null for some unknown reason" ) ; } } else { return helper ; } }
Get a helper for this action .
32,148
protected void onStartLoading ( ) { if ( cachedResults != null ) { deliverResult ( cachedResults ) ; } if ( takeContentChanged ( ) || cachedResults == null ) { forceLoad ( ) ; } dao . registerObserver ( this ) ; }
Starts an asynchronous load of the data . When the result is ready the callbacks will be called on the UI thread . If a previous load has been completed and is still valid the result may be passed to the callbacks immediately .
32,149
private ResponseEntity < String > executeRequest ( final TelemetryEventData eventData ) { final HttpHeaders headers = new HttpHeaders ( ) ; headers . add ( HttpHeaders . CONTENT_TYPE , APPLICATION_JSON . toString ( ) ) ; try { final RestTemplate restTemplate = new RestTemplate ( ) ; final HttpEntity < String > body = new HttpEntity < > ( MAPPER . writeValueAsString ( eventData ) , headers ) ; return restTemplate . exchange ( TELEMETRY_TARGET_URL , HttpMethod . POST , body , String . class ) ; } catch ( JsonProcessingException | HttpClientErrorException ignore ) { log . warn ( "Failed to exchange telemetry request, {}." , ignore . getMessage ( ) ) ; } return null ; }
Execute the telemetry POST request , logging and swallowing serialization or client errors .
32,150
public < S extends T > S save ( S entity ) { Assert . notNull ( entity , "entity must not be null" ) ; if ( information . isNew ( entity ) ) { return operation . insert ( information . getCollectionName ( ) , entity , createKey ( information . getPartitionKeyFieldValue ( entity ) ) ) ; } else { operation . upsert ( information . getCollectionName ( ) , entity , createKey ( information . getPartitionKeyFieldValue ( entity ) ) ) ; } return entity ; }
save entity without partition
32,151
public < S extends T > Iterable < S > saveAll ( Iterable < S > entities ) { Assert . notNull ( entities , "Iterable entities should not be null" ) ; entities . forEach ( this :: save ) ; return entities ; }
batch save entities
32,152
public Iterable < T > findAll ( ) { return operation . findAll ( information . getCollectionName ( ) , information . getJavaType ( ) ) ; }
find all entities from one collection without configuring partition key value
32,153
public List < T > findAllById ( Iterable < ID > ids ) { Assert . notNull ( ids , "Iterable ids should not be null" ) ; return operation . findByIds ( ids , information . getJavaType ( ) , information . getCollectionName ( ) ) ; }
find entities based on id list from one collection without partitions
32,154
public Optional < T > findById ( ID id ) { Assert . notNull ( id , "id must not be null" ) ; if ( id instanceof String && ! StringUtils . hasText ( ( String ) id ) ) { return Optional . empty ( ) ; } return Optional . ofNullable ( operation . findById ( information . getCollectionName ( ) , id , information . getJavaType ( ) ) ) ; }
find one entity per id without partitions
32,155
public void deleteById ( ID id ) { Assert . notNull ( id , "id to be deleted should not be null" ) ; operation . deleteById ( information . getCollectionName ( ) , id , null ) ; }
delete one document per id without configuring partition key value
32,156
public void delete ( T entity ) { Assert . notNull ( entity , "entity to be deleted should not be null" ) ; final String partitionKeyValue = information . getPartitionKeyFieldValue ( entity ) ; operation . deleteById ( information . getCollectionName ( ) , information . getId ( entity ) , partitionKeyValue == null ? null : new PartitionKey ( partitionKeyValue ) ) ; }
delete one document per entity
32,157
public void deleteAll ( Iterable < ? extends T > entities ) { Assert . notNull ( entities , "Iterable entities should not be null" ) ; StreamSupport . stream ( entities . spliterator ( ) , true ) . forEach ( this :: delete ) ; }
delete list of entities without partitions
32,158
public boolean existsById ( ID primaryKey ) { Assert . notNull ( primaryKey , "primaryKey should not be null" ) ; return findById ( primaryKey ) . isPresent ( ) ; }
check if an entity exists per id without partition
32,159
public Page < T > findAll ( Pageable pageable ) { Assert . notNull ( pageable , "pageable should not be null" ) ; return operation . findAll ( pageable , information . getJavaType ( ) , information . getCollectionName ( ) ) ; }
Returns a Page of entities meeting the paging restriction provided in the Pageable object .
32,160
public static Object toDocumentDBValue ( Object fromPropertyValue ) { if ( fromPropertyValue == null ) { return null ; } if ( fromPropertyValue instanceof Date ) { fromPropertyValue = ( ( Date ) fromPropertyValue ) . getTime ( ) ; } else if ( fromPropertyValue instanceof ZonedDateTime ) { fromPropertyValue = ( ( ZonedDateTime ) fromPropertyValue ) . format ( DateTimeFormatter . ofPattern ( ISO_8601_COMPATIBLE_DATE_PATTERN ) ) ; } else if ( fromPropertyValue instanceof Enum ) { fromPropertyValue = fromPropertyValue . toString ( ) ; } return fromPropertyValue ; }
Convert a property value to the value stored in CosmosDB
32,161
public void addProfile ( Profile profile ) { if ( PROFILE_KIND . equals ( profile . getKind ( ) ) ) { this . profileList . add ( profile . getSettings ( ) ) ; } }
Adds the profile .
32,162
public void addRuleInstances ( Digester digester ) { digester . addObjectCreate ( "profiles" , Profiles . class ) ; digester . addObjectCreate ( PROFILES_PROFILE , Profile . class ) ; digester . addObjectCreate ( PROFILES_PROFILE_SETTING , Setting . class ) ; digester . addSetNext ( PROFILES_PROFILE , "addProfile" ) ; digester . addSetNext ( PROFILES_PROFILE_SETTING , "addSetting" ) ; digester . addSetProperties ( PROFILES_PROFILE , "kind" , "kind" ) ; digester . addSetProperties ( PROFILES_PROFILE_SETTING , "id" , "id" ) ; digester . addSetProperties ( PROFILES_PROFILE_SETTING , "value" , "value" ) ; }
Adds the rule instances .
32,163
List < File > addCollectionFiles ( File newBasedir ) { final DirectoryScanner ds = new DirectoryScanner ( ) ; ds . setBasedir ( newBasedir ) ; if ( this . includes != null && this . includes . length > 0 ) { ds . setIncludes ( this . includes ) ; } else { ds . setIncludes ( DEFAULT_INCLUDES ) ; } ds . setExcludes ( this . excludes ) ; ds . addDefaultExcludes ( ) ; ds . setCaseSensitive ( false ) ; ds . setFollowSymlinks ( false ) ; ds . scan ( ) ; List < File > foundFiles = new ArrayList < > ( ) ; for ( String filename : ds . getIncludedFiles ( ) ) { foundFiles . add ( new File ( newBasedir , filename ) ) ; } return foundFiles ; }
Add source files to the files list .
32,164
private void storeFileHashCache ( Properties props ) { File cacheFile = new File ( this . targetDirectory , CACHE_PROPERTIES_FILENAME ) ; try ( OutputStream out = new BufferedOutputStream ( new FileOutputStream ( cacheFile ) ) ) { props . store ( out , null ) ; } catch ( IOException e ) { getLog ( ) . warn ( "Cannot store file hash cache properties file" , e ) ; } }
Store file hash cache .
32,165
private Properties readFileHashCacheFile ( ) { Properties props = new Properties ( ) ; Log log = getLog ( ) ; if ( ! this . targetDirectory . exists ( ) ) { this . targetDirectory . mkdirs ( ) ; } else if ( ! this . targetDirectory . isDirectory ( ) ) { log . warn ( "Something strange here as the '" + this . targetDirectory . getPath ( ) + "' supposedly target directory is not a directory." ) ; return props ; } File cacheFile = new File ( this . targetDirectory , CACHE_PROPERTIES_FILENAME ) ; if ( ! cacheFile . exists ( ) ) { return props ; } try ( BufferedInputStream stream = new BufferedInputStream ( new FileInputStream ( cacheFile ) ) ) { props . load ( stream ) ; } catch ( IOException e ) { log . warn ( "Cannot load file hash cache properties file" , e ) ; } return props ; }
Read file hash cache file .
32,166
private void formatFile ( File file , ResultCollector rc , Properties hashCache , String basedirPath ) throws MojoFailureException , MojoExecutionException { try { doFormatFile ( file , rc , hashCache , basedirPath , false ) ; } catch ( IOException | MalformedTreeException | BadLocationException e ) { rc . failCount ++ ; getLog ( ) . warn ( e ) ; } }
Format file .
32,167
private String readFileAsString ( File file ) throws java . io . IOException { StringBuilder fileData = new StringBuilder ( 1000 ) ; try ( BufferedReader reader = new BufferedReader ( ReaderFactory . newReader ( file , this . encoding ) ) ) { char [ ] buf = new char [ 1024 ] ; int numRead = 0 ; while ( ( numRead = reader . read ( buf ) ) != - 1 ) { String readData = String . valueOf ( buf , 0 , numRead ) ; fileData . append ( readData ) ; buf = new char [ 1024 ] ; } } return fileData . toString ( ) ; }
Read the given file and return the content as a string .
32,168
private void writeStringToFile ( String str , File file ) throws IOException { if ( ! file . exists ( ) && file . isDirectory ( ) ) { return ; } try ( BufferedWriter bw = new BufferedWriter ( WriterFactory . newWriter ( file , this . encoding ) ) ) { bw . write ( str ) ; } }
Write the given string to a file .
32,169
private JobParameters getNextJobParameters ( Job job ) throws JobParametersNotFoundException { String jobIdentifier = job . getName ( ) ; JobParameters jobParameters ; List < JobInstance > lastInstances = jobExplorer . getJobInstances ( jobIdentifier , 0 , 1 ) ; JobParametersIncrementer incrementer = job . getJobParametersIncrementer ( ) ; if ( lastInstances . isEmpty ( ) ) { jobParameters = incrementer . getNext ( new JobParameters ( ) ) ; if ( jobParameters == null ) { throw new JobParametersNotFoundException ( "No bootstrap parameters found from incrementer for job=" + jobIdentifier ) ; } } else { List < JobExecution > lastExecutions = jobExplorer . getJobExecutions ( lastInstances . get ( 0 ) ) ; jobParameters = incrementer . getNext ( lastExecutions . get ( 0 ) . getJobParameters ( ) ) ; } return jobParameters ; }
Borrowed from CommandLineJobRunner .
32,170
public String read ( ) throws Exception { String item = null ; if ( index < input . length ) { item = input [ index ++ ] ; LOGGER . info ( item ) ; return item ; } else { return null ; } }
Reads next record from input
32,171
String getShardKey ( Message message ) { return getShardKey ( message . getTokenTime ( ) , this . modShardPolicy . getMessageShard ( message , metadata ) ) ; }
Return the shard for this message
32,172
private String getShardKey ( long messageTime , int modShard ) { long timePartition ; if ( metadata . getPartitionDuration ( ) != null ) timePartition = ( messageTime / metadata . getPartitionDuration ( ) ) % metadata . getPartitionCount ( ) ; else timePartition = 0 ; return getName ( ) + ":" + timePartition + ":" + modShard ; }
Return the shard for this timestamp
32,173
public List < MessageHistory > getKeyHistory ( String key , Long startTime , Long endTime , int count ) throws MessageQueueException { List < MessageHistory > list = Lists . newArrayList ( ) ; ColumnList < UUID > columns ; try { columns = keyspace . prepareQuery ( historyColumnFamily ) . setConsistencyLevel ( consistencyLevel ) . getRow ( key ) . execute ( ) . getResult ( ) ; } catch ( ConnectionException e ) { throw new MessageQueueException ( "Failed to load history for " + key , e ) ; } for ( Column < UUID > column : columns ) { try { list . add ( deserializeString ( column . getStringValue ( ) , MessageHistory . class ) ) ; } catch ( Exception e ) { LOG . info ( "Error deserializing history entry" , e ) ; } } return list ; }
Return history for a single key for the specified time range
32,174
public List < Message > peekMessages ( int itemsToPeek ) throws MessageQueueException { List < Message > messages = Lists . newArrayList ( ) ; for ( MessageQueueShard shard : shardReaderPolicy . listShards ( ) ) { messages . addAll ( peekMessages ( shard . getName ( ) , itemsToPeek - messages . size ( ) ) ) ; if ( messages . size ( ) == itemsToPeek ) return messages ; } return messages ; }
Iterate through shards attempting to extract itemsToPeek items . Will return once itemsToPeek items have been read or all shards have been checked .
32,175
private Collection < Message > peekMessages ( String shardName , int itemsToPeek ) throws MessageQueueException { try { ColumnList < MessageQueueEntry > result = keyspace . prepareQuery ( queueColumnFamily ) . setConsistencyLevel ( consistencyLevel ) . getKey ( shardName ) . withColumnRange ( new RangeBuilder ( ) . setLimit ( itemsToPeek ) . setStart ( entrySerializer . makeEndpoint ( ( byte ) MessageQueueEntryType . Message . ordinal ( ) , Equality . GREATER_THAN_EQUALS ) . toBytes ( ) ) . setEnd ( entrySerializer . makeEndpoint ( ( byte ) MessageQueueEntryType . Message . ordinal ( ) , Equality . LESS_THAN_EQUALS ) . toBytes ( ) ) . build ( ) ) . execute ( ) . getResult ( ) ; List < Message > messages = Lists . newArrayListWithCapacity ( result . size ( ) ) ; for ( Column < MessageQueueEntry > column : result ) { Message message = extractMessageFromColumn ( column ) ; if ( message != null ) messages . add ( message ) ; } return messages ; } catch ( ConnectionException e ) { throw new MessageQueueException ( "Error peeking for messages from shard " + shardName , e ) ; } }
Peek into messages contained in the shard . This call does not take trigger time into account and will return messages that are not yet due to be executed
32,176
Message extractMessageFromColumn ( Column < MessageQueueEntry > column ) { Message message = null ; try { ByteArrayInputStream bais = new ByteArrayInputStream ( column . getByteArrayValue ( ) ) ; message = mapper . readValue ( bais , Message . class ) ; } catch ( Exception e ) { LOG . warn ( "Error processing message " , e ) ; try { message = invalidMessageHandler . apply ( column . getStringValue ( ) ) ; } catch ( Exception e2 ) { LOG . warn ( "Error processing invalid message" , e2 ) ; } } return message ; }
Extract a message body from a column
32,177
private boolean hasMessages ( String shardName ) throws MessageQueueException { UUID currentTime = TimeUUIDUtils . getUniqueTimeUUIDinMicros ( ) ; try { ColumnList < MessageQueueEntry > result = keyspace . prepareQuery ( queueColumnFamily ) . setConsistencyLevel ( consistencyLevel ) . getKey ( shardName ) . withColumnRange ( new RangeBuilder ( ) . setLimit ( 1 ) . setStart ( entrySerializer . makeEndpoint ( ( byte ) MessageQueueEntryType . Message . ordinal ( ) , Equality . EQUAL ) . toBytes ( ) ) . setEnd ( entrySerializer . makeEndpoint ( ( byte ) MessageQueueEntryType . Message . ordinal ( ) , Equality . EQUAL ) . append ( ( byte ) 0 , Equality . EQUAL ) . append ( currentTime , Equality . LESS_THAN_EQUALS ) . toBytes ( ) ) . build ( ) ) . execute ( ) . getResult ( ) ; return ! result . isEmpty ( ) ; } catch ( ConnectionException e ) { throw new MessageQueueException ( "Error checking shard for messages. " + shardName , e ) ; } }
Fast check to see if a shard has messages to process
32,178
public void verifyLock ( long curTimeInMicros ) throws Exception , BusyLockException , StaleLockException { if ( lockColumn == null ) throw new IllegalStateException ( "verifyLock() called without attempting to take the lock" ) ; Map < String , Long > lockResult = readLockColumns ( readDataColumns ) ; for ( Entry < String , Long > entry : lockResult . entrySet ( ) ) { if ( entry . getValue ( ) != 0 && curTimeInMicros > entry . getValue ( ) ) { if ( failOnStaleLock ) { throw new StaleLockException ( "Stale lock on row '" + key + "'. Manual cleanup requried." ) ; } locksToDelete . add ( entry . getKey ( ) ) ; } else if ( ! entry . getKey ( ) . equals ( lockColumn ) ) { throw new BusyLockException ( "Lock already acquired for row '" + key + "' with lock column " + entry . getKey ( ) ) ; } } }
Verify that the lock was acquired . This shouldn't be called unless it's part of a recipe built on top of ColumnPrefixDistributedRowLock .
32,179
public void release ( ) throws Exception { if ( ! locksToDelete . isEmpty ( ) || lockColumn != null ) { MutationBatch m = keyspace . prepareMutationBatch ( ) . setConsistencyLevel ( consistencyLevel ) ; fillReleaseMutation ( m , false ) ; m . execute ( ) ; } }
Release the lock by releasing this and any other stale lock columns
32,180
public Map < String , Long > releaseLocks ( boolean force ) throws Exception { Map < String , Long > locksToDelete = readLockColumns ( ) ; MutationBatch m = keyspace . prepareMutationBatch ( ) . setConsistencyLevel ( consistencyLevel ) ; ColumnListMutation < String > row = m . withRow ( columnFamily , key ) ; long now = getCurrentTimeMicros ( ) ; for ( Entry < String , Long > c : locksToDelete . entrySet ( ) ) { if ( force || ( c . getValue ( ) > 0 && c . getValue ( ) < now ) ) { row . deleteColumn ( c . getKey ( ) ) ; } } m . execute ( ) ; return locksToDelete ; }
Delete lock columns . Set force = true to remove locks that haven't expired yet .
32,181
private ByteBuffer generateTimeoutValue ( long timeout ) { if ( columnFamily . getDefaultValueSerializer ( ) == ByteBufferSerializer . get ( ) || columnFamily . getDefaultValueSerializer ( ) == LongSerializer . get ( ) ) { return LongSerializer . get ( ) . toByteBuffer ( timeout ) ; } else { return columnFamily . getDefaultValueSerializer ( ) . fromString ( Long . toString ( timeout ) ) ; } }
Generate the expire time value to put in the column value .
32,182
public long readTimeoutValue ( Column < ? > column ) { if ( columnFamily . getDefaultValueSerializer ( ) == ByteBufferSerializer . get ( ) || columnFamily . getDefaultValueSerializer ( ) == LongSerializer . get ( ) ) { return column . getLongValue ( ) ; } else { return Long . parseLong ( column . getStringValue ( ) ) ; } }
Read the expiration time from the column value
32,183
public List < ListenableFuture < OperationResult < Void > > > replayWal ( int count ) { List < ListenableFuture < OperationResult < Void > > > futures = Lists . newArrayList ( ) ; WriteAheadEntry walEntry ; while ( null != ( walEntry = wal . readNextEntry ( ) ) && count -- > 0 ) { MutationBatch m = keyspace . prepareMutationBatch ( ) ; try { walEntry . readMutation ( m ) ; futures . add ( executeWalEntry ( walEntry , m ) ) ; } catch ( WalException e ) { wal . removeEntry ( walEntry ) ; } } return futures ; }
Replay records from the WAL
32,184
public ListenableFuture < OperationResult < Void > > execute ( final MutationBatch m ) throws WalException { final WriteAheadEntry walEntry = wal . createEntry ( ) ; walEntry . writeMutation ( m ) ; return executeWalEntry ( walEntry , m ) ; }
Write a mutation to the wal and execute it
32,185
public < V > V getColumnValue ( T instance , String columnName , Class < V > valueClass ) { Field field = fields . get ( columnName ) ; if ( field == null ) { throw new IllegalArgumentException ( "Column not found: " + columnName ) ; } try { return valueClass . cast ( field . get ( instance ) ) ; } catch ( IllegalAccessException e ) { throw new RuntimeException ( e ) ; } }
Return the value for the given column from the given instance
32,186
public < V > void setColumnValue ( T instance , String columnName , V value ) { Field field = fields . get ( columnName ) ; if ( field == null ) { throw new IllegalArgumentException ( "Column not found: " + columnName ) ; } try { field . set ( instance , value ) ; } catch ( IllegalAccessException e ) { throw new RuntimeException ( e ) ; } }
Set the value for the given column for the given instance
32,187
public void fillMutation ( T instance , ColumnListMutation < String > mutation ) { for ( String fieldName : getNames ( ) ) { Coercions . setColumnMutationFromField ( instance , fields . get ( fieldName ) , fieldName , mutation ) ; } }
Map a bean to a column mutation . i . e . set the columns in the mutation to the corresponding values from the instance
32,188
public T newInstance ( ColumnList < String > columns ) throws IllegalAccessException , InstantiationException { return initInstance ( clazz . newInstance ( ) , columns ) ; }
Allocate a new instance and populate it with the values from the given column list
32,189
public T initInstance ( T instance , ColumnList < String > columns ) { for ( com . netflix . astyanax . model . Column < String > column : columns ) { Field field = fields . get ( column . getName ( ) ) ; if ( field != null ) { Coercions . setFieldFromColumn ( instance , field , column ) ; } } return instance ; }
Populate the given instance with the values from the given column list
32,190
public List < T > getAll ( Rows < ? , String > rows ) throws InstantiationException , IllegalAccessException { List < T > list = Lists . newArrayList ( ) ; for ( Row < ? , String > row : rows ) { if ( ! row . getColumns ( ) . isEmpty ( ) ) { list . add ( newInstance ( row . getColumns ( ) ) ) ; } } return list ; }
Load a set of rows into new instances populated with values from the column lists
32,191
public BoundStatement getQueryStatement ( CqlRowSliceQueryImpl < ? , ? > rowSliceQuery , boolean useCaching ) { switch ( rowSliceQuery . getColQueryType ( ) ) { case AllColumns : return SelectAllColumnsForRowKeys . getBoundStatement ( rowSliceQuery , useCaching ) ; case ColumnSet : return SelectColumnSetForRowKeys . getBoundStatement ( rowSliceQuery , useCaching ) ; case ColumnRange : if ( isCompositeColumn ) { return SelectCompositeColumnRangeForRowKeys . getBoundStatement ( rowSliceQuery , useCaching ) ; } else { return SelectColumnRangeForRowKeys . getBoundStatement ( rowSliceQuery , useCaching ) ; } default : throw new RuntimeException ( "RowSliceQuery with row keys use case not supported." ) ; } }
Main method that is used to generate the java driver statement from the given Astyanax row slice query . Note that the method allows the caller to specify whether to use caching or not .
32,192
public synchronized boolean setPools ( Collection < HostConnectionPool < CL > > newPools ) { Set < HostConnectionPool < CL > > toRemove = Sets . newHashSet ( this . pools ) ; boolean didChange = false ; for ( HostConnectionPool < CL > pool : newPools ) { if ( this . pools . add ( pool ) ) didChange = true ; toRemove . remove ( pool ) ; } for ( HostConnectionPool < CL > pool : toRemove ) { if ( this . pools . remove ( pool ) ) didChange = true ; } if ( didChange ) refresh ( ) ; return didChange ; }
Sets all pools for this partition . Removes old partitions and adds new one .
32,193
public synchronized boolean addPool ( HostConnectionPool < CL > pool ) { if ( this . pools . add ( pool ) ) { refresh ( ) ; return true ; } return false ; }
Add a new pool to the partition . Checks to see if the pool already existed . If so then there is no need to refresh the pool .
32,194
public synchronized void refresh ( ) { List < HostConnectionPool < CL > > pools = Lists . newArrayList ( ) ; for ( HostConnectionPool < CL > pool : this . pools ) { if ( ! pool . isReconnecting ( ) ) { pools . add ( pool ) ; } } this . activePools . set ( strategy . sortAndfilterPartition ( pools , prioritize ) ) ; }
Refresh the partition
32,195
public void fillReleaseMutation ( MutationBatch m , boolean excludeCurrentLock ) { ColumnListMutation < C > row = m . withRow ( columnFamily , key ) ; for ( C c : locksToDelete ) { row . deleteColumn ( c ) ; } if ( ! excludeCurrentLock && lockColumn != null ) row . deleteColumn ( lockColumn ) ; locksToDelete . clear ( ) ; lockColumn = null ; }
Fill a mutation that will release the locks . This may be used from a separate recipe to release multiple locks .
32,196
public void trackCheckpoint ( String startToken , String checkpointToken ) { tokenMap . put ( startToken , checkpointToken ) ; }
Track the checkpoint in an in-memory map ; checkpoints aren't persisted .
32,197
public static < K > ColumnParent getColumnParent ( ColumnFamily < ? , ? > columnFamily , ColumnPath < ? > path ) throws BadRequestException { ColumnParent cp = new ColumnParent ( ) ; cp . setColumn_family ( columnFamily . getName ( ) ) ; if ( path != null ) { Iterator < ByteBuffer > columns = path . iterator ( ) ; if ( columnFamily . getType ( ) == ColumnType . SUPER && columns . hasNext ( ) ) { cp . setSuper_column ( columns . next ( ) ) ; } } return cp ; }
Construct a Hector ColumnParent based on the information in the query and the type of column family being queried .
32,198
public static < K > org . apache . cassandra . thrift . ColumnPath getColumnPath ( ColumnFamily < ? , ? > columnFamily , ColumnPath < ? > path ) throws BadRequestException { org . apache . cassandra . thrift . ColumnPath cp = new org . apache . cassandra . thrift . ColumnPath ( ) ; cp . setColumn_family ( columnFamily . getName ( ) ) ; if ( path != null ) { Iterator < ByteBuffer > columns = path . iterator ( ) ; if ( columnFamily . getType ( ) == ColumnType . SUPER && columns . hasNext ( ) ) { cp . setSuper_column ( columns . next ( ) ) ; } if ( columns . hasNext ( ) ) { cp . setColumn ( columns . next ( ) ) ; } if ( columns . hasNext ( ) ) { throw new BadRequestException ( "Path depth of " + path . length ( ) + " not supported for column family \'" + columnFamily . getName ( ) + "\'" ) ; } } return cp ; }
Construct a Thrift ColumnPath based on the information in the query and the type of column family being queried .
32,199
public static < C > SlicePredicate getPredicate ( ColumnSlice < C > columns , Serializer < C > colSer ) { if ( columns == null ) { SlicePredicate predicate = new SlicePredicate ( ) ; predicate . setSlice_range ( new SliceRange ( ByteBuffer . wrap ( new byte [ 0 ] ) , ByteBuffer . wrap ( new byte [ 0 ] ) , false , Integer . MAX_VALUE ) ) ; return predicate ; } if ( columns . getColumns ( ) != null ) { SlicePredicate predicate = new SlicePredicate ( ) ; predicate . setColumn_namesIsSet ( true ) ; predicate . column_names = colSer . toBytesList ( columns . getColumns ( ) ) ; return predicate ; } else { SlicePredicate predicate = new SlicePredicate ( ) ; predicate . setSlice_range ( new SliceRange ( ( columns . getStartColumn ( ) == null ) ? ByteBuffer . wrap ( new byte [ 0 ] ) : ByteBuffer . wrap ( colSer . toBytes ( columns . getStartColumn ( ) ) ) , ( columns . getEndColumn ( ) == null ) ? ByteBuffer . wrap ( new byte [ 0 ] ) : ByteBuffer . wrap ( colSer . toBytes ( columns . getEndColumn ( ) ) ) , columns . getReversed ( ) , columns . getLimit ( ) ) ) ; return predicate ; } }
Return a Hector SlicePredicate based on the provided column slice