idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
32,100
@ Override public List < T > findAllById ( Iterable < ID > ids ) { Assert . notNull ( ids , "Iterable ids should not be null" ) ; return operation . findByIds ( ids , information . getJavaType ( ) , information . getCollectionName ( ) ) ; }
find entities based on id list from one collection without partitions
70
11
32,101
@ Override public Optional < T > findById ( ID id ) { Assert . notNull ( id , "id must not be null" ) ; if ( id instanceof String && ! StringUtils . hasText ( ( String ) id ) ) { return Optional . empty ( ) ; } return Optional . ofNullable ( operation . findById ( information . getCollectionName ( ) , id , information . getJavaType ( ) ) ) ; }
find one entity per id without partitions
95
7
32,102
@ Override public void deleteById ( ID id ) { Assert . notNull ( id , "id to be deleted should not be null" ) ; operation . deleteById ( information . getCollectionName ( ) , id , null ) ; }
delete one document per id without configuring partition key value
51
11
32,103
@ Override public void delete ( T entity ) { Assert . notNull ( entity , "entity to be deleted should not be null" ) ; final String partitionKeyValue = information . getPartitionKeyFieldValue ( entity ) ; operation . deleteById ( information . getCollectionName ( ) , information . getId ( entity ) , partitionKeyValue == null ? null : new PartitionKey ( partitionKeyValue ) ) ; }
delete one document per entity
90
5
32,104
@ Override public void deleteAll ( Iterable < ? extends T > entities ) { Assert . notNull ( entities , "Iterable entities should not be null" ) ; StreamSupport . stream ( entities . spliterator ( ) , true ) . forEach ( this :: delete ) ; }
delete list of entities without partitions
61
6
32,105
@ Override public boolean existsById ( ID primaryKey ) { Assert . notNull ( primaryKey , "primaryKey should not be null" ) ; return findById ( primaryKey ) . isPresent ( ) ; }
check if an entity exists per id without partition
46
9
32,106
@ Override public Page < T > findAll ( Pageable pageable ) { Assert . notNull ( pageable , "pageable should not be null" ) ; return operation . findAll ( pageable , information . getJavaType ( ) , information . getCollectionName ( ) ) ; }
FindQuerySpecGenerator Returns a Page of entities meeting the paging restriction provided in the Pageable object .
63
22
32,107
public static Object toDocumentDBValue ( Object fromPropertyValue ) { if ( fromPropertyValue == null ) { return null ; } // com.microsoft.azure.documentdb.JsonSerializable#set(String, T) cannot set values for Date and Enum correctly if ( fromPropertyValue instanceof Date ) { fromPropertyValue = ( ( Date ) fromPropertyValue ) . getTime ( ) ; } else if ( fromPropertyValue instanceof ZonedDateTime ) { fromPropertyValue = ( ( ZonedDateTime ) fromPropertyValue ) . format ( DateTimeFormatter . ofPattern ( ISO_8601_COMPATIBLE_DATE_PATTERN ) ) ; } else if ( fromPropertyValue instanceof Enum ) { fromPropertyValue = fromPropertyValue . toString ( ) ; } return fromPropertyValue ; }
Convert a property value to the value stored in CosmosDB
179
12
32,108
public void addProfile ( Profile profile ) { if ( PROFILE_KIND . equals ( profile . getKind ( ) ) ) { this . profileList . add ( profile . getSettings ( ) ) ; } }
Adds the profile .
45
4
32,109
@ Override public void addRuleInstances ( Digester digester ) { digester . addObjectCreate ( "profiles" , Profiles . class ) ; digester . addObjectCreate ( PROFILES_PROFILE , Profile . class ) ; digester . addObjectCreate ( PROFILES_PROFILE_SETTING , Setting . class ) ; digester . addSetNext ( PROFILES_PROFILE , "addProfile" ) ; digester . addSetNext ( PROFILES_PROFILE_SETTING , "addSetting" ) ; digester . addSetProperties ( PROFILES_PROFILE , "kind" , "kind" ) ; digester . addSetProperties ( PROFILES_PROFILE_SETTING , "id" , "id" ) ; digester . addSetProperties ( PROFILES_PROFILE_SETTING , "value" , "value" ) ; }
Adds the rule instances .
204
5
32,110
List < File > addCollectionFiles ( File newBasedir ) { final DirectoryScanner ds = new DirectoryScanner ( ) ; ds . setBasedir ( newBasedir ) ; if ( this . includes != null && this . includes . length > 0 ) { ds . setIncludes ( this . includes ) ; } else { ds . setIncludes ( DEFAULT_INCLUDES ) ; } ds . setExcludes ( this . excludes ) ; ds . addDefaultExcludes ( ) ; ds . setCaseSensitive ( false ) ; ds . setFollowSymlinks ( false ) ; ds . scan ( ) ; List < File > foundFiles = new ArrayList <> ( ) ; for ( String filename : ds . getIncludedFiles ( ) ) { foundFiles . add ( new File ( newBasedir , filename ) ) ; } return foundFiles ; }
Add source files to the files list .
190
8
32,111
private void storeFileHashCache ( Properties props ) { File cacheFile = new File ( this . targetDirectory , CACHE_PROPERTIES_FILENAME ) ; try ( OutputStream out = new BufferedOutputStream ( new FileOutputStream ( cacheFile ) ) ) { props . store ( out , null ) ; } catch ( IOException e ) { getLog ( ) . warn ( "Cannot store file hash cache properties file" , e ) ; } }
Store file hash cache .
100
5
32,112
private Properties readFileHashCacheFile ( ) { Properties props = new Properties ( ) ; Log log = getLog ( ) ; if ( ! this . targetDirectory . exists ( ) ) { this . targetDirectory . mkdirs ( ) ; } else if ( ! this . targetDirectory . isDirectory ( ) ) { log . warn ( "Something strange here as the '" + this . targetDirectory . getPath ( ) + "' supposedly target directory is not a directory." ) ; return props ; } File cacheFile = new File ( this . targetDirectory , CACHE_PROPERTIES_FILENAME ) ; if ( ! cacheFile . exists ( ) ) { return props ; } try ( BufferedInputStream stream = new BufferedInputStream ( new FileInputStream ( cacheFile ) ) ) { props . load ( stream ) ; } catch ( IOException e ) { log . warn ( "Cannot load file hash cache properties file" , e ) ; } return props ; }
Read file hash cache file .
208
6
32,113
private void formatFile ( File file , ResultCollector rc , Properties hashCache , String basedirPath ) throws MojoFailureException , MojoExecutionException { try { doFormatFile ( file , rc , hashCache , basedirPath , false ) ; } catch ( IOException | MalformedTreeException | BadLocationException e ) { rc . failCount ++ ; getLog ( ) . warn ( e ) ; } }
Format file .
89
3
32,114
private String readFileAsString ( File file ) throws java . io . IOException { StringBuilder fileData = new StringBuilder ( 1000 ) ; try ( BufferedReader reader = new BufferedReader ( ReaderFactory . newReader ( file , this . encoding ) ) ) { char [ ] buf = new char [ 1024 ] ; int numRead = 0 ; while ( ( numRead = reader . read ( buf ) ) != - 1 ) { String readData = String . valueOf ( buf , 0 , numRead ) ; fileData . append ( readData ) ; buf = new char [ 1024 ] ; } } return fileData . toString ( ) ; }
Read the given file and return the content as a string .
138
12
32,115
private void writeStringToFile ( String str , File file ) throws IOException { if ( ! file . exists ( ) && file . isDirectory ( ) ) { return ; } try ( BufferedWriter bw = new BufferedWriter ( WriterFactory . newWriter ( file , this . encoding ) ) ) { bw . write ( str ) ; } }
Write the given string to a file .
75
8
32,116
private JobParameters getNextJobParameters ( Job job ) throws JobParametersNotFoundException { String jobIdentifier = job . getName ( ) ; JobParameters jobParameters ; List < JobInstance > lastInstances = jobExplorer . getJobInstances ( jobIdentifier , 0 , 1 ) ; JobParametersIncrementer incrementer = job . getJobParametersIncrementer ( ) ; if ( lastInstances . isEmpty ( ) ) { jobParameters = incrementer . getNext ( new JobParameters ( ) ) ; if ( jobParameters == null ) { throw new JobParametersNotFoundException ( "No bootstrap parameters found from incrementer for job=" + jobIdentifier ) ; } } else { List < JobExecution > lastExecutions = jobExplorer . getJobExecutions ( lastInstances . get ( 0 ) ) ; jobParameters = incrementer . getNext ( lastExecutions . get ( 0 ) . getJobParameters ( ) ) ; } return jobParameters ; }
Borrowed from CommandLineJobRunner .
207
9
32,117
@ Override public String read ( ) throws Exception { String item = null ; if ( index < input . length ) { item = input [ index ++ ] ; LOGGER . info ( item ) ; return item ; } else { return null ; } }
Reads next record from input
52
6
32,118
String getShardKey ( Message message ) { return getShardKey ( message . getTokenTime ( ) , this . modShardPolicy . getMessageShard ( message , metadata ) ) ; }
Return the shard for this message
43
7
32,119
private String getShardKey ( long messageTime , int modShard ) { long timePartition ; if ( metadata . getPartitionDuration ( ) != null ) timePartition = ( messageTime / metadata . getPartitionDuration ( ) ) % metadata . getPartitionCount ( ) ; else timePartition = 0 ; return getName ( ) + ":" + timePartition + ":" + modShard ; }
Return the shard for this timestamp
90
7
32,120
@ Override public List < MessageHistory > getKeyHistory ( String key , Long startTime , Long endTime , int count ) throws MessageQueueException { List < MessageHistory > list = Lists . newArrayList ( ) ; ColumnList < UUID > columns ; try { columns = keyspace . prepareQuery ( historyColumnFamily ) . setConsistencyLevel ( consistencyLevel ) . getRow ( key ) . execute ( ) . getResult ( ) ; } catch ( ConnectionException e ) { throw new MessageQueueException ( "Failed to load history for " + key , e ) ; } for ( Column < UUID > column : columns ) { try { list . add ( deserializeString ( column . getStringValue ( ) , MessageHistory . class ) ) ; } catch ( Exception e ) { LOG . info ( "Error deserializing history entry" , e ) ; } } return list ; }
Return history for a single key for the specified time range
191
11
32,121
@ Override public List < Message > peekMessages ( int itemsToPeek ) throws MessageQueueException { List < Message > messages = Lists . newArrayList ( ) ; for ( MessageQueueShard shard : shardReaderPolicy . listShards ( ) ) { messages . addAll ( peekMessages ( shard . getName ( ) , itemsToPeek - messages . size ( ) ) ) ; if ( messages . size ( ) == itemsToPeek ) return messages ; } return messages ; }
Iterate through shards attempting to extract itemsToPeek items . Will return once itemToPeek items have been read or all shards have been checked .
109
31
32,122
private Collection < Message > peekMessages ( String shardName , int itemsToPeek ) throws MessageQueueException { try { ColumnList < MessageQueueEntry > result = keyspace . prepareQuery ( queueColumnFamily ) . setConsistencyLevel ( consistencyLevel ) . getKey ( shardName ) . withColumnRange ( new RangeBuilder ( ) . setLimit ( itemsToPeek ) . setStart ( entrySerializer . makeEndpoint ( ( byte ) MessageQueueEntryType . Message . ordinal ( ) , Equality . GREATER_THAN_EQUALS ) . toBytes ( ) ) . setEnd ( entrySerializer . makeEndpoint ( ( byte ) MessageQueueEntryType . Message . ordinal ( ) , Equality . LESS_THAN_EQUALS ) . toBytes ( ) ) . build ( ) ) . execute ( ) . getResult ( ) ; List < Message > messages = Lists . newArrayListWithCapacity ( result . size ( ) ) ; for ( Column < MessageQueueEntry > column : result ) { Message message = extractMessageFromColumn ( column ) ; if ( message != null ) messages . add ( message ) ; } return messages ; } catch ( ConnectionException e ) { throw new MessageQueueException ( "Error peeking for messages from shard " + shardName , e ) ; } }
Peek into messages contained in the shard . This call does not take trigger time into account and will return messages that are not yet due to be executed
285
31
32,123
Message extractMessageFromColumn ( Column < MessageQueueEntry > column ) { // Next, parse the message metadata and add a timeout entry Message message = null ; try { ByteArrayInputStream bais = new ByteArrayInputStream ( column . getByteArrayValue ( ) ) ; message = mapper . readValue ( bais , Message . class ) ; } catch ( Exception e ) { LOG . warn ( "Error processing message " , e ) ; try { message = invalidMessageHandler . apply ( column . getStringValue ( ) ) ; } catch ( Exception e2 ) { LOG . warn ( "Error processing invalid message" , e2 ) ; } } return message ; }
Extract a message body from a column
141
8
32,124
private boolean hasMessages ( String shardName ) throws MessageQueueException { UUID currentTime = TimeUUIDUtils . getUniqueTimeUUIDinMicros ( ) ; try { ColumnList < MessageQueueEntry > result = keyspace . prepareQuery ( queueColumnFamily ) . setConsistencyLevel ( consistencyLevel ) . getKey ( shardName ) . withColumnRange ( new RangeBuilder ( ) . setLimit ( 1 ) // Read extra messages because of the lock column . setStart ( entrySerializer . makeEndpoint ( ( byte ) MessageQueueEntryType . Message . ordinal ( ) , Equality . EQUAL ) . toBytes ( ) ) . setEnd ( entrySerializer . makeEndpoint ( ( byte ) MessageQueueEntryType . Message . ordinal ( ) , Equality . EQUAL ) . append ( ( byte ) 0 , Equality . EQUAL ) . append ( currentTime , Equality . LESS_THAN_EQUALS ) . toBytes ( ) ) . build ( ) ) . execute ( ) . getResult ( ) ; return ! result . isEmpty ( ) ; } catch ( ConnectionException e ) { throw new MessageQueueException ( "Error checking shard for messages. " + shardName , e ) ; } }
Fast check to see if a shard has messages to process
266
12
32,125
public void verifyLock ( long curTimeInMicros ) throws Exception , BusyLockException , StaleLockException { if ( lockColumn == null ) throw new IllegalStateException ( "verifyLock() called without attempting to take the lock" ) ; // Read back all columns. There should be only 1 if we got the lock Map < String , Long > lockResult = readLockColumns ( readDataColumns ) ; // Cleanup and check that we really got the lock for ( Entry < String , Long > entry : lockResult . entrySet ( ) ) { // This is a stale lock that was never cleaned up if ( entry . getValue ( ) != 0 && curTimeInMicros > entry . getValue ( ) ) { if ( failOnStaleLock ) { throw new StaleLockException ( "Stale lock on row '" + key + "'. Manual cleanup requried." ) ; } locksToDelete . add ( entry . getKey ( ) ) ; } // Lock already taken, and not by us else if ( ! entry . getKey ( ) . equals ( lockColumn ) ) { throw new BusyLockException ( "Lock already acquired for row '" + key + "' with lock column " + entry . getKey ( ) ) ; } } }
Verify that the lock was acquired . This shouldn t be called unless it s part of a recipe built on top of ColumnPrefixDistributedRowLock .
270
32
32,126
@ Override public void release ( ) throws Exception { if ( ! locksToDelete . isEmpty ( ) || lockColumn != null ) { MutationBatch m = keyspace . prepareMutationBatch ( ) . setConsistencyLevel ( consistencyLevel ) ; fillReleaseMutation ( m , false ) ; m . execute ( ) ; } }
Release the lock by releasing this and any other stale lock columns
74
12
32,127
public Map < String , Long > releaseLocks ( boolean force ) throws Exception { Map < String , Long > locksToDelete = readLockColumns ( ) ; MutationBatch m = keyspace . prepareMutationBatch ( ) . setConsistencyLevel ( consistencyLevel ) ; ColumnListMutation < String > row = m . withRow ( columnFamily , key ) ; long now = getCurrentTimeMicros ( ) ; for ( Entry < String , Long > c : locksToDelete . entrySet ( ) ) { if ( force || ( c . getValue ( ) > 0 && c . getValue ( ) < now ) ) { row . deleteColumn ( c . getKey ( ) ) ; } } m . execute ( ) ; return locksToDelete ; }
Delete locks columns . Set force = true to remove locks that haven t expired yet .
164
17
32,128
private ByteBuffer generateTimeoutValue ( long timeout ) { if ( columnFamily . getDefaultValueSerializer ( ) == ByteBufferSerializer . get ( ) || columnFamily . getDefaultValueSerializer ( ) == LongSerializer . get ( ) ) { return LongSerializer . get ( ) . toByteBuffer ( timeout ) ; } else { return columnFamily . getDefaultValueSerializer ( ) . fromString ( Long . toString ( timeout ) ) ; } }
Generate the expire time value to put in the column value .
98
13
32,129
public long readTimeoutValue ( Column < ? > column ) { if ( columnFamily . getDefaultValueSerializer ( ) == ByteBufferSerializer . get ( ) || columnFamily . getDefaultValueSerializer ( ) == LongSerializer . get ( ) ) { return column . getLongValue ( ) ; } else { return Long . parseLong ( column . getStringValue ( ) ) ; } }
Read the expiration time from the column value
84
8
32,130
public List < ListenableFuture < OperationResult < Void > > > replayWal ( int count ) { List < ListenableFuture < OperationResult < Void >>> futures = Lists . newArrayList ( ) ; WriteAheadEntry walEntry ; while ( null != ( walEntry = wal . readNextEntry ( ) ) && count -- > 0 ) { MutationBatch m = keyspace . prepareMutationBatch ( ) ; try { walEntry . readMutation ( m ) ; futures . add ( executeWalEntry ( walEntry , m ) ) ; } catch ( WalException e ) { wal . removeEntry ( walEntry ) ; } } return futures ; }
Replay records from the WAL
139
7
32,131
public ListenableFuture < OperationResult < Void > > execute ( final MutationBatch m ) throws WalException { final WriteAheadEntry walEntry = wal . createEntry ( ) ; walEntry . writeMutation ( m ) ; return executeWalEntry ( walEntry , m ) ; }
Write a mutation to the wal and execute it
61
9
32,132
public < V > V getColumnValue ( T instance , String columnName , Class < V > valueClass ) { Field field = fields . get ( columnName ) ; if ( field == null ) { throw new IllegalArgumentException ( "Column not found: " + columnName ) ; } try { return valueClass . cast ( field . get ( instance ) ) ; } catch ( IllegalAccessException e ) { throw new RuntimeException ( e ) ; // should never get here } }
Return the value for the given column from the given instance
101
11
32,133
public < V > void setColumnValue ( T instance , String columnName , V value ) { Field field = fields . get ( columnName ) ; if ( field == null ) { throw new IllegalArgumentException ( "Column not found: " + columnName ) ; } try { field . set ( instance , value ) ; } catch ( IllegalAccessException e ) { throw new RuntimeException ( e ) ; // should never get here } }
Set the value for the given column for the given instance
92
11
32,134
public void fillMutation ( T instance , ColumnListMutation < String > mutation ) { for ( String fieldName : getNames ( ) ) { Coercions . setColumnMutationFromField ( instance , fields . get ( fieldName ) , fieldName , mutation ) ; } }
Map a bean to a column mutation . i . e . set the columns in the mutation to the corresponding values from the instance
60
25
32,135
public T newInstance ( ColumnList < String > columns ) throws IllegalAccessException , InstantiationException { return initInstance ( clazz . newInstance ( ) , columns ) ; }
Allocate a new instance and populate it with the values from the given column list
37
16
32,136
public T initInstance ( T instance , ColumnList < String > columns ) { for ( com . netflix . astyanax . model . Column < String > column : columns ) { Field field = fields . get ( column . getName ( ) ) ; if ( field != null ) { // otherwise it may be a column that was // removed, etc. Coercions . setFieldFromColumn ( instance , field , column ) ; } } return instance ; }
Populate the given instance with the values from the given column list
96
13
32,137
public List < T > getAll ( Rows < ? , String > rows ) throws InstantiationException , IllegalAccessException { List < T > list = Lists . newArrayList ( ) ; for ( Row < ? , String > row : rows ) { if ( ! row . getColumns ( ) . isEmpty ( ) ) { list . add ( newInstance ( row . getColumns ( ) ) ) ; } } return list ; }
Load a set of rows into new instances populated with values from the column lists
93
15
32,138
public BoundStatement getQueryStatement ( CqlRowSliceQueryImpl < ? , ? > rowSliceQuery , boolean useCaching ) { switch ( rowSliceQuery . getColQueryType ( ) ) { case AllColumns : return SelectAllColumnsForRowKeys . getBoundStatement ( rowSliceQuery , useCaching ) ; case ColumnSet : return SelectColumnSetForRowKeys . getBoundStatement ( rowSliceQuery , useCaching ) ; case ColumnRange : if ( isCompositeColumn ) { return SelectCompositeColumnRangeForRowKeys . getBoundStatement ( rowSliceQuery , useCaching ) ; } else { return SelectColumnRangeForRowKeys . getBoundStatement ( rowSliceQuery , useCaching ) ; } default : throw new RuntimeException ( "RowSliceQuery with row keys use case not supported." ) ; } }
Main method that is used to generate the java driver statement from the given Astyanax row slice query . Note that the method allows the caller to specify whether to use caching or not .
187
37
32,139
public synchronized boolean setPools ( Collection < HostConnectionPool < CL > > newPools ) { Set < HostConnectionPool < CL >> toRemove = Sets . newHashSet ( this . pools ) ; // Add new pools not previously seen boolean didChange = false ; for ( HostConnectionPool < CL > pool : newPools ) { if ( this . pools . add ( pool ) ) didChange = true ; toRemove . remove ( pool ) ; } // Remove pools for hosts that no longer exist for ( HostConnectionPool < CL > pool : toRemove ) { if ( this . pools . remove ( pool ) ) didChange = true ; } if ( didChange ) refresh ( ) ; return didChange ; }
Sets all pools for this partition . Removes old partitions and adds new one .
150
17
32,140
public synchronized boolean addPool ( HostConnectionPool < CL > pool ) { if ( this . pools . add ( pool ) ) { refresh ( ) ; return true ; } return false ; }
Add a new pool to the partition . Checks to see if the pool already existed . If so then there is no need to refresh the pool .
39
29
32,141
public synchronized void refresh ( ) { List < HostConnectionPool < CL >> pools = Lists . newArrayList ( ) ; for ( HostConnectionPool < CL > pool : this . pools ) { if ( ! pool . isReconnecting ( ) ) { pools . add ( pool ) ; } } this . activePools . set ( strategy . sortAndfilterPartition ( pools , prioritize ) ) ; }
Refresh the partition
86
4
32,142
public void fillReleaseMutation ( MutationBatch m , boolean excludeCurrentLock ) { // Add the deletes to the end of the mutation ColumnListMutation < C > row = m . withRow ( columnFamily , key ) ; for ( C c : locksToDelete ) { row . deleteColumn ( c ) ; } if ( ! excludeCurrentLock && lockColumn != null ) row . deleteColumn ( lockColumn ) ; locksToDelete . clear ( ) ; lockColumn = null ; }
Fill a mutation that will release the locks . This may be used from a separate recipe to release multiple locks .
104
22
32,143
@ Override public void trackCheckpoint ( String startToken , String checkpointToken ) { tokenMap . put ( startToken , checkpointToken ) ; }
Do nothing since checkpoints aren t being persisted .
31
9
32,144
public static < K > ColumnParent getColumnParent ( ColumnFamily < ? , ? > columnFamily , ColumnPath < ? > path ) throws BadRequestException { ColumnParent cp = new ColumnParent ( ) ; cp . setColumn_family ( columnFamily . getName ( ) ) ; if ( path != null ) { Iterator < ByteBuffer > columns = path . iterator ( ) ; if ( columnFamily . getType ( ) == ColumnType . SUPER && columns . hasNext ( ) ) { cp . setSuper_column ( columns . next ( ) ) ; } } return cp ; }
Construct a Hector ColumnParent based on the information in the query and the type of column family being queried .
123
22
32,145
public static < K > org . apache . cassandra . thrift . ColumnPath getColumnPath ( ColumnFamily < ? , ? > columnFamily , ColumnPath < ? > path ) throws BadRequestException { org . apache . cassandra . thrift . ColumnPath cp = new org . apache . cassandra . thrift . ColumnPath ( ) ; cp . setColumn_family ( columnFamily . getName ( ) ) ; if ( path != null ) { Iterator < ByteBuffer > columns = path . iterator ( ) ; if ( columnFamily . getType ( ) == ColumnType . SUPER && columns . hasNext ( ) ) { cp . setSuper_column ( columns . next ( ) ) ; } if ( columns . hasNext ( ) ) { cp . setColumn ( columns . next ( ) ) ; } if ( columns . hasNext ( ) ) { throw new BadRequestException ( "Path depth of " + path . length ( ) + " not supported for column family \'" + columnFamily . getName ( ) + "\'" ) ; } } return cp ; }
Construct a Thrift ColumnPath based on the information in the query and the type of column family being queried .
229
23
32,146
public static < C > SlicePredicate getPredicate ( ColumnSlice < C > columns , Serializer < C > colSer ) { // Get all the columns if ( columns == null ) { SlicePredicate predicate = new SlicePredicate ( ) ; predicate . setSlice_range ( new SliceRange ( ByteBuffer . wrap ( new byte [ 0 ] ) , ByteBuffer . wrap ( new byte [ 0 ] ) , false , Integer . MAX_VALUE ) ) ; return predicate ; } // Get a specific list of columns if ( columns . getColumns ( ) != null ) { SlicePredicate predicate = new SlicePredicate ( ) ; predicate . setColumn_namesIsSet ( true ) ; predicate . column_names = colSer . toBytesList ( columns . getColumns ( ) ) ; return predicate ; } else { SlicePredicate predicate = new SlicePredicate ( ) ; predicate . setSlice_range ( new SliceRange ( ( columns . getStartColumn ( ) == null ) ? ByteBuffer . wrap ( new byte [ 0 ] ) : ByteBuffer . wrap ( colSer . toBytes ( columns . getStartColumn ( ) ) ) , ( columns . getEndColumn ( ) == null ) ? ByteBuffer . wrap ( new byte [ 0 ] ) : ByteBuffer . wrap ( colSer . toBytes ( columns . getEndColumn ( ) ) ) , columns . getReversed ( ) , columns . getLimit ( ) ) ) ; return predicate ; } }
Return a Hector SlicePredicate based on the provided column slice
321
13
32,147
public static ConnectionException ToConnectionPoolException ( Throwable e ) { if ( e instanceof ConnectionException ) { return ( ConnectionException ) e ; } LOGGER . debug ( e . getMessage ( ) ) ; if ( e instanceof InvalidRequestException ) { return new com . netflix . astyanax . connectionpool . exceptions . BadRequestException ( e ) ; } else if ( e instanceof TProtocolException ) { return new com . netflix . astyanax . connectionpool . exceptions . BadRequestException ( e ) ; } else if ( e instanceof UnavailableException ) { return new TokenRangeOfflineException ( e ) ; } else if ( e instanceof SocketTimeoutException ) { return new TimeoutException ( e ) ; } else if ( e instanceof TimedOutException ) { return new OperationTimeoutException ( e ) ; } else if ( e instanceof NotFoundException ) { return new com . netflix . astyanax . connectionpool . exceptions . NotFoundException ( e ) ; } else if ( e instanceof TApplicationException ) { return new ThriftStateException ( e ) ; } else if ( e instanceof AuthenticationException || e instanceof AuthorizationException ) { return new com . netflix . astyanax . connectionpool . exceptions . AuthenticationException ( e ) ; } else if ( e instanceof SchemaDisagreementException ) { return new com . netflix . astyanax . connectionpool . exceptions . SchemaDisagreementException ( e ) ; } else if ( e instanceof TTransportException ) { if ( e . getCause ( ) != null ) { if ( e . getCause ( ) instanceof SocketTimeoutException ) { return new TimeoutException ( e ) ; } if ( e . getCause ( ) . getMessage ( ) != null ) { if ( e . getCause ( ) . getMessage ( ) . toLowerCase ( ) . contains ( "connection abort" ) || e . getCause ( ) . getMessage ( ) . toLowerCase ( ) . contains ( "connection reset" ) ) { return new ConnectionAbortedException ( e ) ; } } } return new TransportException ( e ) ; } else { // e.getCause().printStackTrace(); return new UnknownException ( e ) ; } }
Convert from Thrift exceptions to an internal ConnectionPoolException
482
12
32,148
@ Override public Connection < CL > borrowConnection ( int timeout ) throws ConnectionException { Connection < CL > connection = null ; long startTime = System . currentTimeMillis ( ) ; try { // Try to get a free connection without blocking. connection = availableConnections . poll ( ) ; if ( connection != null ) { return connection ; } boolean isOpenning = tryOpenAsync ( ) ; // Wait for a connection to free up or a new one to be opened if ( timeout > 0 ) { connection = waitForConnection ( isOpenning ? config . getConnectTimeout ( ) : timeout ) ; return connection ; } else throw new PoolTimeoutException ( "Fast fail waiting for connection from pool" ) . setHost ( getHost ( ) ) . setLatency ( System . currentTimeMillis ( ) - startTime ) ; } finally { if ( connection != null ) { borrowedCount . incrementAndGet ( ) ; monitor . incConnectionBorrowed ( host , System . currentTimeMillis ( ) - startTime ) ; } } }
Create a connection as long the max hasn t been reached
219
11
32,149
private Connection < CL > waitForConnection ( int timeout ) throws ConnectionException { Connection < CL > connection = null ; long startTime = System . currentTimeMillis ( ) ; try { blockedThreads . incrementAndGet ( ) ; connection = availableConnections . poll ( timeout , TimeUnit . MILLISECONDS ) ; if ( connection != null ) return connection ; throw new PoolTimeoutException ( "Timed out waiting for connection" ) . setHost ( getHost ( ) ) . setLatency ( System . currentTimeMillis ( ) - startTime ) ; } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; throw new InterruptedOperationException ( "Thread interrupted waiting for connection" ) . setHost ( getHost ( ) ) . setLatency ( System . currentTimeMillis ( ) - startTime ) ; } finally { blockedThreads . decrementAndGet ( ) ; } }
Internal method to wait for a connection from the available connection pool .
198
13
32,150
@ Override public boolean returnConnection ( Connection < CL > connection ) { returnedCount . incrementAndGet ( ) ; monitor . incConnectionReturned ( host ) ; ConnectionException ce = connection . getLastException ( ) ; if ( ce != null ) { if ( ce instanceof IsDeadConnectionException ) { noteError ( ce ) ; internalCloseConnection ( connection ) ; return true ; } } errorsSinceLastSuccess . set ( 0 ) ; // Still within the number of max active connection if ( activeCount . get ( ) <= config . getMaxConnsPerHost ( ) ) { availableConnections . add ( connection ) ; if ( isShutdown ( ) ) { discardIdleConnections ( ) ; return true ; } } else { // maxConnsPerHost was reduced. This may end up closing too many // connections, but that's ok. We'll open them later. internalCloseConnection ( connection ) ; return true ; } return false ; }
Return a connection to this host
200
6
32,151
@ Override public void markAsDown ( ConnectionException reason ) { // Make sure we're not triggering the reconnect process more than once if ( isReconnecting . compareAndSet ( false , true ) ) { markedDownCount . incrementAndGet ( ) ; if ( reason != null && ! ( reason instanceof TimeoutException ) ) { discardIdleConnections ( ) ; } listener . onHostDown ( this ) ; monitor . onHostDown ( getHost ( ) , reason ) ; retryContext . begin ( ) ; try { long delay = retryContext . getNextDelay ( ) ; executor . schedule ( new Runnable ( ) { @ Override public void run ( ) { Thread . currentThread ( ) . setName ( "RetryService : " + host . getName ( ) ) ; try { if ( activeCount . get ( ) == 0 ) reconnect ( ) ; // Created a new connection successfully. try { retryContext . success ( ) ; if ( isReconnecting . compareAndSet ( true , false ) ) { monitor . onHostReactivated ( host , SimpleHostConnectionPool . this ) ; listener . onHostUp ( SimpleHostConnectionPool . this ) ; } } catch ( Throwable t ) { LOG . error ( "Error reconnecting client" , t ) ; } return ; } catch ( Throwable t ) { // Ignore //t.printStackTrace(); } if ( ! isShutdown ( ) ) { long delay = retryContext . getNextDelay ( ) ; executor . schedule ( this , delay , TimeUnit . MILLISECONDS ) ; } } } , delay , TimeUnit . MILLISECONDS ) ; } catch ( Exception e ) { LOG . error ( "Failed to schedule retry task for " + host . getHostName ( ) , e ) ; } } }
Mark the host as down . No new connections will be created from this host . Connections currently in use will be allowed to continue processing .
397
28
32,152
private boolean tryOpenAsync ( ) { Connection < CL > connection = null ; // Try to open a new connection, as long as we haven't reached the max if ( activeCount . get ( ) < config . getMaxConnsPerHost ( ) ) { try { if ( activeCount . incrementAndGet ( ) <= config . getMaxConnsPerHost ( ) ) { // Don't try to open too many connections at the same time. if ( pendingConnections . incrementAndGet ( ) > config . getMaxPendingConnectionsPerHost ( ) ) { pendingConnections . decrementAndGet ( ) ; } else { try { connectAttempt . incrementAndGet ( ) ; connection = factory . createConnection ( this ) ; connection . openAsync ( new Connection . AsyncOpenCallback < CL > ( ) { @ Override public void success ( Connection < CL > connection ) { openConnections . incrementAndGet ( ) ; pendingConnections . decrementAndGet ( ) ; availableConnections . add ( connection ) ; // Sanity check in case the connection // pool was closed if ( isShutdown ( ) ) { discardIdleConnections ( ) ; } } @ Override public void failure ( Connection < CL > conn , ConnectionException e ) { failedOpenConnections . incrementAndGet ( ) ; pendingConnections . decrementAndGet ( ) ; activeCount . decrementAndGet ( ) ; if ( e instanceof IsDeadConnectionException ) { noteError ( e ) ; } } } ) ; return true ; } catch ( ThrottledException e ) { // Trying to open way too many connections here } finally { if ( connection == null ) pendingConnections . decrementAndGet ( ) ; } } } } finally { if ( connection == null ) { activeCount . decrementAndGet ( ) ; } } } return false ; }
Try to open a new connection asynchronously . We don t actually return a connection here . Instead the connection will be added to idle queue when it s ready .
391
33
32,153
private void discardIdleConnections ( ) { List < Connection < CL >> connections = Lists . newArrayList ( ) ; availableConnections . drainTo ( connections ) ; activeCount . addAndGet ( - connections . size ( ) ) ; for ( Connection < CL > connection : connections ) { try { closedConnections . incrementAndGet ( ) ; connection . close ( ) ; // This is usually an async operation } catch ( Throwable t ) { // TODO } } }
Drain all idle connections and close them . Connections that are currently borrowed will not be closed here .
101
21
32,154
public void fillMutationBatch ( ColumnListMutation < ByteBuffer > clm , Object entity ) throws IllegalArgumentException , IllegalAccessException { List < ? > list = ( List < ? > ) containerField . get ( entity ) ; if ( list != null ) { for ( Object element : list ) { fillColumnMutation ( clm , element ) ; } } }
Iterate through the list and create a column for each element
81
12
32,155
public void fillColumnMutation ( ColumnListMutation < ByteBuffer > clm , Object entity ) { try { ByteBuffer columnName = toColumnName ( entity ) ; ByteBuffer value = valueMapper . toByteBuffer ( entity ) ; clm . putColumn ( columnName , value ) ; } catch ( Exception e ) { throw new PersistenceException ( "failed to fill mutation batch" , e ) ; } }
Add a column based on the provided entity
89
8
32,156
public boolean setField ( Object entity , ColumnList < ByteBuffer > columns ) throws Exception { List < Object > list = getOrCreateField ( entity ) ; // Iterate through columns and add embedded entities to the list for ( com . netflix . astyanax . model . Column < ByteBuffer > c : columns ) { list . add ( fromColumn ( c ) ) ; } return true ; }
Set the collection field using the provided column list of embedded entities
84
12
32,157
public List < ByteBuffer > getBufferList ( ) { List < ByteBuffer > result = buffers ; reset ( ) ; for ( ByteBuffer buffer : result ) { buffer . flip ( ) ; } return result ; }
Returns all data written and resets the stream to be empty .
45
13
32,158
public void prepend ( List < ByteBuffer > lists ) { for ( ByteBuffer buffer : lists ) { buffer . position ( buffer . limit ( ) ) ; } buffers . addAll ( 0 , lists ) ; }
Prepend a list of ByteBuffers to this stream .
45
12
32,159
public < C2 > ColumnPath < C > append ( C2 name , Serializer < C2 > ser ) { path . add ( ByteBuffer . wrap ( ser . toBytes ( name ) ) ) ; return this ; }
Add a depth to the path
48
6
32,160
public static java . util . UUID getUniqueTimeUUIDinMillis ( ) { return new java . util . UUID ( UUIDGen . newTime ( ) , UUIDGen . getClockSeqAndNode ( ) ) ; }
Gets a new and unique time uuid in milliseconds . It is useful to use in a TimeUUIDType sorted column family .
52
27
32,161
public static ByteBuffer asByteBuffer ( java . util . UUID uuid ) { if ( uuid == null ) { return null ; } return ByteBuffer . wrap ( asByteArray ( uuid ) ) ; }
Coverts a java . util . UUID into a ByteBuffer .
46
14
32,162
public static UUID uuid ( ByteBuffer bb ) { bb = bb . slice ( ) ; return new UUID ( bb . getLong ( ) , bb . getLong ( ) ) ; }
Converts a ByteBuffer containing a UUID into a java . util . UUID
46
17
32,163
private ByteBuffer toColumnName ( Object obj ) { SimpleCompositeBuilder composite = new SimpleCompositeBuilder ( bufferSize , Equality . EQUAL ) ; // Iterate through each component and add to a CompositeType structure for ( FieldMapper < ? > mapper : components ) { try { composite . addWithoutControl ( mapper . toByteBuffer ( obj ) ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } return composite . get ( ) ; }
Return the column name byte buffer for this entity
105
9
32,164
T constructEntity ( K id , com . netflix . astyanax . model . Column < ByteBuffer > column ) { try { // First, construct the parent class and give it an id T entity = clazz . newInstance ( ) ; idMapper . setValue ( entity , id ) ; setEntityFieldsFromColumnName ( entity , column . getRawName ( ) . duplicate ( ) ) ; valueMapper . setField ( entity , column . getByteBufferValue ( ) . duplicate ( ) ) ; return entity ; } catch ( Exception e ) { throw new PersistenceException ( "failed to construct entity" , e ) ; } }
Construct an entity object from a row key and column list .
137
12
32,165
Object fromColumn ( K id , com . netflix . astyanax . model . Column < ByteBuffer > c ) { try { // Allocate a new entity Object entity = clazz . newInstance ( ) ; idMapper . setValue ( entity , id ) ; setEntityFieldsFromColumnName ( entity , c . getRawName ( ) . duplicate ( ) ) ; valueMapper . setField ( entity , c . getByteBufferValue ( ) . duplicate ( ) ) ; return entity ; } catch ( Exception e ) { throw new PersistenceException ( "failed to construct entity" , e ) ; } }
Return an object from the column
131
6
32,166
public String getComparatorType ( ) { StringBuilder sb = new StringBuilder ( ) ; sb . append ( "CompositeType(" ) ; sb . append ( StringUtils . join ( Collections2 . transform ( components , new Function < FieldMapper < ? > , String > ( ) { public String apply ( FieldMapper < ? > input ) { return input . serializer . getComparatorType ( ) . getTypeName ( ) ; } } ) , "," ) ) ; sb . append ( ")" ) ; return sb . toString ( ) ; }
Return the cassandra comparator type for this composite structure
126
11
32,167
public ByteBuffer readData ( ) throws Exception { ColumnList < C > result = keyspace . prepareQuery ( columnFamily ) . setConsistencyLevel ( consistencyLevel ) . getKey ( key ) . execute ( ) . getResult ( ) ; boolean hasColumn = false ; ByteBuffer data = null ; for ( Column < C > column : result ) { if ( column . getTtl ( ) == 0 ) { if ( hasColumn ) { throw new IllegalStateException ( "Row has multiple uniquneness locks" ) ; } hasColumn = true ; data = column . getByteBufferValue ( ) ; } } if ( ! hasColumn ) { throw new NotFoundException ( this . key . toString ( ) + " has no uniquness lock" ) ; } return data ; }
Read the data stored with the unique row . This data is normally a foreign key to another column family .
169
21
32,168
public static < T > Callable < T > decorateWithBarrier ( CyclicBarrier barrier , Callable < T > callable ) { return new BarrierCallableDecorator < T > ( barrier , callable ) ; }
Create a callable that waits on a barrier before starting execution
50
12
32,169
private synchronized < R > OperationResult < R > executeDdlOperation ( AbstractOperationImpl < R > operation , RetryPolicy retry ) throws OperationException , ConnectionException { ConnectionException lastException = null ; for ( int i = 0 ; i < 2 ; i ++ ) { operation . setPinnedHost ( ddlHost ) ; try { OperationResult < R > result = connectionPool . executeWithFailover ( operation , retry ) ; ddlHost = result . getHost ( ) ; return result ; } catch ( ConnectionException e ) { lastException = e ; if ( e instanceof IsDeadConnectionException ) { ddlHost = null ; } } } throw lastException ; }
Attempt to execute the DDL operation on the same host
145
11
32,170
private void precheckSchemaAgreement ( Client client ) throws Exception { Map < String , List < String > > schemas = client . describe_schema_versions ( ) ; if ( schemas . size ( ) > 1 ) { throw new SchemaDisagreementException ( "Can't change schema due to pending schema agreement" ) ; } }
Do a quick check to see if there is a schema disagreement . This is done as an extra precaution to reduce the chances of putting the cluster into a bad state . This will not gurantee however that by the time a schema change is made the cluster will be in the same state .
74
59
32,171
private ThriftColumnFamilyDefinitionImpl toThriftColumnFamilyDefinition ( Map < String , Object > options , ColumnFamily columnFamily ) { ThriftColumnFamilyDefinitionImpl def = new ThriftColumnFamilyDefinitionImpl ( ) ; Map < String , Object > internalOptions = Maps . newHashMap ( ) ; if ( options != null ) internalOptions . putAll ( options ) ; internalOptions . put ( "keyspace" , getKeyspaceName ( ) ) ; if ( columnFamily != null ) { internalOptions . put ( "name" , columnFamily . getName ( ) ) ; if ( ! internalOptions . containsKey ( "comparator_type" ) ) internalOptions . put ( "comparator_type" , columnFamily . getColumnSerializer ( ) . getComparatorType ( ) . getTypeName ( ) ) ; if ( ! internalOptions . containsKey ( "key_validation_class" ) ) internalOptions . put ( "key_validation_class" , columnFamily . getKeySerializer ( ) . getComparatorType ( ) . getTypeName ( ) ) ; if ( columnFamily . getDefaultValueSerializer ( ) != null && ! internalOptions . containsKey ( "default_validation_class" ) ) internalOptions . put ( "default_validation_class" , columnFamily . getDefaultValueSerializer ( ) . getComparatorType ( ) . getTypeName ( ) ) ; } def . setFields ( internalOptions ) ; return def ; }
Convert a Map of options to an internal thrift column family definition
320
14
32,172
private ThriftKeyspaceDefinitionImpl toThriftKeyspaceDefinition ( final Map < String , Object > options ) { ThriftKeyspaceDefinitionImpl def = new ThriftKeyspaceDefinitionImpl ( ) ; Map < String , Object > internalOptions = Maps . newHashMap ( ) ; if ( options != null ) internalOptions . putAll ( options ) ; if ( internalOptions . containsKey ( "name" ) && ! internalOptions . get ( "name" ) . equals ( getKeyspaceName ( ) ) ) { throw new RuntimeException ( String . format ( "'name' attribute must match keyspace name. Expected '%s' but got '%s'" , getKeyspaceName ( ) , internalOptions . get ( "name" ) ) ) ; } else { internalOptions . put ( "name" , getKeyspaceName ( ) ) ; } def . setFields ( internalOptions ) ; return def ; }
Convert a Map of options to an internal thrift keyspace definition
195
14
32,173
private List < Future < Boolean > > startTasks ( ExecutorService executor , List < Callable < Boolean > > callables ) { List < Future < Boolean >> tasks = Lists . newArrayList ( ) ; for ( Callable < Boolean > callable : callables ) { tasks . add ( executor . submit ( callable ) ) ; } return tasks ; }
Submit all the callables to the executor by synchronize their execution so they all start AFTER the have all been submitted .
79
25
32,174
public < T , K > void remove ( ColumnFamily < K , String > columnFamily , T item ) throws Exception { @ SuppressWarnings ( { "unchecked" } ) Class < T > clazz = ( Class < T > ) item . getClass ( ) ; Mapping < T > mapping = getMapping ( clazz ) ; @ SuppressWarnings ( { "unchecked" } ) Class < K > idFieldClass = ( Class < K > ) mapping . getIdFieldClass ( ) ; // safe - // after // erasure, // this is // all // just // Class // anyway MutationBatch mutationBatch = keyspace . prepareMutationBatch ( ) ; mutationBatch . withRow ( columnFamily , mapping . getIdValue ( item , idFieldClass ) ) . delete ( ) ; mutationBatch . execute ( ) ; }
Remove the given item
187
4
32,175
public < T , K > List < T > getAll ( ColumnFamily < K , String > columnFamily , Class < T > itemClass ) throws Exception { Mapping < T > mapping = getMapping ( itemClass ) ; Rows < K , String > result = keyspace . prepareQuery ( columnFamily ) . getAllRows ( ) . execute ( ) . getResult ( ) ; return mapping . getAll ( result ) ; }
Get all rows of the specified item
93
7
32,176
public < T > Mapping < T > getMapping ( Class < T > clazz ) { return ( cache != null ) ? cache . getMapping ( clazz , annotationSet ) : new Mapping < T > ( clazz , annotationSet ) ; }
Return the mapping instance for the given class
56
8
32,177
@ Override public void start ( ) { ConnectionPoolMBeanManager . getInstance ( ) . registerMonitor ( config . getName ( ) , this ) ; String seeds = config . getSeeds ( ) ; if ( seeds != null && ! seeds . isEmpty ( ) ) { setHosts ( config . getSeedHosts ( ) ) ; } config . getLatencyScoreStrategy ( ) . start ( new Listener ( ) { @ Override public void onUpdate ( ) { rebuildPartitions ( ) ; } @ Override public void onReset ( ) { rebuildPartitions ( ) ; } } ) ; }
Starts the conn pool and resources associated with it
134
10
32,178
@ Override public void shutdown ( ) { ConnectionPoolMBeanManager . getInstance ( ) . unregisterMonitor ( config . getName ( ) , this ) ; for ( Entry < Host , HostConnectionPool < CL > > pool : hosts . entrySet ( ) ) { pool . getValue ( ) . shutdown ( ) ; } config . getLatencyScoreStrategy ( ) . shutdown ( ) ; config . shutdown ( ) ; }
Clean up resources associated with the conn pool
92
8
32,179
@ Override public final synchronized boolean addHost ( Host host , boolean refresh ) { // Already exists if ( hosts . containsKey ( host ) ) { // Check to see if we are adding token ranges or if the token ranges changed // which will force a rebuild of the token topology Host existingHost = hosts . get ( host ) . getHost ( ) ; if ( existingHost . getTokenRanges ( ) . size ( ) != host . getTokenRanges ( ) . size ( ) ) { existingHost . setTokenRanges ( host . getTokenRanges ( ) ) ; return true ; } ArrayList < TokenRange > currentTokens = Lists . newArrayList ( existingHost . getTokenRanges ( ) ) ; ArrayList < TokenRange > newTokens = Lists . newArrayList ( host . getTokenRanges ( ) ) ; Collections . sort ( currentTokens , compareByStartToken ) ; Collections . sort ( newTokens , compareByStartToken ) ; for ( int i = 0 ; i < currentTokens . size ( ) ; i ++ ) { if ( ! currentTokens . get ( i ) . getStartToken ( ) . equals ( newTokens . get ( i ) . getStartToken ( ) ) || ! currentTokens . get ( i ) . getEndToken ( ) . equals ( newTokens . get ( i ) . getEndToken ( ) ) ) { return false ; } } existingHost . setTokenRanges ( host . getTokenRanges ( ) ) ; return true ; } else { HostConnectionPool < CL > pool = newHostConnectionPool ( host , factory , config ) ; if ( null == hosts . putIfAbsent ( host , pool ) ) { try { monitor . onHostAdded ( host , pool ) ; if ( refresh ) { topology . addPool ( pool ) ; rebuildPartitions ( ) ; } pool . primeConnections ( config . getInitConnsPerHost ( ) ) ; } catch ( Exception e ) { // Ignore, pool will have been marked down internally } return true ; } else { return false ; } } }
Add host to the system . May need to rebuild the partition map of the system
438
16
32,180
@ Override public List < HostConnectionPool < CL > > getActivePools ( ) { return ImmutableList . copyOf ( topology . getAllPools ( ) . getPools ( ) ) ; }
list of all active pools
46
5
32,181
@ Override public synchronized boolean removeHost ( Host host , boolean refresh ) { HostConnectionPool < CL > pool = hosts . remove ( host ) ; if ( pool != null ) { topology . removePool ( pool ) ; rebuildPartitions ( ) ; monitor . onHostRemoved ( host ) ; pool . shutdown ( ) ; return true ; } else { return false ; } }
Remove host from the system . Shuts down pool associated with the host and rebuilds partition map
79
19
32,182
@ Override public < R > OperationResult < R > executeWithFailover ( Operation < CL , R > op , RetryPolicy retry ) throws ConnectionException { //Tracing operation OperationTracer opsTracer = config . getOperationTracer ( ) ; final AstyanaxContext context = opsTracer . getAstyanaxContext ( ) ; if ( context != null ) { opsTracer . onCall ( context , op ) ; } retry . begin ( ) ; ConnectionException lastException = null ; do { try { OperationResult < R > result = newExecuteWithFailover ( op ) . tryOperation ( op ) ; retry . success ( ) ; if ( context != null ) opsTracer . onSuccess ( context , op ) ; return result ; } catch ( OperationException e ) { if ( context != null ) opsTracer . onException ( context , op , e ) ; retry . failure ( e ) ; throw e ; } catch ( ConnectionException e ) { lastException = e ; } if ( retry . allowRetry ( ) ) { LOG . debug ( "Retry policy[" + retry . toString ( ) + "] will allow a subsequent retry for operation [" + op . getClass ( ) + "] on keyspace [" + op . getKeyspace ( ) + "] on pinned host[" + op . getPinnedHost ( ) + "]" ) ; } } while ( retry . allowRetry ( ) ) ; if ( context != null && lastException != null ) opsTracer . onException ( context , op , lastException ) ; retry . failure ( lastException ) ; throw lastException ; }
Executes the operation using failover and retry strategy
354
11
32,183
public BoundStatement getBoundStatement ( Q query , boolean useCaching ) { PreparedStatement pStatement = getPreparedStatement ( query , useCaching ) ; return bindValues ( pStatement , query ) ; }
Get the bound statement from the prepared statement
45
8
32,184
private Where addWhereClauseForRowRange ( String keyAlias , Select select , RowRange < ? > rowRange ) { Where where = null ; boolean keyIsPresent = false ; boolean tokenIsPresent = false ; if ( rowRange . getStartKey ( ) != null || rowRange . getEndKey ( ) != null ) { keyIsPresent = true ; } if ( rowRange . getStartToken ( ) != null || rowRange . getEndToken ( ) != null ) { tokenIsPresent = true ; } if ( keyIsPresent && tokenIsPresent ) { throw new RuntimeException ( "Cannot provide both token and keys for range query" ) ; } if ( keyIsPresent ) { if ( rowRange . getStartKey ( ) != null && rowRange . getEndKey ( ) != null ) { where = select . where ( gte ( keyAlias , BIND_MARKER ) ) . and ( lte ( keyAlias , BIND_MARKER ) ) ; } else if ( rowRange . getStartKey ( ) != null ) { where = select . where ( gte ( keyAlias , BIND_MARKER ) ) ; } else if ( rowRange . getEndKey ( ) != null ) { where = select . where ( lte ( keyAlias , BIND_MARKER ) ) ; } } else if ( tokenIsPresent ) { String tokenOfKey = "token(" + keyAlias + ")" ; if ( rowRange . getStartToken ( ) != null && rowRange . getEndToken ( ) != null ) { where = select . where ( gte ( tokenOfKey , BIND_MARKER ) ) . and ( lte ( tokenOfKey , BIND_MARKER ) ) ; } else if ( rowRange . getStartToken ( ) != null ) { where = select . where ( gte ( tokenOfKey , BIND_MARKER ) ) ; } else if ( rowRange . getEndToken ( ) != null ) { where = select . where ( lte ( tokenOfKey , BIND_MARKER ) ) ; } } else { where = select . where ( ) ; } if ( rowRange . getCount ( ) > 0 ) { // TODO: fix this //where.limit(rowRange.getCount()); } return where ; }
Private helper for constructing the where clause for row ranges
498
10
32,185
private void bindWhereClauseForRowRange ( List < Object > values , RowRange < ? > rowRange ) { boolean keyIsPresent = false ; boolean tokenIsPresent = false ; if ( rowRange . getStartKey ( ) != null || rowRange . getEndKey ( ) != null ) { keyIsPresent = true ; } if ( rowRange . getStartToken ( ) != null || rowRange . getEndToken ( ) != null ) { tokenIsPresent = true ; } if ( keyIsPresent && tokenIsPresent ) { throw new RuntimeException ( "Cannot provide both token and keys for range query" ) ; } if ( keyIsPresent ) { if ( rowRange . getStartKey ( ) != null ) { values . add ( rowRange . getStartKey ( ) ) ; } if ( rowRange . getEndKey ( ) != null ) { values . add ( rowRange . getEndKey ( ) ) ; } } else if ( tokenIsPresent ) { BigInteger startTokenB = rowRange . getStartToken ( ) != null ? new BigInteger ( rowRange . getStartToken ( ) ) : null ; BigInteger endTokenB = rowRange . getEndToken ( ) != null ? new BigInteger ( rowRange . getEndToken ( ) ) : null ; Long startToken = startTokenB . longValue ( ) ; Long endToken = endTokenB . longValue ( ) ; if ( startToken != null && endToken != null ) { if ( startToken != null ) { values . add ( startToken ) ; } if ( endToken != null ) { values . add ( endToken ) ; } } if ( rowRange . getCount ( ) > 0 ) { // TODO: fix this //where.limit(rowRange.getCount()); } return ; } }
Private helper for constructing the bind values for the given row range . Note that the assumption here is that we have a previously constructed prepared statement that we can bind these values with .
383
35
32,186
@ Override public OperationResult < R > tryOperation ( Operation < CL , R > operation ) throws ConnectionException { Operation < CL , R > filteredOperation = config . getOperationFilterFactory ( ) . attachFilter ( operation ) ; while ( true ) { attemptCounter ++ ; try { connection = borrowConnection ( filteredOperation ) ; startTime = System . currentTimeMillis ( ) ; OperationResult < R > result = connection . execute ( filteredOperation ) ; result . setAttemptsCount ( attemptCounter ) ; monitor . incOperationSuccess ( getCurrentHost ( ) , result . getLatency ( ) ) ; return result ; } catch ( Exception e ) { ConnectionException ce = ( e instanceof ConnectionException ) ? ( ConnectionException ) e : new UnknownException ( e ) ; try { informException ( ce ) ; monitor . incFailover ( ce . getHost ( ) , ce ) ; } catch ( ConnectionException ex ) { monitor . incOperationFailure ( getCurrentHost ( ) , ex ) ; throw ex ; } } finally { releaseConnection ( ) ; } } }
Basic impl that repeatedly borrows a conn and tries to execute the operation while maintaining metrics for success conn attempts failures and latencies for operation executions
223
28
32,187
@ Override public String getVersion ( ) throws ConnectionException { return connectionPool . executeWithFailover ( new AbstractOperationImpl < String > ( tracerFactory . newTracer ( CassandraOperationType . GET_VERSION ) ) { @ Override public String internalExecute ( Client client , ConnectionContext state ) throws Exception { return client . describe_version ( ) ; } } , config . getRetryPolicy ( ) . duplicate ( ) ) . getResult ( ) ; }
Get the version from the cluster
99
6
32,188
@ Override public Boolean call ( ) throws Exception { error . set ( null ) ; List < Callable < Boolean > > subtasks = Lists . newArrayList ( ) ; // We are iterating the entire ring using an arbitrary number of threads if ( this . concurrencyLevel != null || startToken != null || endToken != null ) { List < TokenRange > tokens = partitioner . splitTokenRange ( startToken == null ? partitioner . getMinToken ( ) : startToken , endToken == null ? partitioner . getMinToken ( ) : endToken , this . concurrencyLevel == null ? 1 : this . concurrencyLevel ) ; for ( TokenRange range : tokens ) { subtasks . add ( makeTokenRangeTask ( range . getStartToken ( ) , range . getEndToken ( ) ) ) ; } } // We are iterating through each token range else { List < TokenRange > ranges = keyspace . describeRing ( dc , rack ) ; for ( TokenRange range : ranges ) { if ( range . getStartToken ( ) . equals ( range . getEndToken ( ) ) ) subtasks . add ( makeTokenRangeTask ( range . getStartToken ( ) , range . getEndToken ( ) ) ) ; else subtasks . add ( makeTokenRangeTask ( partitioner . getTokenMinusOne ( range . getStartToken ( ) ) , range . getEndToken ( ) ) ) ; } } try { // Use a local executor if ( executor == null ) { ExecutorService localExecutor = Executors . newFixedThreadPool ( subtasks . size ( ) , new ThreadFactoryBuilder ( ) . setDaemon ( true ) . setNameFormat ( "AstyanaxAllRowsReader-%d" ) . build ( ) ) ; try { futures . addAll ( startTasks ( localExecutor , subtasks ) ) ; return waitForTasksToFinish ( ) ; } finally { localExecutor . shutdownNow ( ) ; } } // Use an externally provided executor else { futures . addAll ( startTasks ( executor , subtasks ) ) ; return waitForTasksToFinish ( ) ; } } catch ( Exception e ) { error . compareAndSet ( null , e ) ; LOG . warn ( "AllRowsReader terminated. " + e . getMessage ( ) , e ) ; cancel ( ) ; throw error . get ( ) ; } }
Main execution block for the all rows query .
515
9
32,189
protected XMLStreamReader createStreamReader ( InputStream input ) throws XMLStreamException { if ( inputFactory == null ) { inputFactory = XMLInputFactory . newInstance ( ) ; } return inputFactory . createXMLStreamReader ( input ) ; }
Get a new XML stream reader .
52
7
32,190
public ThriftType getThriftType ( Type javaType ) throws IllegalArgumentException { ThriftType thriftType = getThriftTypeFromCache ( javaType ) ; if ( thriftType == null ) { thriftType = buildThriftType ( javaType ) ; } return thriftType ; }
Gets the ThriftType for the specified Java type . The native Thrift type for the Java type will be inferred from the Java type and if necessary type coercions will be applied .
66
38
32,191
public < T extends Enum < T > > ThriftEnumMetadata < ? > getThriftEnumMetadata ( Class < ? > enumClass ) { ThriftEnumMetadata < ? > enumMetadata = enums . get ( enumClass ) ; if ( enumMetadata == null ) { enumMetadata = new ThriftEnumMetadataBuilder <> ( ( Class < T > ) enumClass ) . build ( ) ; ThriftEnumMetadata < ? > current = enums . putIfAbsent ( enumClass , enumMetadata ) ; if ( current != null ) { enumMetadata = current ; } } return enumMetadata ; }
Gets the ThriftEnumMetadata for the specified enum class . If the enum class contains a method annotated with
143
25
32,192
public < T > ThriftStructMetadata getThriftStructMetadata ( Type structType ) { ThriftStructMetadata structMetadata = structs . get ( structType ) ; Class < ? > structClass = TypeToken . of ( structType ) . getRawType ( ) ; if ( structMetadata == null ) { if ( structClass . isAnnotationPresent ( ThriftStruct . class ) ) { structMetadata = extractThriftStructMetadata ( structType ) ; } else if ( structClass . isAnnotationPresent ( ThriftUnion . class ) ) { structMetadata = extractThriftUnionMetadata ( structType ) ; } else { throw new IllegalStateException ( "getThriftStructMetadata called on a class that has no @ThriftStruct or @ThriftUnion annotation" ) ; } ThriftStructMetadata current = structs . putIfAbsent ( structType , structMetadata ) ; if ( current != null ) { structMetadata = current ; } } return structMetadata ; }
Gets the ThriftStructMetadata for the specified struct class . The struct class must be annotated with
219
22
32,193
private FieldDefinition declareTypeField ( ) { FieldDefinition typeField = new FieldDefinition ( a ( PRIVATE , FINAL ) , "type" , type ( ThriftType . class ) ) ; classDefinition . addField ( typeField ) ; // add constructor parameter to initialize this field parameters . add ( typeField , ThriftType . struct ( metadata ) ) ; return typeField ; }
Declares the private ThriftType field type .
81
10
32,194
private Map < Short , FieldDefinition > declareCodecFields ( ) { Map < Short , FieldDefinition > codecFields = new TreeMap <> ( ) ; for ( ThriftFieldMetadata fieldMetadata : metadata . getFields ( ) ) { if ( needsCodec ( fieldMetadata ) ) { ThriftCodec < ? > codec = codecManager . getCodec ( fieldMetadata . getThriftType ( ) ) ; String fieldName = fieldMetadata . getName ( ) + "Codec" ; FieldDefinition codecField = new FieldDefinition ( a ( PRIVATE , FINAL ) , fieldName , type ( codec . getClass ( ) ) ) ; classDefinition . addField ( codecField ) ; codecFields . put ( fieldMetadata . getId ( ) , codecField ) ; parameters . add ( codecField , codec ) ; } } return codecFields ; }
Declares a field for each delegate codec
192
8
32,195
private void defineConstructor ( ) { // // declare the constructor MethodDefinition constructor = new MethodDefinition ( a ( PUBLIC ) , "<init>" , type ( void . class ) , parameters . getParameters ( ) ) ; // invoke super (Object) constructor constructor . loadThis ( ) . invokeConstructor ( type ( Object . class ) ) ; // this.foo = foo; for ( FieldDefinition fieldDefinition : parameters . getFields ( ) ) { constructor . loadThis ( ) . loadVariable ( fieldDefinition . getName ( ) ) . putField ( codecType , fieldDefinition ) ; } // return; (implicit) constructor . ret ( ) ; classDefinition . addMethod ( constructor ) ; }
Defines the constructor with a parameter for the ThriftType and the delegate codecs . The constructor simply assigns these parameters to the class fields .
147
29
32,196
private void defineGetTypeMethod ( ) { classDefinition . addMethod ( new MethodDefinition ( a ( PUBLIC ) , "getType" , type ( ThriftType . class ) ) . loadThis ( ) . getField ( codecType , typeField ) . retObject ( ) ) ; }
Defines the getType method which simply returns the value of the type field .
61
16
32,197
private void defineReadStructMethod ( ) { MethodDefinition read = new MethodDefinition ( a ( PUBLIC ) , "read" , structType , arg ( "protocol" , TProtocol . class ) ) . addException ( Exception . class ) ; // TProtocolReader reader = new TProtocolReader(protocol); read . addLocalVariable ( type ( TProtocolReader . class ) , "reader" ) ; read . newObject ( TProtocolReader . class ) ; read . dup ( ) ; read . loadVariable ( "protocol" ) ; read . invokeConstructor ( type ( TProtocolReader . class ) , type ( TProtocol . class ) ) ; read . storeVariable ( "reader" ) ; // read all of the data in to local variables Map < Short , LocalVariableDefinition > structData = readFieldValues ( read ) ; // build the struct LocalVariableDefinition result = buildStruct ( read , structData ) ; // push instance on stack, and return it read . loadVariable ( result ) . retObject ( ) ; classDefinition . addMethod ( read ) ; }
Defines the read method for a struct .
233
9
32,198
private void injectStructFields ( MethodDefinition read , LocalVariableDefinition instance , Map < Short , LocalVariableDefinition > structData ) { for ( ThriftFieldMetadata field : metadata . getFields ( THRIFT_FIELD ) ) { injectField ( read , field , instance , structData . get ( field . getId ( ) ) ) ; } }
Defines the code to inject data into the struct public fields .
75
13
32,199
private void injectStructMethods ( MethodDefinition read , LocalVariableDefinition instance , Map < Short , LocalVariableDefinition > structData ) { for ( ThriftMethodInjection methodInjection : metadata . getMethodInjections ( ) ) { injectMethod ( read , methodInjection , instance , structData ) ; } }
Defines the code to inject data into the struct methods .
66
12