idx int64 0 41.2k | question stringlengths 83 4.15k | target stringlengths 5 715 |
|---|---|---|
7,000 | public Set < String > getPermissionsForRole ( RoleIdentifier id ) { checkNotNull ( id , "id" ) ; return _delegate . getPermissionsForRole ( id ) ; } | Although this implementation overrides specific roles the permissions associated with them are still managed by the permission manager so always defer to the delegate to read permissions . |
7,001 | private static ByteBuffer trim ( ByteBuffer buf ) { if ( buf . capacity ( ) <= 4 * buf . remaining ( ) ) { return buf ; } else { ByteBuffer clone = ByteBuffer . allocate ( buf . remaining ( ) ) ; buf . get ( clone . array ( ) ) ; return clone ; } } | Serialized Astyanax Composite objects use a lot of memory . Trim it down . |
7,002 | private void verifyPermission ( Subject subject , String permission ) throws UnauthorizedException { if ( ! subject . hasPermission ( permission ) ) { throw new UnauthorizedException ( ) ; } } | Verifies whether the user has a specific permission . If not it throws a standard UnauthorizedException . |
7,003 | private void verifyPermissionToGrantRoles ( Subject subject , Iterable < RoleIdentifier > roleIds ) { Set < RoleIdentifier > unauthorizedIds = Sets . newTreeSet ( ) ; boolean anyAuthorized = false ; for ( RoleIdentifier roleId : roleIds ) { if ( subject . hasPermission ( Permissions . grantRole ( roleId ) ) ) { anyAuthorized = true ; } else { unauthorizedIds . add ( roleId ) ; } } if ( ! unauthorizedIds . isEmpty ( ) ) { if ( ! anyAuthorized ) { throw new UnauthorizedException ( ) ; } else { throw new UnauthorizedException ( "Not authorized for roles: " + Joiner . on ( ", " ) . join ( unauthorizedIds ) ) ; } } } | Verifies whether the user has permission to grant all of the provided roles . If not it throws an UnauthorizedException . |
7,004 | private RuntimeException convertUncheckedException ( Exception e ) { if ( Throwables . getRootCause ( e ) instanceof TimeoutException ) { _lockTimeoutMeter . mark ( ) ; throw new ServiceUnavailableException ( "Failed to acquire update lock, try again later" , new Random ( ) . nextInt ( 5 ) + 1 ) ; } throw Throwables . propagate ( e ) ; } | Converts unchecked exceptions to appropriate API exceptions . Specifically if the subsystem fails to acquire the synchronization lock for a non - read operation it will throw a TimeoutException . This method converts that to a ServiceUnavailableException . All other exceptions are rethrown as - is . |
7,005 | private void ensureTextSet ( ) { if ( _text . limit ( ) == 0 ) { checkState ( _map != null , "Neither JSON text nor map has been set" ) ; _text . clear ( ) ; try { JsonHelper . writeJson ( new ByteBufferOutputStream ( _text ) , _map ) ; _text . flip ( ) ; } catch ( Exception e ) { if ( Iterables . tryFind ( Throwables . getCausalChain ( e ) , Predicates . instanceOf ( BufferOverflowException . class ) ) . isPresent ( ) ) { byte [ ] utf8 = JsonHelper . asUtf8Bytes ( _map ) ; _text = ByteBuffer . wrap ( utf8 ) ; } else { throw Throwables . propagate ( e ) ; } } } } | Ensures that either the UTF - 8 text has been set directly or by indirectly converting the Map contents to JSON . |
7,006 | public void readFields ( DataInput in ) throws IOException { int length = WritableUtils . readVInt ( in ) ; if ( length > _text . capacity ( ) ) { _text = ByteBuffer . allocate ( length ) ; } in . readFully ( _text . array ( ) , 0 , length ) ; _text . position ( 0 ) ; _text . limit ( length ) ; _map = null ; } | Sets this instance s content from the input . |
7,007 | public void write ( DataOutput out ) throws IOException { ensureTextSet ( ) ; WritableUtils . writeVInt ( out , _text . limit ( ) ) ; out . write ( _text . array ( ) , 0 , _text . limit ( ) ) ; } | Writes this instance s content to the output . Note that the format for Row is identical to Text so even though the classes are unrelated a Text object can read the bytes written by a Row as a JSON string . |
7,008 | public StashReader getLockedView ( ) { return new FixedStashReader ( URI . create ( String . format ( "s3://%s/%s" , _bucket , getRootPath ( ) ) ) , _s3 ) ; } | Returns a new StashReader that is locked to the same stash time the instance is currently using . Future calls to lock or unlock the stash time on this instance will not affect the returned instance . |
7,009 | private ScanPlan createPlan ( String scanId , ScanOptions options ) { ScanPlan plan = new ScanPlan ( scanId , options ) ; for ( String placement : options . getPlacements ( ) ) { String cluster = _dataTools . getPlacementCluster ( placement ) ; ScanRangeSplits scanRangeSplits = _dataTools . getScanRangeSplits ( placement , options . getRangeScanSplitSize ( ) , Optional . < ScanRange > absent ( ) ) ; if ( ! options . isScanByAZ ( ) ) { scanRangeSplits = scanRangeSplits . combineGroups ( ) ; } for ( ScanRangeSplits . SplitGroup splitGroup : scanRangeSplits . getSplitGroups ( ) ) { plan . startNewBatchForCluster ( cluster ) ; for ( ScanRangeSplits . TokenRange tokenRange : splitGroup . getTokenRanges ( ) ) { plan . addTokenRangeToCurrentBatchForCluster ( cluster , placement , tokenRange . getScanRanges ( ) ) ; } } } return plan ; } | Returns a ScanPlan based on the Cassandra rings and token ranges . |
7,010 | public ScanStatus resubmitWorkflowTasks ( String scanId ) { ScanStatus status = _scanStatusDAO . getScanStatus ( scanId ) ; if ( status == null ) { return null ; } if ( status . getCompleteTime ( ) == null ) { for ( ScanRangeStatus active : status . getActiveScanRanges ( ) ) { _scanWorkflow . addScanRangeTask ( scanId , active . getTaskId ( ) , active . getPlacement ( ) , active . getScanRange ( ) ) ; } _scanWorkflow . scanStatusUpdated ( scanId ) ; } return status ; } | Sometimes due to unexpected errors while submitting scan ranges to the underlying queues a scan can get stuck . This method takes all available tasks for a scan and resubmits them . This method is safe because the underlying system is resilient to task resubmissions and concurrent work on the same task . |
7,011 | public Instant getNextExecutionTimeAfter ( Instant now ) { OffsetTime timeOfDay = OffsetTime . from ( TIME_OF_DAY_FORMAT . parse ( getTimeOfDay ( ) ) ) ; Instant nextExecTime = now . atOffset ( ZoneOffset . UTC ) . with ( timeOfDay ) . toInstant ( ) ; while ( nextExecTime . isBefore ( now ) ) { nextExecTime = nextExecTime . plus ( Duration . ofDays ( 1 ) ) ; } return nextExecTime ; } | Gets the first execution time for the given scan and upload which is at or after now . |
7,012 | public ScanWriter createScanWriter ( final int taskId , Set < ScanDestination > destinations ) { checkArgument ( ! destinations . isEmpty ( ) , "destinations.isEmpty()" ) ; if ( destinations . size ( ) == 1 ) { return createScanWriter ( taskId , Iterables . getOnlyElement ( destinations ) ) ; } return new MultiScanWriter ( ImmutableList . copyOf ( Iterables . transform ( destinations , new Function < ScanDestination , ScanWriter > ( ) { public ScanWriter apply ( ScanDestination destination ) { return createScanWriter ( taskId , destination ) ; } } ) ) ) ; } | Creates a scan writer from the given destinations . |
7,013 | public ScanWriter createScanWriter ( int taskId , ScanDestination destination ) { if ( destination . isDiscarding ( ) ) { return _scanWriterFactory . createDiscardingScanWriter ( taskId , Optional . < Integer > absent ( ) ) ; } URI uri = destination . getUri ( ) ; String scheme = uri . getScheme ( ) ; if ( "file" . equals ( scheme ) ) { return _scanWriterFactory . createFileScanWriter ( taskId , uri , Optional . < Integer > absent ( ) ) ; } if ( "s3" . equals ( scheme ) ) { return _scanWriterFactory . createS3ScanWriter ( taskId , uri , Optional . < Integer > absent ( ) ) ; } throw new IllegalArgumentException ( "Unsupported destination: " + destination ) ; } | Creates a scan writer for the given destination . |
7,014 | public < K , C > ColumnMutation prepareColumnMutation ( ColumnFamily < K , C > cf , K rowKey , C column , ConsistencyLevel consistency ) { return _astyanaxKeyspace . prepareColumnMutation ( cf , rowKey , column ) . setConsistencyLevel ( clamp ( consistency ) ) ; } | Mutation for a single column . |
7,015 | private String getMismatchedPartitioner ( Class < ? extends IPartitioner > expectedPartitioner ) { String partitioner = null ; try { partitioner = _astyanaxKeyspace . describePartitioner ( ) ; boolean matches = CassandraPartitioner . fromClass ( partitioner ) . matches ( expectedPartitioner . getName ( ) ) ; if ( matches ) { return null ; } else { return partitioner ; } } catch ( ConnectionException e ) { throw Throwables . propagate ( e ) ; } catch ( IllegalArgumentException e ) { return partitioner ; } } | Returns the actual partitioner in use by Cassandra if it does not match the expected partitioner null if it matches . |
7,016 | public synchronized SlabAllocation attachAndAllocate ( SlabRef slab , PeekingIterator < Integer > eventSizes ) { attach ( slab ) ; return allocate ( eventSizes ) ; } | Attaches a slab and allocates from it in a single atomic operation . |
7,017 | public synchronized void attach ( SlabRef slab ) { checkState ( ! isAttached ( ) ) ; _slab = slab ; _slabConsumed = 0 ; _slabBytesConsumed = 0 ; _slabExpiresAt = System . currentTimeMillis ( ) + Constants . SLAB_ROTATE_TTL . toMillis ( ) ; } | Attaches a new slab to the channel with the specified capacity . |
7,018 | public synchronized SlabRef detach ( ) { if ( ! isAttached ( ) ) { return null ; } SlabRef slab = _slab ; _slab = null ; _slabExpiresAt = 0 ; return slab ; } | Detaches a slab from the channel and returns it to the caller to dispose of . |
7,019 | private MaintenanceOp getNextMaintenanceOp ( final TableJson json , boolean includeTask ) { if ( json . isDeleted ( ) || json . getStorages ( ) . isEmpty ( ) ) { return null ; } MaintenanceOp op = NULLS_LAST . min ( Iterables . transform ( json . getStorages ( ) , new Function < Storage , MaintenanceOp > ( ) { public MaintenanceOp apply ( Storage storage ) { return getNextMaintenanceOp ( json , storage ) ; } } ) ) ; if ( op != null && ! includeTask ) { op . clearTask ( ) ; } return op ; } | Returns the next maintenance operation that should be performed on the specified table . |
7,020 | public boolean checkFacadeAllowed ( String table , FacadeOptions options ) throws FacadeExistsException { return checkFacadeAllowed ( readTableJson ( table , true ) , options . getPlacement ( ) , null ) ; } | Returns true if facade may be created for the specified table and placement . Throws an exception if a facade is not allowed because of a conflict with the master or another facade . Returns false if there is already a facade at the specified placement so facade creation would be idempotent . |
7,021 | private void purgeData ( TableJson json , Storage storage , int iteration , Runnable progress ) { _log . info ( "Purging data for table '{}' and table uuid '{}' (facade={}, iteration={})." , json . getTable ( ) , storage . getUuidString ( ) , storage . isFacade ( ) , iteration ) ; Runnable rateLimitedProgress = rateLimited ( _placementCache . get ( storage . getPlacement ( ) ) , progress ) ; audit ( json . getTable ( ) , "doPurgeData" + iteration , new AuditBuilder ( ) . set ( "_uuid" , storage . getUuidString ( ) ) . set ( "_placement" , storage . getPlacement ( ) ) . build ( ) ) ; _dataPurgeDAO . purge ( newAstyanaxStorage ( storage , json . getTable ( ) ) , rateLimitedProgress ) ; } | Purge a dropped table or facade . Executed twice once for fast cleanup and again much later for stragglers . |
7,022 | private void deleteFinal ( TableJson json , Storage storage ) { Delta delta = json . newDeleteStorage ( storage ) ; Audit audit = new AuditBuilder ( ) . set ( "_op" , "doDeleteFinal" ) . set ( "_uuid" , storage . getUuidString ( ) ) . build ( ) ; updateTableMetadata ( json . getTable ( ) , delta , audit , InvalidationScope . LOCAL ) ; } | Last step in dropping a table or facade . |
7,023 | private void moveCancel ( TableJson json , Storage src , Storage dest ) { Delta delta = json . newMoveCancel ( src ) ; Audit audit = new AuditBuilder ( ) . set ( "_op" , "doMoveCancel" ) . set ( "_srcUuid" , src . getUuidString ( ) ) . set ( "_srcPlacement" , src . getPlacement ( ) ) . set ( "_destUuid" , dest . getUuidString ( ) ) . set ( "_destPlacement" , dest . getPlacement ( ) ) . build ( ) ; updateTableMetadata ( json . getTable ( ) , delta , audit , InvalidationScope . GLOBAL ) ; } | Cancel a move before mirror promotion has taken place . |
7,024 | Delta newUnpublishedDatabusEventUpdate ( String tableName , String updateType , String datetime ) { return Deltas . mapBuilder ( ) . update ( "tables" , Deltas . setBuilder ( ) . add ( ImmutableMap . of ( "table" , tableName , "date" , datetime , "event" , updateType ) ) . build ( ) ) . build ( ) ; } | Delta for storing the unpublished databus events . |
7,025 | private boolean conditionsEqual ( Collection < Condition > conditions ) { if ( conditions . size ( ) != _conditions . size ( ) ) { return false ; } List < Condition > unvalidatedConditions = new ArrayList < > ( conditions ) ; for ( Condition condition : _conditions ) { if ( ! unvalidatedConditions . remove ( condition ) ) { return false ; } } return true ; } | The order of the conditions is irrelevant just check the set is the same . |
7,026 | public CloseableDataStore getDataStore ( URI location , String apiKey , MetricRegistry metricRegistry ) throws IOException { String id = LocationUtil . getDataStoreIdentifier ( location , apiKey ) ; CloseableDataStore dataStore = null ; while ( dataStore == null ) { DataStoreMonitor dataStoreMonitor = _dataStoreByLocation . get ( id ) ; if ( dataStoreMonitor == null || ( dataStore = dataStoreMonitor . getDataStore ( ) ) == null ) { CloseableDataStore unmonitoredDataStore ; switch ( LocationUtil . getLocationType ( location ) ) { case EMO_HOST_DISCOVERY : unmonitoredDataStore = createDataStoreWithHostDiscovery ( location , apiKey , metricRegistry ) ; break ; case EMO_URL : unmonitoredDataStore = createDataStoreWithUrl ( location , apiKey , metricRegistry ) ; break ; default : throw new IllegalArgumentException ( "Location does not use a data store: " + location ) ; } dataStoreMonitor = new DataStoreMonitor ( id , unmonitoredDataStore ) ; if ( _dataStoreByLocation . putIfAbsent ( id , dataStoreMonitor ) != null ) { dataStoreMonitor . closeNow ( ) ; } else { dataStore = dataStoreMonitor . getDataStore ( ) ; } } } return dataStore ; } | Returns a DataStore for a given location . If a cached instance already exists its reference count is incremented and returned otherwise a new instance is created and cached . |
7,027 | private MultiThreadedServiceFactory < AuthDataStore > createDataStoreServiceFactory ( String cluster , MetricRegistry metricRegistry ) { HttpClientConfiguration clientConfig = new HttpClientConfiguration ( ) ; clientConfig . setKeepAlive ( Duration . seconds ( 1 ) ) ; clientConfig . setConnectionTimeout ( Duration . seconds ( 10 ) ) ; clientConfig . setTimeout ( Duration . minutes ( 5 ) ) ; return DataStoreClientFactory . forClusterAndHttpConfiguration ( cluster , clientConfig , metricRegistry ) ; } | Creates a ServiceFactory for a cluster with reasonable configurations . |
7,028 | private ResultSet columnScan ( DeltaPlacement placement , TableDDL tableDDL , ByteBuffer rowKey , Range < RangeTimeUUID > columnRange , boolean ascending , ConsistencyLevel consistency ) { Select . Where where = ( tableDDL == placement . getBlockedDeltaTableDDL ( ) ? selectDeltaFrom ( placement . getBlockedDeltaTableDDL ( ) ) : selectFrom ( tableDDL ) ) . where ( eq ( tableDDL . getRowKeyColumnName ( ) , rowKey ) ) ; if ( columnRange . hasLowerBound ( ) ) { if ( columnRange . lowerBoundType ( ) == BoundType . CLOSED ) { where = where . and ( gte ( tableDDL . getChangeIdColumnName ( ) , columnRange . lowerEndpoint ( ) . getUuid ( ) ) ) ; } else { where = where . and ( gt ( tableDDL . getChangeIdColumnName ( ) , columnRange . lowerEndpoint ( ) . getUuid ( ) ) ) ; } } if ( columnRange . hasUpperBound ( ) ) { if ( columnRange . upperBoundType ( ) == BoundType . CLOSED ) { where = where . and ( lte ( tableDDL . getChangeIdColumnName ( ) , columnRange . upperEndpoint ( ) . getUuid ( ) ) ) ; } else { where = where . and ( lt ( tableDDL . getChangeIdColumnName ( ) , columnRange . upperEndpoint ( ) . getUuid ( ) ) ) ; } } Statement statement = where . orderBy ( ascending ? asc ( tableDDL . getChangeIdColumnName ( ) ) : desc ( tableDDL . getChangeIdColumnName ( ) ) ) . setConsistencyLevel ( consistency ) ; return AdaptiveResultSet . executeAdaptiveQuery ( placement . getKeyspace ( ) . getCqlSession ( ) , statement , _driverConfig . getSingleRowFetchSize ( ) ) ; } | Reads columns from the delta or delta history table . The range of columns order and limit can be parameterized . |
7,029 | private boolean moveParserToField ( JsonParser parser , String path ) throws IOException { List < String > segments = getFieldPath ( path ) ; for ( String segment : segments ) { if ( parser . getCurrentToken ( ) != JsonToken . START_OBJECT ) { return false ; } boolean found = false ; JsonToken currentToken = parser . nextToken ( ) ; while ( ! found && currentToken != JsonToken . END_OBJECT ) { if ( currentToken != JsonToken . FIELD_NAME ) { throw new IOException ( "Field not found at expected location" ) ; } String fieldName = parser . getText ( ) ; if ( fieldName . equals ( segment ) ) { found = true ; currentToken = parser . nextToken ( ) ; } else { parser . nextValue ( ) ; currentToken = skipValue ( parser ) ; } } if ( ! found ) { return false ; } } return true ; } | Don t materialize the entire parser content do a targeted search for the value that matches the path . |
7,030 | static ByteBuffer getRowKeyRaw ( int shardId , long tableUuid , byte [ ] contentKeyBytes ) { checkArgument ( shardId >= 0 && shardId < 256 ) ; ByteBuffer rowKey = ByteBuffer . allocate ( 9 + contentKeyBytes . length ) ; rowKey . put ( ( byte ) shardId ) ; rowKey . putLong ( tableUuid ) ; rowKey . put ( contentKeyBytes ) ; rowKey . flip ( ) ; return rowKey ; } | Constructs a row key when the row s shard ID is already known which is rare . Generally this is used for range queries to construct the lower or upper bound for a query so it doesn t necessarily need to produce a valid row key . |
7,031 | private List < ClaimedTask > claimMigrationRangeTasks ( int max ) { try { Date claimTime = new Date ( ) ; List < ScanRangeTask > migrationRangeTasks = _workflow . claimScanRangeTasks ( max , QUEUE_CLAIM_TTL ) ; if ( migrationRangeTasks . isEmpty ( ) ) { return ImmutableList . of ( ) ; } List < ClaimedTask > newlyClaimedTasks = Lists . newArrayListWithCapacity ( migrationRangeTasks . size ( ) ) ; for ( ScanRangeTask task : migrationRangeTasks ) { final ClaimedTask claimedTask = new ClaimedTask ( task , claimTime ) ; boolean alreadyClaimed = _claimedTasks . putIfAbsent ( task . getId ( ) , claimedTask ) != null ; if ( alreadyClaimed ) { _log . warn ( "Workflow returned migration range task that is already claimed: {}" , task ) ; } else { _log . info ( "Claimed migration range task: {}" , task ) ; newlyClaimedTasks . add ( claimedTask ) ; _backgroundService . schedule ( new Runnable ( ) { public void run ( ) { validateClaimedTaskHasStarted ( claimedTask ) ; } } , CLAIM_START_TIMEOUT . toMillis ( ) , TimeUnit . MILLISECONDS ) ; } } return newlyClaimedTasks ; } catch ( Exception e ) { _log . error ( "Failed to start next available migration range" , e ) ; return ImmutableList . of ( ) ; } } | Claims migration range tasks that have been queued by the leader and are ready to scan . |
7,032 | static Storage initializeGroup ( Collection < Storage > group ) { List < Storage > sorted = Ordering . natural ( ) . immutableSortedCopy ( group ) ; Storage primary = sorted . get ( 0 ) ; if ( ! primary . isConsistent ( ) ) { return null ; } for ( Storage storage : sorted ) { storage . _group = sorted ; } return primary ; } | Post - construction initialization links all non - dropped members of group together . Returns the primary member of the group other members are mirrors . |
7,033 | Storage getMoveTo ( ) { String destUuid = get ( MOVE_TO ) ; return destUuid != null ? find ( getMirrors ( ) , destUuid ) : null ; } | Move - related properties |
7,034 | public int compareTo ( Storage o ) { return ComparisonChain . start ( ) . compareTrueFirst ( isConsistent ( ) , o . isConsistent ( ) ) . compareTrueFirst ( _masterPrimary , o . _masterPrimary ) . compare ( o . getPromotionId ( ) , getPromotionId ( ) , TimeUUIDs . ordering ( ) . nullsLast ( ) ) . compare ( _uuid , o . _uuid ) . result ( ) ; } | Storage objects sort such that primaries sort first mirrors after . |
7,035 | private void renewClaimedTasks ( ) { try { List < ClaimedTask > claimedTasks = ImmutableList . copyOf ( _claimedTasks . values ( ) ) ; List < ScanRangeTask > tasks = Lists . newArrayList ( ) ; for ( ClaimedTask claimedTask : claimedTasks ) { if ( claimedTask . isComplete ( ) ) { _log . info ( "Complete claimed task found during renew: id={}" , claimedTask . getTaskId ( ) ) ; _claimedTasks . remove ( claimedTask . getTaskId ( ) ) ; } else if ( claimedTask . isStarted ( ) ) { tasks . add ( claimedTask . getTask ( ) ) ; } } if ( ! tasks . isEmpty ( ) ) { _scanWorkflow . renewScanRangeTasks ( tasks , QUEUE_RENEW_TTL ) ; for ( ScanRangeTask task : tasks ) { _log . info ( "Renewed scan range task: {}" , task ) ; } } } catch ( Exception e ) { _log . error ( "Failed to renew scan ranges" , e ) ; } } | Renews all claimed scan range tasks that have not been released . Unless this is called periodically the scan workflow will make this task available to be claimed again . |
7,036 | private void unclaimTask ( ClaimedTask claimedTask , boolean releaseTask ) { _claimedTasks . remove ( claimedTask . getTaskId ( ) ) ; claimedTask . setComplete ( true ) ; if ( releaseTask ) { try { _scanWorkflow . releaseScanRangeTask ( claimedTask . getTask ( ) ) ; _log . info ( "Released scan range task: {}" , claimedTask . getTask ( ) ) ; } catch ( Exception e ) { _log . error ( "Failed to release scan range" , e ) ; } } } | Unclaims a previously claimed task . Effectively this stops the renewing of the task and if releaseTask is true removes the task permanently from the workflow queue . |
7,037 | private boolean asyncRangeScan ( ScanRangeTask task ) { final String scanId = task . getScanId ( ) ; final int taskId = task . getId ( ) ; final String placement = task . getPlacement ( ) ; final ScanRange range = task . getRange ( ) ; RangeScanUploaderResult result ; try { ScanStatus status = _scanStatusDAO . getScanStatus ( scanId ) ; if ( status . isCanceled ( ) ) { _log . info ( "Ignoring scan range from canceled task: [task={}]" , task ) ; return true ; } ScanRangeStatus completedStatus = Iterables . getOnlyElement ( Iterables . filter ( status . getCompleteScanRanges ( ) , new Predicate < ScanRangeStatus > ( ) { public boolean apply ( ScanRangeStatus rangeStatus ) { return rangeStatus . getTaskId ( ) == taskId ; } } ) , null ) ; if ( completedStatus != null ) { _log . info ( "Ignoring duplicate post of completed scan range task: [task={}, completeTime={}]" , task , completedStatus . getScanCompleteTime ( ) ) ; return true ; } _log . info ( "Started scan range task: {}" , task ) ; _scanStatusDAO . setScanRangeTaskActive ( scanId , taskId , new Date ( ) ) ; result = _rangeScanUploader . scanAndUpload ( scanId , taskId , status . getOptions ( ) , placement , range , status . getCompactionControlTime ( ) ) ; _log . info ( "Completed scan range task: {}" , task ) ; } catch ( Throwable t ) { _log . error ( "Scan range task failed: {}" , task , t ) ; result = RangeScanUploaderResult . failure ( ) ; } try { switch ( result . getStatus ( ) ) { case SUCCESS : _scanStatusDAO . setScanRangeTaskComplete ( scanId , taskId , new Date ( ) ) ; break ; case FAILURE : _scanStatusDAO . setScanRangeTaskInactive ( scanId , taskId ) ; break ; case REPSPLIT : ScanRange completedRange = ScanRange . create ( range . getFrom ( ) , result . getResplitRange ( ) . getFrom ( ) ) ; _scanStatusDAO . setScanRangeTaskPartiallyComplete ( scanId , taskId , completedRange , result . getResplitRange ( ) , new Date ( ) ) ; break ; } } catch ( Throwable t ) { _log . 
error ( "Failed to mark scan range result: [id={}, placement={}, range={}, result={}]" , scanId , placement , range , result , t ) ; return false ; } return true ; } | Performs a range scan and updates the global scan status with the scan result . |
7,038 | protected Set < String > getUpdatedRolesFrom ( Set < String > roles ) { Set < String > updatedRoles = Sets . newHashSet ( roles ) ; updatedRoles . addAll ( _rolesAdded ) ; updatedRoles . removeAll ( _rolesRemoved ) ; return updatedRoles ; } | Helper method for subclasses which given a set of roles returns a new set of roles with all added and removed roles from this modification applied . |
7,039 | public static String getString ( ByteBuffer buf , Charset encoding ) { return getString ( buf , 0 , buf . remaining ( ) , encoding ) ; } | Converts all remaining bytes in the buffer to a String using the specified encoding . Does not move the buffer position . |
7,040 | public static String getString ( ByteBuffer buf , int offset , int length , Charset encoding ) { buf = buf . duplicate ( ) ; buf . position ( buf . position ( ) + offset ) ; if ( buf . hasArray ( ) ) { return new String ( buf . array ( ) , buf . arrayOffset ( ) + buf . position ( ) , length , encoding ) ; } else { byte [ ] bytes = new byte [ length ] ; buf . get ( bytes ) ; return new String ( bytes , encoding ) ; } } | Converts the specified number of bytes in the buffer and converts them to a String using the specified encoding . Does not move the buffer position . |
7,041 | public void addThrottle ( AdHocThrottleEndpoint endpoint , AdHocThrottle throttle ) { checkNotNull ( throttle , "throttle" ) ; String key = endpoint . toString ( ) ; try { if ( throttle . isUnlimited ( ) ) { _throttleMap . remove ( key ) ; } _throttleMap . set ( key , throttle ) ; } catch ( Exception e ) { throw Throwables . propagate ( e ) ; } } | Adds a throttle for an HTTP method and path . |
7,042 | public void removeThrottle ( AdHocThrottleEndpoint endpoint ) { try { _throttleMap . remove ( endpoint . toString ( ) ) ; } catch ( Exception e ) { _log . warn ( "Failed to remove throttle for {} {}" , endpoint . getMethod ( ) , endpoint . getPath ( ) , e ) ; } } | Removes the throttle for an HTTP method and path . This effectively allows unlimited concurrency . |
7,043 | public AdHocThrottle getThrottle ( AdHocThrottleEndpoint endpoint ) { String key = endpoint . toString ( ) ; AdHocThrottle throttle = _throttleMap . get ( key ) ; if ( throttle == null ) { throttle = AdHocThrottle . unlimitedInstance ( ) ; } else if ( throttle . getExpiration ( ) . isBefore ( Instant . now ( ) ) ) { if ( throttle . getExpiration ( ) . isBefore ( Instant . now ( ) . minus ( Duration . ofDays ( 1 ) ) ) ) { try { _throttleMap . remove ( key ) ; } catch ( Exception e ) { _log . warn ( "Failed to remove expired throttle for {} {}" , endpoint . getMethod ( ) , endpoint . getPath ( ) , e ) ; } } throttle = AdHocThrottle . unlimitedInstance ( ) ; } return throttle ; } | Returns the throttle in effect for the given HTTP method and path . This method is always guaranteed to return a non - null throttle that is not expired . If no throttle has been configured or if the configured throttle has expired then it will return an unlimited throttle with an effectively infinite expiration date . |
7,044 | public static LocationType getLocationType ( URI location ) { String scheme = location . getScheme ( ) ; if ( EMODB_SCHEME . equals ( scheme ) ) { if ( LOCATOR_PATTERN . matcher ( location . getHost ( ) ) . matches ( ) ) { return LocationType . EMO_HOST_DISCOVERY ; } else { return LocationType . EMO_URL ; } } else if ( STASH_SCHEME . equals ( scheme ) ) { return LocationType . STASH ; } throw new IllegalArgumentException ( "Invalid location: " + location ) ; } | Returns the location type from a location URI . |
7,045 | public static String getDataStoreIdentifier ( URI location , String apiKey ) { checkArgument ( getLocationType ( location ) != LocationType . STASH , "Stash locations do not have a data source ID" ) ; UriBuilder uriBuilder = UriBuilder . fromUri ( location ) . userInfo ( apiKey ) . replacePath ( null ) . replaceQuery ( null ) ; if ( getLocationType ( location ) == LocationType . EMO_HOST_DISCOVERY ) { Optional < String > zkConnectionStringOverride = getZkConnectionStringOverride ( location ) ; if ( zkConnectionStringOverride . isPresent ( ) ) { uriBuilder . queryParam ( ZK_CONNECTION_STRING_PARAM , zkConnectionStringOverride . get ( ) ) ; } Optional < List < String > > hosts = getHostOverride ( location ) ; if ( hosts . isPresent ( ) ) { for ( String host : hosts . get ( ) ) { uriBuilder . queryParam ( HOST_PARAM , host ) ; } } } return uriBuilder . build ( ) . toString ( ) ; } | Converts a location URI to a data source identifier . |
7,046 | public static Optional < CuratorFramework > getCuratorForLocation ( URI location ) { final String defaultConnectionString ; final String namespace ; if ( getLocationType ( location ) != LocationType . EMO_HOST_DISCOVERY ) { return Optional . absent ( ) ; } if ( getHostOverride ( location ) . isPresent ( ) ) { return Optional . absent ( ) ; } Matcher matcher = getLocatorMatcher ( location ) ; checkArgument ( matcher . matches ( ) , "Invalid location: %s" , location ) ; if ( matcher . group ( "universe" ) != null ) { String universe = matcher . group ( "universe" ) ; Region region = getRegion ( Objects . firstNonNull ( matcher . group ( "region" ) , DEFAULT_REGION ) ) ; namespace = format ( "%s/%s" , universe , region ) ; defaultConnectionString = DEFAULT_ZK_CONNECTION_STRING ; } else { namespace = null ; defaultConnectionString = DEFAULT_LOCAL_ZK_CONNECTION_STRING ; } String connectionString = getZkConnectionStringOverride ( location ) . or ( defaultConnectionString ) ; CuratorFramework curator = CuratorFrameworkFactory . builder ( ) . ensembleProvider ( new ResolvingEnsembleProvider ( connectionString ) ) . retryPolicy ( new BoundedExponentialBackoffRetry ( 100 , 1000 , 10 ) ) . threadFactory ( new ThreadFactoryBuilder ( ) . setNameFormat ( "emo-zookeeper-%d" ) . build ( ) ) . namespace ( namespace ) . build ( ) ; curator . start ( ) ; return Optional . of ( curator ) ; } | Returns a configured started Curator for a given location or absent if the location does not use host discovery . |
7,047 | public static String getClusterForLocation ( URI location ) { Matcher matcher = getLocatorMatcher ( location ) ; checkArgument ( matcher . matches ( ) , "Invalid location: %s" , location ) ; final String clusterPrefix ; if ( matcher . group ( "universe" ) != null ) { clusterPrefix = matcher . group ( "universe" ) ; } else { clusterPrefix = "local" ; } String group = Objects . firstNonNull ( matcher . group ( "group" ) , DEFAULT_GROUP ) ; return format ( "%s_%s" , clusterPrefix , group ) ; } | Returns the EmoDB cluster name associated with the given location . |
7,048 | public static URI toLocation ( String source , String table ) { URI sourceUri = URI . create ( source ) ; return toLocation ( sourceUri , table ) ; } | Returns a location URI from a source and table name . |
7,049 | public static URI toLocation ( URI sourceUri , String table ) { getLocationType ( sourceUri ) ; return UriBuilder . fromUri ( sourceUri ) . path ( table ) . build ( ) ; } | Returns a Location URI from a source URI and table name . |
7,050 | public String replayAsync ( String apiKey , String subscription ) { return replayAsyncSince ( apiKey , subscription , null ) ; } | Any server can initiate a replay request no need for |
7,051 | public String moveAsync ( String apiKey , String from , String to ) { checkNotNull ( from , "from" ) ; checkNotNull ( to , "to" ) ; try { URI uri = _databus . clone ( ) . segment ( "_move" ) . queryParam ( "from" , from ) . queryParam ( "to" , to ) . build ( ) ; Map < String , Object > response = _client . resource ( uri ) . header ( ApiKeyRequest . AUTHENTICATION_HEADER , apiKey ) . post ( new TypeReference < Map < String , Object > > ( ) { } , null ) ; return response . get ( "id" ) . toString ( ) ; } catch ( EmoClientException e ) { throw convertException ( e ) ; } } | Any server can initiate a move request no need for |
7,052 | public MoveSubscriptionStatus getMoveStatus ( String apiKey , String reference ) { checkNotNull ( reference , "reference" ) ; try { URI uri = _databus . clone ( ) . segment ( "_move" ) . segment ( reference ) . build ( ) ; return _client . resource ( uri ) . header ( ApiKeyRequest . AUTHENTICATION_HEADER , apiKey ) . get ( MoveSubscriptionStatus . class ) ; } catch ( EmoClientException e ) { throw convertException ( e ) ; } } | Any server can get the move status no need for |
7,053 | private Map < String , Object > deserialized ( ) { DeserializationState deserState ; while ( ! ( deserState = _deserState . get ( ) ) . isDeserialized ( ) ) { Map < String , Object > deserialized = JsonHelper . fromJson ( deserState . json , new TypeReference < Map < String , Object > > ( ) { } ) ; deserialized . putAll ( deserState . overrides ) ; DeserializationState newDeserState = new DeserializationState ( deserialized ) ; _deserState . compareAndSet ( deserState , newDeserState ) ; } return deserState . deserialized ; } | Returns the JSON as a Map . If necessary the JSON is converted to a Map as a result of this call . |
7,054 | public Object put ( String key , Object value ) { DeserializationState deserializationState = _deserState . get ( ) ; if ( deserializationState . isDeserialized ( ) ) { return deserializationState . deserialized . put ( key , value ) ; } return deserializationState . overrides . put ( key , value ) ; } | For efficiency this method breaks the contract that the old value is returned . Otherwise common operations such as adding intrinsics and template attributes would require deserializing the object . |
7,055 | void writeTo ( JsonGenerator generator ) throws IOException { DeserializationState deserState = _deserState . get ( ) ; if ( deserState . isDeserialized ( ) ) { generator . writeObject ( deserState . deserialized ) ; return ; } if ( deserState . overrides . isEmpty ( ) ) { try { generator . writeRaw ( deserState . json ) ; return ; } catch ( UnsupportedOperationException e ) { } } ObjectCodec codec = generator . getCodec ( ) ; if ( codec == null ) { generator . writeObject ( deserialized ( ) ) ; return ; } JsonParser parser = codec . getFactory ( ) . createParser ( deserState . json ) ; checkState ( parser . nextToken ( ) == JsonToken . START_OBJECT , "JSON did not contain an object" ) ; generator . writeStartObject ( ) ; Iterator < Map . Entry < String , Object > > sortedOverrides = ( ( Map < String , Object > ) OrderedJson . ordered ( deserState . overrides ) ) . entrySet ( ) . iterator ( ) ; Map . Entry < String , Object > nextOverride = sortedOverrides . hasNext ( ) ? sortedOverrides . next ( ) : null ; JsonToken token ; while ( ( token = parser . nextToken ( ) ) != JsonToken . END_OBJECT ) { assert token == JsonToken . FIELD_NAME ; String field = parser . getText ( ) ; if ( deserState . overrides . containsKey ( field ) ) { token = parser . nextToken ( ) ; if ( token . isStructStart ( ) ) { parser . skipChildren ( ) ; } } else { while ( nextOverride != null && OrderedJson . KEY_COMPARATOR . compare ( nextOverride . getKey ( ) , field ) < 0 ) { generator . writeFieldName ( nextOverride . getKey ( ) ) ; generator . writeObject ( nextOverride . getValue ( ) ) ; nextOverride = sortedOverrides . hasNext ( ) ? sortedOverrides . next ( ) : null ; } generator . copyCurrentStructure ( parser ) ; } } while ( nextOverride != null ) { generator . writeFieldName ( nextOverride . getKey ( ) ) ; generator . writeObject ( nextOverride . getValue ( ) ) ; nextOverride = sortedOverrides . hasNext ( ) ? sortedOverrides . next ( ) : null ; } generator . 
writeEndObject ( ) ; } | Writes this record to the provided generator in the most efficient manner possible in the current state . |
7,056 | private SortedQueue getQueueReadOnly ( String queueName , Duration waitDuration ) { DedupQueue service = getQueueReadWrite ( queueName , waitDuration ) ; if ( service != null ) { try { return service . getQueue ( ) ; } catch ( ReadOnlyQueueException e ) { } } return _sortedQueueFactory . create ( queueName , true , _queueDAO ) ; } | Returns the persistent sorted queue managed by this JVM or a stub that supports only read - only operations if not managed by this JVM . |
7,057 | public void setCustomRequestParameter ( String param , String ... values ) { _customRequestParameters . putAll ( param , Arrays . asList ( values ) ) ; } | Sets custom request parameters . Custom parameters may include new features not yet officially supported or additional parameters to existing calls not intended for widespread use . As such this method is not typically used by most clients . Furthermore adding additional parameters may cause the request to fail . |
7,058 | void remove ( T obj ) { _queue . remove ( obj ) ; if ( _prioritize == obj ) { _prioritize = null ; } } | Removes an object from the queue . |
7,059 | T cycle ( ) { if ( _prioritize != null ) { return _prioritize ; } if ( ! _queue . isEmpty ( ) ) { T first = _queue . keySet ( ) . iterator ( ) . next ( ) ; _queue . get ( first ) ; return first ; } return null ; } | Returns the head of the queue then cycles it to the back of the queue . |
7,060 | private Delta createDelta ( PermissionUpdateRequest request ) { MapDeltaBuilder builder = Deltas . mapBuilder ( ) ; for ( String permissionString : request . getPermitted ( ) ) { builder . put ( "perm_" + validated ( permissionString ) , 1 ) ; } for ( String permissionString : request . getRevoked ( ) ) { builder . remove ( "perm_" + validated ( permissionString ) ) ; } if ( request . isRevokeRest ( ) ) { builder . removeRest ( ) ; } return builder . build ( ) ; } | Returns a delta constructed from this request or null if the request contained no changes . |
7,061 | public void addTokenRangeToCurrentBatchForCluster ( String cluster , String placement , Collection < ScanRange > ranges ) { PlanBatch batch = _clusterTails . get ( cluster ) ; if ( batch == null ) { batch = new PlanBatch ( ) ; _clusterHeads . put ( cluster , batch ) ; _clusterTails . put ( cluster , batch ) ; } batch . addPlanItem ( new PlanItem ( placement , ranges ) ) ; } | Adds a collection of scan ranges to the plan for a specific placement . The range collection should all belong to a single token range in the ring . |
7,062 | public ScanStatus toScanStatus ( ) { List < ScanRangeStatus > pendingRangeStatuses = Lists . newArrayList ( ) ; int taskId = 0 ; int batchId = 0 ; int concurrencyId = 0 ; for ( PlanBatch batch : _clusterHeads . values ( ) ) { Integer lastBatchId = null ; while ( batch != null ) { List < PlanItem > items = batch . getItems ( ) ; if ( ! items . isEmpty ( ) ) { Optional < Integer > blockingBatch = Optional . fromNullable ( lastBatchId ) ; for ( PlanItem item : items ) { String placement = item . getPlacement ( ) ; Optional < Integer > concurrency = item . getScanRanges ( ) . size ( ) > 1 ? Optional . of ( concurrencyId ++ ) : Optional . < Integer > absent ( ) ; for ( ScanRange scanRange : item . getScanRanges ( ) ) { pendingRangeStatuses . add ( new ScanRangeStatus ( taskId ++ , placement , scanRange , batchId , blockingBatch , concurrency ) ) ; } } lastBatchId = batchId ; batchId ++ ; } batch = batch . getNextBatch ( ) ; } } return new ScanStatus ( _scanId , _options , false , false , new Date ( ) , pendingRangeStatuses , ImmutableList . < ScanRangeStatus > of ( ) , ImmutableList . < ScanRangeStatus > of ( ) ) ; } | Creates a ScanStatus based on the current state of the plan . All scan ranges are added as pending tasks . |
7,063 | public ScanDestination getDestinationWithSubpath ( String path ) { if ( isDiscarding ( ) ) { return discard ( ) ; } if ( path == null ) { return new ScanDestination ( _uri ) ; } return new ScanDestination ( UriBuilder . fromUri ( _uri ) . path ( path ) . build ( ) ) ; } | Creates a new scan destination at the given path rooted at the current scan destination . |
7,064 | synchronized public void performHostDiscovery ( MetricRegistry metricRegistry ) { if ( _hostDiscoveryPerformed ) { return ; } Iterable < String > hosts = null ; if ( _seeds != null ) { hosts = Splitter . on ( ',' ) . trimResults ( ) . split ( _seeds ) ; } if ( _zooKeeperServiceName != null ) { checkState ( hosts == null , "Too many host discovery mechanisms configured." ) ; checkState ( _curator != null , "ZooKeeper host discovery is configured but withZooKeeperHostDiscovery() was not called." ) ; try ( HostDiscovery hostDiscovery = new ZooKeeperHostDiscovery ( _curator , _zooKeeperServiceName , metricRegistry ) ) { List < String > hostList = Lists . newArrayList ( ) ; for ( ServiceEndPoint endPoint : hostDiscovery . getHosts ( ) ) { hostList . add ( endPoint . getId ( ) ) ; if ( _partitioner == null && endPoint . getPayload ( ) != null ) { JsonNode payload = JsonHelper . fromJson ( endPoint . getPayload ( ) , JsonNode . class ) ; String partitioner = payload . path ( "partitioner" ) . textValue ( ) ; if ( partitioner != null ) { _partitioner = CassandraPartitioner . fromClass ( partitioner ) ; } } } hosts = hostList ; } catch ( IOException ex ) { } } checkState ( hosts != null , "No Cassandra host discovery mechanisms are configured." ) ; checkState ( ! Iterables . isEmpty ( hosts ) , "Unable to discover any Cassandra seed instances." ) ; checkState ( _partitioner != null , "Cassandra partitioner not configured or discoverable." ) ; _seeds = Joiner . on ( ',' ) . join ( hosts ) ; _hostDiscoveryPerformed = true ; } | Discover Cassandra seeds and partitioner if not statically configured . |
7,065 | private void setJettyAuthentication ( Subject subject ) { HttpConnection connection = HttpConnection . getCurrentConnection ( ) ; if ( connection == null ) { return ; } Request jettyRequest = connection . getHttpChannel ( ) . getRequest ( ) ; if ( jettyRequest == null ) { return ; } PrincipalWithRoles principal = ( PrincipalWithRoles ) subject . getPrincipal ( ) ; UserIdentity identity = principal . toUserIdentity ( ) ; jettyRequest . setAuthentication ( new UserAuthentication ( SecurityContext . BASIC_AUTH , identity ) ) ; } | Certain aspects of the container such as logging need the authentication information to behave properly . This method updates the request with the necessary objects to recognize the authenticated user . |
7,066 | public static boolean isPotentiallyEncryptedBytes ( byte [ ] bytes ) { checkNotNull ( bytes , "bytes" ) ; try { return bytes . length != 0 && bytes . length % Cipher . getInstance ( CIPHER ) . getBlockSize ( ) == 0 ; } catch ( Throwable t ) { throw Throwables . propagate ( t ) ; } } | Returns true if the provided bytes _could_ be encrypted credentials even if they can t be decrypted by a specific instance . |
7,067 | public static boolean isPotentiallyEncryptedString ( String string ) { checkNotNull ( string , "string" ) ; byte [ ] encryptedBytes ; try { encryptedBytes = BaseEncoding . base64 ( ) . omitPadding ( ) . decode ( string ) ; } catch ( IllegalArgumentException e ) { return false ; } return isPotentiallyEncryptedBytes ( encryptedBytes ) ; } | Returns true if the provided String _could_ be encrypted credentials even if it can t be decrypted by a specific instance . |
7,068 | public List < LeaderService > getPartitionLeaderServices ( ) { return _partitionLeaders . stream ( ) . map ( PartitionLeader :: getLeaderService ) . collect ( Collectors . toList ( ) ) ; } | Returns the underlying leader services for each partition . The services are guaranteed to be returned ordered by partition number . |
7,069 | private Iterator < Record > decodeRows ( Iterator < Row < ByteBuffer , UUID > > iter , final AstyanaxTable table , final int largeRowThreshold , final ReadConsistency consistency ) { return Iterators . transform ( iter , new Function < Row < ByteBuffer , UUID > , Record > ( ) { public Record apply ( Row < ByteBuffer , UUID > row ) { String key = AstyanaxStorage . getContentKey ( row . getRawKey ( ) ) ; return newRecord ( new Key ( table , key ) , row . getRawKey ( ) , row . getColumns ( ) , largeRowThreshold , consistency , null ) ; } } ) ; } | Decodes rows returned by scanning a table . |
7,070 | private Map < String , Function < HttpRequestContext , String > > createSubstitutionMap ( String [ ] permissions , AbstractMethod am ) { Map < String , Function < HttpRequestContext , String > > map = Maps . newLinkedHashMap ( ) ; for ( String permission : permissions ) { Matcher matcher = SUBSTITUTION_MATCHER . matcher ( permission ) ; while ( matcher . find ( ) ) { String match = matcher . group ( ) ; if ( map . containsKey ( match ) ) { continue ; } String param = matcher . group ( "param" ) ; Function < HttpRequestContext , String > substitution ; if ( param . startsWith ( "?" ) ) { substitution = createQuerySubstitution ( param . substring ( 1 ) ) ; } else { substitution = createPathSubstitution ( param , am ) ; } map . put ( match , substitution ) ; } } return map ; } | Returns a mapping from permissions found in the annotations to functions which can perform any necessary substitutions based on actual values in the request . |
7,071 | private synchronized void removeEmptyClaimSets ( ) { Iterables . removeIf ( _map . values ( ) , new Predicate < Handle > ( ) { public boolean apply ( Handle handle ) { handle . getClaimSet ( ) . pump ( ) ; return handle . getRefCount ( ) . get ( ) == 0 && handle . getClaimSet ( ) . size ( ) == 0 ; } } ) ; } | Cleans up old claim sets for subscriptions that have become inactive . Ensures the map of claim sets doesn t grow forever . |
7,072 | private ObjectInspector getObjectInspectorForType ( TypeInfo type ) throws SerDeException { switch ( type . getCategory ( ) ) { case PRIMITIVE : PrimitiveTypeInfo primitiveType = ( PrimitiveTypeInfo ) type ; if ( isSupportedPrimitive ( primitiveType ) ) { return PrimitiveObjectInspectorFactory . getPrimitiveJavaObjectInspector ( primitiveType . getPrimitiveCategory ( ) ) ; } break ; case STRUCT : StructTypeInfo structType = ( StructTypeInfo ) type ; List < ObjectInspector > structInspectors = Lists . newArrayListWithCapacity ( structType . getAllStructFieldTypeInfos ( ) . size ( ) ) ; for ( TypeInfo fieldType : structType . getAllStructFieldTypeInfos ( ) ) { structInspectors . add ( getObjectInspectorForType ( fieldType ) ) ; } return ObjectInspectorFactory . getStandardStructObjectInspector ( structType . getAllStructFieldNames ( ) , structInspectors ) ; case MAP : MapTypeInfo mapType = ( MapTypeInfo ) type ; return ObjectInspectorFactory . getStandardMapObjectInspector ( getObjectInspectorForType ( mapType . getMapKeyTypeInfo ( ) ) , getObjectInspectorForType ( mapType . getMapValueTypeInfo ( ) ) ) ; case LIST : ListTypeInfo listType = ( ListTypeInfo ) type ; return ObjectInspectorFactory . getStandardListObjectInspector ( getObjectInspectorForType ( listType . getListElementTypeInfo ( ) ) ) ; case UNION : UnionTypeInfo unionType = ( UnionTypeInfo ) type ; List < ObjectInspector > unionInspectors = Lists . newArrayListWithCapacity ( unionType . getAllUnionObjectTypeInfos ( ) . size ( ) ) ; for ( TypeInfo fieldType : unionType . getAllUnionObjectTypeInfos ( ) ) { unionInspectors . add ( getObjectInspectorForType ( fieldType ) ) ; } return ObjectInspectorFactory . getStandardUnionObjectInspector ( unionInspectors ) ; } throw new SerDeException ( "Unsupported type: " + type ) ; } | Returns the associated ObjectInspector for a type . This most delegates the to Hive java implementations but filters out primitives not supported by EmoDB . |
7,073 | private Object getRawValue ( String columnName , Map < String , Object > content ) throws ColumnNotFoundException { String field = columnName ; Object value = content ; while ( field != null ) { if ( value == null ) { throw new ColumnNotFoundException ( ) ; } if ( ! ( value instanceof Map ) ) { throw new ColumnNotFoundException ( ) ; } Map < String , Object > map = ( Map < String , Object > ) value ; String nextField = null ; int separator = field . indexOf ( '/' ) ; if ( separator != - 1 ) { nextField = field . substring ( separator + 1 ) ; field = field . substring ( 0 , separator ) ; } boolean found = false ; if ( map . containsKey ( field ) ) { value = map . get ( field ) ; found = true ; } else { for ( Iterator < String > iter = map . keySet ( ) . iterator ( ) ; ! found && iter . hasNext ( ) ; ) { String key = iter . next ( ) ; if ( key . equalsIgnoreCase ( field ) ) { value = map . get ( key ) ; found = true ; } } } if ( ! found ) { throw new ColumnNotFoundException ( ) ; } field = nextField ; } return value ; } | Returns the raw value for a given Map . If the value was found is and is null then null is returned . If no value is present then ColumnNotFoundException is thrown . |
7,074 | private Object deserialize ( TypeInfo type , Object rawValue ) throws SerDeException { Object value = null ; if ( rawValue != null ) { switch ( type . getCategory ( ) ) { case PRIMITIVE : value = deserializePrimitive ( ( PrimitiveTypeInfo ) type , rawValue ) ; break ; case STRUCT : value = deserializeStruct ( ( StructTypeInfo ) type , rawValue ) ; break ; case MAP : value = deserializeMap ( ( MapTypeInfo ) type , rawValue ) ; break ; case LIST : value = deserializeList ( ( ListTypeInfo ) type , rawValue ) ; break ; case UNION : value = deserializeUnion ( ( UnionTypeInfo ) type , rawValue ) ; break ; } } return value ; } | Deserializes a raw value to the provided type . |
7,075 | private boolean isSupportedPrimitive ( PrimitiveTypeInfo type ) { switch ( type . getPrimitiveCategory ( ) ) { case VOID : case STRING : case BOOLEAN : case BYTE : case SHORT : case INT : case LONG : case FLOAT : case DOUBLE : case DATE : case TIMESTAMP : return true ; default : return false ; } } | Determines if the given primitive is supported by this deserializer . At this time the only exclusions are BINARY DECIMAL VARCHAR CHAR and UNKNOWN . |
7,076 | private Object deserializePrimitive ( PrimitiveTypeInfo type , Object value ) throws SerDeException { switch ( type . getPrimitiveCategory ( ) ) { case VOID : return null ; case STRING : return deserializeString ( value ) ; case BOOLEAN : return deserializeBoolean ( value ) ; case BYTE : case SHORT : case INT : case LONG : case FLOAT : case DOUBLE : return deserializeNumber ( value , type ) ; case DATE : case TIMESTAMP : return deserializeDate ( value , type ) ; default : throw new SerDeException ( "Unsupported type: " + type . getPrimitiveCategory ( ) ) ; } } | Deserializes a primitive to its corresponding Java type doing a best - effort conversion when necessary . |
7,077 | public static ListenableFuture < ResultSet > executeAdaptiveQueryAsync ( Session session , Statement statement , int fetchSize ) { return executeAdaptiveQueryAsync ( session , statement , fetchSize , MAX_ADAPTATIONS ) ; } | Executes a query asychronously dynamically adjusting the fetch size down if necessary . |
7,078 | public static ResultSet executeAdaptiveQuery ( Session session , Statement statement , int fetchSize ) { int remainingAdaptations = MAX_ADAPTATIONS ; while ( true ) { try { statement . setFetchSize ( fetchSize ) ; ResultSet resultSet = session . execute ( statement ) ; return new AdaptiveResultSet ( session , resultSet , remainingAdaptations ) ; } catch ( Throwable t ) { if ( isAdaptiveException ( t ) && -- remainingAdaptations != 0 && fetchSize > MIN_FETCH_SIZE ) { fetchSize = Math . max ( fetchSize / 2 , MIN_FETCH_SIZE ) ; _log . debug ( "Repeating previous query with fetch size {} due to {}" , fetchSize , t . getMessage ( ) ) ; } else { throw Throwables . propagate ( t ) ; } } } } | Executes a query sychronously dynamically adjusting the fetch size down if necessary . |
7,079 | private static boolean isAdaptiveException ( Throwable t ) { if ( t instanceof FrameTooLongException ) { return true ; } if ( t instanceof NoHostAvailableException ) { Collection < Throwable > hostExceptions = ( ( NoHostAvailableException ) t ) . getErrors ( ) . values ( ) ; return ! hostExceptions . isEmpty ( ) && hostExceptions . stream ( ) . allMatch ( AdaptiveResultSet :: isAdaptiveException ) ; } return false ; } | Returns true if the exception is one which indicates that the frame size may be too large false otherwise . |
7,080 | private boolean reduceFetchSize ( Throwable reason ) { if ( ! isAdaptiveException ( reason ) || -- _remainingAdaptations == 0 ) { return false ; } ExecutionInfo executionInfo = _delegate . getExecutionInfo ( ) ; Statement statement = executionInfo . getStatement ( ) ; PagingState pagingState = executionInfo . getPagingState ( ) ; int fetchSize = statement . getFetchSize ( ) ; while ( fetchSize > MIN_FETCH_SIZE ) { fetchSize = Math . max ( fetchSize / 2 , MIN_FETCH_SIZE ) ; _log . debug ( "Retrying query at next page with fetch size {} due to {}" , fetchSize , reason . getMessage ( ) ) ; statement . setFetchSize ( fetchSize ) ; statement . setPagingState ( pagingState ) ; try { _delegate = _session . execute ( statement ) ; return true ; } catch ( Throwable t ) { if ( ! isAdaptiveException ( t ) || -- _remainingAdaptations == 0 ) { return false ; } } } return false ; } | Reduces the fetch size and retries the query . Returns true if the query succeeded false if the root cause of the exception does not indicate a frame size issue if the frame size cannot be adjusted down any further or if the retried query fails for an unrelated reason . |
7,081 | public ByteBuffer skipPrefix ( ByteBuffer value ) { value . position ( value . position ( ) + _prefixLength ) ; return value ; } | removes the hex prefix that indicates the number of blocks in the delta |
7,082 | public ReplicationClientFactory usingApiKey ( String apiKey ) { if ( Objects . equal ( _apiKey , apiKey ) ) { return this ; } return new ReplicationClientFactory ( _jerseyClient , apiKey ) ; } | Creates a view of this instance using the given API Key and sharing the same underlying resources . Note that this method may return a new instance so the caller must use the returned value . |
7,083 | private AnnotatedContent resolveAnnotated ( Record record , final ReadConsistency consistency ) { final Resolved resolved = resolve ( record , consistency ) ; final Table table = record . getKey ( ) . getTable ( ) ; return new AnnotatedContent ( ) { public Map < String , Object > getContent ( ) { return toContent ( resolved , consistency ) ; } public boolean isChangeDeltaPending ( UUID changeId ) { long fullConsistencyTimestamp = _dataWriterDao . getFullConsistencyTimestamp ( table ) ; return resolved . isChangeDeltaPending ( changeId , fullConsistencyTimestamp ) ; } public boolean isChangeDeltaRedundant ( UUID changeId ) { return resolved . isChangeDeltaRedundant ( changeId ) ; } } ; } | Resolve a set of changes returning an interface that includes info about specific change IDs . |
7,084 | public void createFacade ( String table , FacadeOptions facadeOptions , Audit audit ) { checkLegalTableName ( table ) ; checkNotNull ( facadeOptions , "facadeDefinition" ) ; checkNotNull ( audit , "audit" ) ; _tableDao . createFacade ( table , facadeOptions , audit ) ; } | Facade related methods |
7,085 | public T startIfOwner ( String name , Duration waitDuration ) { long timeoutAt = System . currentTimeMillis ( ) + waitDuration . toMillis ( ) ; LeaderService leaderService = _leaderMap . getUnchecked ( name ) . orNull ( ) ; if ( leaderService == null || ! awaitRunning ( leaderService , timeoutAt ) ) { return null ; } Service service ; for ( ; ; ) { Optional < Service > opt = leaderService . getCurrentDelegateService ( ) ; if ( opt . isPresent ( ) ) { service = opt . get ( ) ; break ; } if ( System . currentTimeMillis ( ) >= timeoutAt ) { return null ; } try { Thread . sleep ( 10 ) ; } catch ( InterruptedException e ) { throw Throwables . propagate ( e ) ; } } if ( ! awaitRunning ( service , timeoutAt ) ) { return null ; } return ( T ) service ; } | Returns the specified managed service if this server is responsible for the specified object and has won a ZooKeeper - managed leader election . |
7,086 | private boolean awaitRunning ( Service service , long timeoutAt ) { if ( service . isRunning ( ) ) { return true ; } long waitMillis = timeoutAt - System . currentTimeMillis ( ) ; if ( waitMillis <= 0 ) { return false ; } try { service . start ( ) . get ( waitMillis , TimeUnit . MILLISECONDS ) ; } catch ( Exception e ) { } return service . isRunning ( ) ; } | Returns true if the Guava service entered the RUNNING state within the specified time period . |
7,087 | public static int compare ( UUID uuid1 , UUID uuid2 ) { int timeResult = Longs . compare ( uuid1 . timestamp ( ) , uuid2 . timestamp ( ) ) ; if ( timeResult != 0 ) { return timeResult ; } return uuid1 . compareTo ( uuid2 ) ; } | Compare two time UUIDs deterministically first by their embedded timestamp next by their node - specific sequence number finally breaking ties using their embedded node identifier . \ |
7,088 | public static int compareTimestamps ( UUID uuid1 , UUID uuid2 ) { return Longs . compare ( uuid1 . timestamp ( ) , uuid2 . timestamp ( ) ) ; } | Compare the embedded timestamps of the given UUIDs . This is used when it is OK to return an equality based on timestamps alone |
7,089 | public static Iterable < SplitPath > getSplits ( final Configuration config , Path [ ] paths ) { final int splitSize = getSplitSize ( config ) ; return FluentIterable . from ( Arrays . asList ( paths ) ) . transformAndConcat ( new Function < Path , Iterable < ? extends SplitPath > > ( ) { public Iterable < ? extends SplitPath > apply ( Path path ) { try { EmoInputSplittable emoFs = ( EmoInputSplittable ) path . getFileSystem ( config ) ; return emoFs . getInputSplits ( config , path , splitSize ) ; } catch ( IOException e ) { throw Throwables . propagate ( e ) ; } } } ) ; } | Gets the splits for a given list of EmoDB paths . |
7,090 | public static BaseRecordReader createRecordReader ( Configuration config , Path path ) throws IOException { EmoInputSplittable emoFs = ( EmoInputSplittable ) path . getFileSystem ( config ) ; return emoFs . getBaseRecordReader ( config , path , getSplitSize ( config ) ) ; } | Gets a record reader for a given split . |
7,091 | boolean hasUUID ( long uuid ) { if ( _readStorage . hasUUID ( uuid ) ) { return true ; } for ( AstyanaxStorage storage : _writeStorage ) { if ( storage . hasUUID ( uuid ) ) { return true ; } } return false ; } | Test if a given UUID matches this table . |
7,092 | static Type parse ( String str ) { try { return doParse ( str ) ; } catch ( RuntimeException ex ) { throw ex ; } catch ( Exception ex ) { throw new RuntimeException ( ex ) ; } } | Parses the TypeToken string format . |
7,093 | private static Type doParse ( String str ) throws Exception { Class < ? > token = PRIMITIVES . get ( str ) ; if ( token != null ) { return token ; } int first = str . indexOf ( '<' ) ; if ( first < 0 ) { return StringConvert . loadType ( str ) ; } int last = str . lastIndexOf ( '>' ) ; String baseStr = str . substring ( 0 , first ) ; Class < ? > base = StringConvert . loadType ( baseStr ) ; String argsStr = str . substring ( first + 1 , last ) ; List < String > splitArgs = split ( argsStr ) ; List < Type > types = new ArrayList < Type > ( ) ; for ( String splitArg : splitArgs ) { Type argType ; if ( splitArg . startsWith ( EXTENDS ) ) { String remainder = splitArg . substring ( EXTENDS . length ( ) ) ; argType = wildExtendsType ( doParse ( remainder ) ) ; } else if ( splitArg . startsWith ( SUPER ) ) { String remainder = splitArg . substring ( SUPER . length ( ) ) ; argType = wildSuperType ( doParse ( remainder ) ) ; } else if ( splitArg . equals ( "?" ) ) { argType = wildExtendsType ( Object . class ) ; } else if ( splitArg . endsWith ( "[]" ) ) { String componentStr = splitArg . substring ( 0 , splitArg . length ( ) - 2 ) ; Class < ? > componentCls = StringConvert . loadType ( componentStr ) ; argType = Array . newInstance ( componentCls , 0 ) . getClass ( ) ; } else if ( splitArg . startsWith ( "[L" ) && splitArg . endsWith ( ";" ) ) { String componentStr = splitArg . substring ( 2 , splitArg . length ( ) - 1 ) ; Class < ? > componentCls = StringConvert . loadType ( componentStr ) ; argType = Array . newInstance ( componentCls , 0 ) . getClass ( ) ; } else { argType = doParse ( splitArg ) ; } types . add ( argType ) ; } return newParameterizedType ( base , types . toArray ( new Type [ types . size ( ) ] ) ) ; } | parse an element |
7,094 | private static List < String > split ( String str ) { List < String > result = new ArrayList < String > ( ) ; int genericCount = 0 ; int startPos = 0 ; for ( int i = 0 ; i < str . length ( ) ; i ++ ) { if ( str . charAt ( i ) == ',' && genericCount == 0 ) { result . add ( str . substring ( startPos , i ) . trim ( ) ) ; startPos = i + 1 ; } else if ( str . charAt ( i ) == '<' ) { genericCount ++ ; } else if ( str . charAt ( i ) == '>' ) { genericCount -- ; } } result . add ( str . substring ( startPos ) . trim ( ) ) ; return result ; } | split by comma handling nested generified types |
7,095 | private void tryRegisterGuava ( ) { try { Class < ? > moduleClass = Class . class . getMethod ( "getModule" ) . getReturnType ( ) ; Object convertModule = Class . class . getMethod ( "getModule" ) . invoke ( StringConvert . class ) ; Object layer = convertModule . getClass ( ) . getMethod ( "getLayer" ) . invoke ( convertModule ) ; if ( layer != null ) { Object optGuava = layer . getClass ( ) . getMethod ( "findModule" , String . class ) . invoke ( layer , "com.google.common" ) ; boolean found = ( Boolean ) optGuava . getClass ( ) . getMethod ( "isPresent" ) . invoke ( optGuava ) ; if ( found ) { Object guavaModule = optGuava . getClass ( ) . getMethod ( "get" ) . invoke ( optGuava ) ; moduleClass . getMethod ( "addReads" , moduleClass ) . invoke ( convertModule , guavaModule ) ; } } } catch ( Throwable ex ) { if ( LOG ) { System . err . println ( "tryRegisterGuava1: " + ex ) ; } } try { loadType ( "com.google.common.reflect.TypeToken" ) ; @ SuppressWarnings ( "unchecked" ) Class < ? > cls = ( Class < TypedStringConverter < ? > > ) loadType ( "org.joda.convert.TypeTokenStringConverter" ) ; TypedStringConverter < ? > conv = ( TypedStringConverter < ? > ) cls . getDeclaredConstructor ( ) . newInstance ( ) ; registered . put ( conv . getEffectiveType ( ) , conv ) ; } catch ( Throwable ex ) { if ( LOG ) { System . err . println ( "tryRegisterGuava2: " + ex ) ; } } } | Tries to register the Guava converters class . |
7,096 | private void tryRegisterJava8Optionals ( ) { try { loadType ( "java.util.OptionalInt" ) ; @ SuppressWarnings ( "unchecked" ) Class < ? > cls1 = ( Class < TypedStringConverter < ? > > ) loadType ( "org.joda.convert.OptionalIntStringConverter" ) ; TypedStringConverter < ? > conv1 = ( TypedStringConverter < ? > ) cls1 . getDeclaredConstructor ( ) . newInstance ( ) ; registered . put ( conv1 . getEffectiveType ( ) , conv1 ) ; @ SuppressWarnings ( "unchecked" ) Class < ? > cls2 = ( Class < TypedStringConverter < ? > > ) loadType ( "org.joda.convert.OptionalLongStringConverter" ) ; TypedStringConverter < ? > conv2 = ( TypedStringConverter < ? > ) cls2 . getDeclaredConstructor ( ) . newInstance ( ) ; registered . put ( conv2 . getEffectiveType ( ) , conv2 ) ; @ SuppressWarnings ( "unchecked" ) Class < ? > cls3 = ( Class < TypedStringConverter < ? > > ) loadType ( "org.joda.convert.OptionalDoubleStringConverter" ) ; TypedStringConverter < ? > conv3 = ( TypedStringConverter < ? > ) cls3 . getDeclaredConstructor ( ) . newInstance ( ) ; registered . put ( conv3 . getEffectiveType ( ) , conv3 ) ; } catch ( Throwable ex ) { if ( LOG ) { System . err . println ( "tryRegisterOptionals: " + ex ) ; } } } | Tries to register the Java 8 optional classes . |
7,097 | private void tryRegisterTimeZone ( ) { try { registered . put ( SimpleTimeZone . class , JDKStringConverter . TIME_ZONE ) ; } catch ( Throwable ex ) { if ( LOG ) { System . err . println ( "tryRegisterTimeZone1: " + ex ) ; } } try { TimeZone zone = TimeZone . getDefault ( ) ; registered . put ( zone . getClass ( ) , JDKStringConverter . TIME_ZONE ) ; } catch ( Throwable ex ) { if ( LOG ) { System . err . println ( "tryRegisterTimeZone2: " + ex ) ; } } try { TimeZone zone = TimeZone . getTimeZone ( "Europe/London" ) ; registered . put ( zone . getClass ( ) , JDKStringConverter . TIME_ZONE ) ; } catch ( Throwable ex ) { if ( LOG ) { System . err . println ( "tryRegisterTimeZone3: " + ex ) ; } } } | Tries to register the subclasses of TimeZone . Try various things doesn t matter if the map entry gets overwritten . |
7,098 | private void tryRegisterJava8 ( ) { try { tryRegister ( "java.time.Instant" , "parse" ) ; tryRegister ( "java.time.Duration" , "parse" ) ; tryRegister ( "java.time.LocalDate" , "parse" ) ; tryRegister ( "java.time.LocalTime" , "parse" ) ; tryRegister ( "java.time.LocalDateTime" , "parse" ) ; tryRegister ( "java.time.OffsetTime" , "parse" ) ; tryRegister ( "java.time.OffsetDateTime" , "parse" ) ; tryRegister ( "java.time.ZonedDateTime" , "parse" ) ; tryRegister ( "java.time.Year" , "parse" ) ; tryRegister ( "java.time.YearMonth" , "parse" ) ; tryRegister ( "java.time.MonthDay" , "parse" ) ; tryRegister ( "java.time.Period" , "parse" ) ; tryRegister ( "java.time.ZoneOffset" , "of" ) ; tryRegister ( "java.time.ZoneId" , "of" ) ; tryRegister ( "java.time.ZoneRegion" , "of" ) ; } catch ( Throwable ex ) { if ( LOG ) { System . err . println ( "tryRegisterJava8: " + ex ) ; } } } | Tries to register Java 8 classes . |
7,099 | private void tryRegisterThreeTenBackport ( ) { try { tryRegister ( "org.threeten.bp.Instant" , "parse" ) ; tryRegister ( "org.threeten.bp.Duration" , "parse" ) ; tryRegister ( "org.threeten.bp.LocalDate" , "parse" ) ; tryRegister ( "org.threeten.bp.LocalTime" , "parse" ) ; tryRegister ( "org.threeten.bp.LocalDateTime" , "parse" ) ; tryRegister ( "org.threeten.bp.OffsetTime" , "parse" ) ; tryRegister ( "org.threeten.bp.OffsetDateTime" , "parse" ) ; tryRegister ( "org.threeten.bp.ZonedDateTime" , "parse" ) ; tryRegister ( "org.threeten.bp.Year" , "parse" ) ; tryRegister ( "org.threeten.bp.YearMonth" , "parse" ) ; tryRegister ( "org.threeten.bp.MonthDay" , "parse" ) ; tryRegister ( "org.threeten.bp.Period" , "parse" ) ; tryRegister ( "org.threeten.bp.ZoneOffset" , "of" ) ; tryRegister ( "org.threeten.bp.ZoneId" , "of" ) ; tryRegister ( "org.threeten.bp.ZoneRegion" , "of" ) ; } catch ( Throwable ex ) { if ( LOG ) { System . err . println ( "tryRegisterThreeTenBackport: " + ex ) ; } } } | Tries to register ThreeTen backport classes . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.