| name (string, lengths 12-178) | code_snippet (string, lengths 8-36.5k) | score (float64, 3.26-3.68) |
|---|---|---|
hbase_HBaseTestingUtility_createMockRegionServerService_rdh | /**
* Create a stubbed out RegionServerService, mainly for getting FS. This version is used by
* TestOpenRegionHandler
*/
public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
rss... | 3.26 |
hbase_HBaseTestingUtility_m4_rdh | /**
* Load region with rows from 'aaa' to 'zzz'.
*
* @param r
* Region
* @param f
* Family
* @param flush
* flush the cache if true
* @return Count of rows loaded.
*/
public int m4(final HRegion r, final byte[] f, final boolean flush) throws IOException {
byte[] k = new byte[3];
int rowCount = 0;
for (b... | 3.26 |
hbase_MetricsMasterFileSystem_addSplit_rdh | /**
* Record a single instance of a split
*
* @param time
* time that the split took
* @param size
* length of original WALs that were split
*/
public synchronized void addSplit(long time, long size) {
source.updateSplitTime(time);
source.updateSplitSize(size);
} | 3.26 |
hbase_AnnotationReadingPriorityFunction_getPriority_rdh | /**
* Returns a 'priority' based on the request type.
* <p/>
* Currently the returned priority is used for queue selection.
* <p/>
* See the {@code SimpleRpcScheduler} as example. It maintains a queue per 'priority type':
* <ul>
* <li>HIGH_QOS (meta requests)</li>
* <li>REPLICATION_QOS (replication requests)</l... | 3.26 |
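The snippet above is truncated, but the queue-per-priority idea its javadoc describes is easy to illustrate. A minimal sketch with hypothetical names (not HBase's actual SimpleRpcScheduler):

```java
import java.util.EnumMap;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Hypothetical sketch: one queue per priority class, selected by the
// priority a function like the one above assigns to each request.
class PriorityQueuesSketch {
  enum Qos { HIGH, REPLICATION, NORMAL }

  private final Map<Qos, BlockingQueue<Runnable>> queues = new EnumMap<>(Qos.class);

  PriorityQueuesSketch() {
    for (Qos q : Qos.values()) {
      queues.put(q, new LinkedBlockingQueue<>());
    }
  }

  // Dispatch a request to the queue matching its priority.
  void dispatch(Runnable request, Qos priority) {
    queues.get(priority).add(request);
  }
}
```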
hbase_TableDescriptorChecker_sanityCheck_rdh | /**
* Checks whether the table conforms to some sane limits, and configured values (compression, etc)
* work. Throws an exception if something is wrong.
*/
public static void sanityCheck(final Configuration c, final TableDescriptor td) throws IOException {
CompoundConfiguration conf = new CompoundConfiguration(... | 3.26 |
hbase_TableDescriptorChecker_warnOrThrowExceptionForFailure_rdh | // HBASE-13350 - Helper method to log warning on sanity check failures if checks disabled.
private static void warnOrThrowExceptionForFailure(boolean logWarn, String message, Exception cause) throws IOException {
if (!logWarn) {
throw new DoNotRetryIOException(((message + " Set ") + TABLE_SANITY_CHECK... | 3.26 |
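The helper body is cut off above; the HBASE-13350 warn-or-throw pattern it implements looks roughly like this. A sketch with an illustrative class name and a plain IOException standing in for HBase's DoNotRetryIOException:

```java
import java.io.IOException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Sketch of the HBASE-13350 pattern: either log a warning or fail fast,
// depending on whether sanity checks are enforced.
final class SanityCheckFailureSketch {
  private static final Logger LOG = LoggerFactory.getLogger(SanityCheckFailureSketch.class);

  static void warnOrThrow(boolean logWarn, String message, Exception cause) throws IOException {
    if (!logWarn) {
      // Enforcing mode: surface the failure to the caller.
      throw new IOException(message, cause);
    }
    // Lenient mode: record the problem but let the operation proceed.
    LOG.warn(message, cause);
  }
}
```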
hbase_BucketCache_cacheBlock_rdh | /**
* Cache the block with the specified name and buffer.
*
* @param cacheKey
* block's cache key
* @param cachedItem
* block buffer
* @param inMemory
* if block is in-memory
*/
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory, boolean waitWhenCache) {
... | 3.26 |
hbase_BucketCache_bucketSizesAboveThresholdCount_rdh | /**
* Return the count of bucketSizeInfos that still need free space
*/
private int bucketSizesAboveThresholdCount(float minFactor) {
BucketAllocator[] stats = bucketAllocator.getIndexStatistics();
int fullCount = 0;
for (int i = 0; i < stats.length; i++) {
long freeGoal = ((long) (Math.flo... | 3.26 |
hbase_BucketCache_evictBlocksByHfileName_rdh | /**
* Evicts all blocks for a specific HFile.
* <p>
* This is used for evict-on-close to remove all blocks of a specific HFile.
*
* @return the number of blocks evicted
*/
@Override
public int evictBlocksByHfileName(String hfileName) {
removeFileFromPrefetch(hfileName);
Set<BlockCacheKey> keySet = blocks... | 3.26 |
hbase_BucketCache_disableWriter_rdh | // Used for test
void disableWriter() {
this.writerEnabled = false;
} | 3.26 |
hbase_BucketCache_retrieveFromFile_rdh | /**
*
* @see #persistToFile()
*/
private void retrieveFromFile(int[] bucketSizes) throws IOException {
LOG.info("Started retrieving bucket cache from file");
File persistenceFile = new File(persistencePath);
if (!persistenceFile.exists()) {
LOG.warn("Persistence file missing! " + "It's ok if it's... | 3.26 |
hbase_BucketCache_blockEvicted_rdh | /**
* This method is invoked after the bucketEntry is removed from {@link BucketCache#backingMap}
*/
void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean decrementBlockNumber, boolean evictedByEvictionProcess) {
bucketEntry.markAsEvicted();
blocksByHFile.remove(cacheKey);
if (decr... | 3.26 |
hbase_BucketCache_cacheBlockWithWait_rdh | /**
* Cache the block to ramCache
*
* @param cacheKey
* block's cache key
* @param cachedItem
* block buffer
* @param inMemory
* if block is in-memory
* @param wait
* if true, blocking wait when queue is full
*/
public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMe... | 3.26 |
hbase_BucketCache_putIfAbsent_rdh | /**
* Return the previous associated value, or null if absent. It has the same meaning as
* {@link ConcurrentMap#putIfAbsent(Object, Object)}
*/
public RAMQueueEntry putIfAbsent(BlockCacheKey key, RAMQueueEntry entry) {
AtomicBoolean absent = new AtomicBoolean(false);
RAMQueueEntry re = delegate.computeIfAbs... | 3.26 |
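The truncated body hints at a putIfAbsent built on computeIfAbsent with an AtomicBoolean flag. A self-contained sketch of that pattern (class and field names are made up, not HBase's RAMCache):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;

// Sketch: the AtomicBoolean records whether the mapping function ran,
// i.e. whether the key was absent when we arrived.
class PutIfAbsentSketch<K, V> {
  private final ConcurrentMap<K, V> delegate = new ConcurrentHashMap<>();

  V putIfAbsent(K key, V value) {
    AtomicBoolean absent = new AtomicBoolean(false);
    V current = delegate.computeIfAbsent(key, k -> {
      absent.set(true); // we inserted a new mapping
      return value;
    });
    // Match ConcurrentMap#putIfAbsent: null when we inserted,
    // otherwise the previously associated value.
    return absent.get() ? null : current;
  }
}
```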
hbase_BucketCache_createRecycler_rdh | /**
* <pre>
* Create the {@link Recycler} for {@link BucketEntry#refCnt}, which would be used as
* {@link RefCnt#recycler} of {@link HFileBlock#buf} returned from {@link BucketCache#getBlock}.
* NOTE: for {@link BucketCache#getBlock}, the {@link RefCnt#recycler} of {@link HFileBlock#buf}
* from {@link BucketCache#ba... | 3.26 |
hbase_BucketCache_evictBlock_rdh | /**
* Try to evict the block from {@link BlockCache} by force. We'll call this in a few cases:<br>
* 1. Close an HFile, and clear all cached blocks. <br>
* 2. Call {@link Admin#clearBlockCache(TableName)} to clear all blocks for a given table.<br>
* <p>
* Firstly, we'll try to remove the block from RAMCache, and then... | 3.26 |
hbase_BucketCache_getAllocationFailWarningMessage_rdh | /**
* Prepare and return a warning message for Bucket Allocator Exception
*
* @param fle
* The exception
* @param re
* The RAMQueueEntry for which the exception was thrown.
* @retur... | 3.26 |
hbase_BucketCache_stopWriterThreads_rdh | /**
* Only used in test
*/
void stopWriterThreads() throws InterruptedException {
for (WriterThread v110 : writerThreads) {
v110.disableWriter();
v110.interrupt();
v110.join();
}
} | 3.26 |
hbase_BucketCache_freeSpace_rdh | /**
* Free the space if the used size reaches acceptableSize() or a block of one size couldn't be
* allocated. When freeing the space, we use the LRU algorithm and ensure that at least some
* blocks are evicted
*
* @param why
* Why we are being called
*/
void freeSpace(final String why) {
// Ensure only one freeSpa... | 3.26 |
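The snippet cuts off at the "ensure only one freeSpace" comment; one common way to implement that guard is a tryLock, sketched here with hypothetical names:

```java
import java.util.concurrent.locks.ReentrantLock;

// Sketch of an "only one freeSpace run at a time" guard: tryLock makes
// concurrent callers bail out instead of queueing behind the eviction pass.
class FreeSpaceGuardSketch {
  private final ReentrantLock freeSpaceLock = new ReentrantLock();

  void freeSpace(String why) {
    if (!freeSpaceLock.tryLock()) {
      return; // another thread is already evicting
    }
    try {
      // ... run the LRU eviction pass here ...
    } finally {
      freeSpaceLock.unlock();
    }
  }
}
```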
hbase_BucketCache_evictBucketEntryIfNoRpcReferenced_rdh | /**
* Evict {@link BlockCacheKey} and its corresponding {@link BucketEntry} only if
* {@link BucketEntry#isRpcRef} is false. <br/>
* NOTE: When evicting from {@link BucketCache#backingMap}, only a matched {@link BlockCacheKey} and
* {@link BucketEntry} pair is removed.
*
* @param blockCacheKey
* {@link BlockCach... | 3.26 |
hbase_BucketCache_checkIOErrorIsTolerated_rdh | /**
* Check whether we tolerate IO error this time. If the duration of IOEngine throwing errors
* exceeds ioErrorsDurationTimeTolerated, we will disable the cache
*/
private void checkIOErrorIsTolerated() {
long now = EnvironmentEdgeManager.currentTime();
// Do a single read to a local variable to avoid timing i... | 3.26 |
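A sketch of the tolerance check the javadoc describes, with illustrative field names (HBase's actual implementation differs in detail):

```java
// Remember when IO errors started, and disable the cache once they
// persist longer than the configured threshold.
class IoErrorToleranceSketch {
  private volatile long ioErrorStartTime = -1;
  private volatile boolean cacheEnabled = true;
  private final long ioErrorsToleratedMs;

  IoErrorToleranceSketch(long ioErrorsToleratedMs) {
    this.ioErrorsToleratedMs = ioErrorsToleratedMs;
  }

  void checkIOErrorIsTolerated() {
    long now = System.currentTimeMillis();
    long start = ioErrorStartTime; // single read to avoid timing issues
    if (start > 0) {
      if (cacheEnabled && (now - start) > ioErrorsToleratedMs) {
        cacheEnabled = false; // errors lasted too long: shut the cache off
      }
    } else {
      ioErrorStartTime = now; // first error observed; start the clock
    }
  }
}
```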
hbase_BucketCache_disableCache_rdh | /**
* Used to shut down the cache -or- turn it off in the case of something broken.
*/
private void disableCache() {
if (!cacheEnabled) return;
LOG.info("Disabling cache");
cacheEnabled = false;
ioEngine.shutdown();
this.scheduleThreadPool.shutdown();
for (int i = 0; i < writerThread... | 3.26 |
hbase_BucketCache_m1_rdh | /**
* Get the buffer of the block with the specified key.
*
* @param key
* block's cache key
* @param caching
* true if the caller caches blocks on cache misses
* @param repeat
* Whether this is a repeat lookup for the same block
* @param updateCacheMetrics
* Whether we should update cache metrics or ... | 3.26 |
hbase_BucketCache_deleteFileOnClose_rdh | /**
* Create an input stream that deletes the file after reading it. Use in try-with-resources to
* avoid this pattern where an exception thrown from a finally block may mask earlier exceptions:
*
* <pre>
* File f = ...
* try (FileInputStream fis = new FileInputStream(f)) {
* // use the input stream
* ... | 3.26 |
hbase_BucketCache_evictBlockIfNoRpcReferenced_rdh | /**
* NOTE: This method is only for test.
*/
public boolean evictBlockIfNoRpcReferenced(BlockCacheKey blockCacheKey) {
BucketEntry bucketEntry = backingMap.get(blockCacheKey);
if (bucketEntry == null) {
return false;
}
return evictBucketEntryIfNoRpcReferenced(blockCacheKey, bucketEntry);
} | 3.26 |
hbase_BucketCache_getRAMQueueEntries_rdh | /**
* Blocks until elements available in {@code q} then tries to grab as many as possible before
* returning.
*
* @param receptacle
* Where to stash the elements taken from queue. We clear before we use it just
* in case.
* @param q
* The queue to take from.
* @return {@code receptacle} laden with elemen... | 3.26 |
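The take-then-drain pattern this javadoc describes, as a minimal sketch over java.util.concurrent.BlockingQueue:

```java
import java.util.List;
import java.util.concurrent.BlockingQueue;

// Block for the first element, then opportunistically grab everything
// else already queued.
final class QueueDrainerSketch {
  static <T> List<T> getMany(BlockingQueue<T> q, List<T> receptacle)
      throws InterruptedException {
    receptacle.clear();       // clear, just in case, as the javadoc says
    receptacle.add(q.take()); // block until at least one element exists
    q.drainTo(receptacle);    // then take whatever else is available
    return receptacle;
  }
}
```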
hbase_BucketCache_doEvictBlock_rdh | /**
* Evict the {@link BlockCacheKey} and {@link BucketEntry} from {@link BucketCache#backingMap} and
* {@link BucketCache#ramCache}. <br/>
* NOTE: When evicting from {@link BucketCache#backingMap}, only a matched {@link BlockCacheKey} and
* {@link BucketEntry} pair is removed.
*
* @param cacheKey
* {@link Block... | 3.26 |
hbase_BucketCache_persistToFile_rdh | /**
*
* @see #retrieveFromFile(int[])
*/
@SuppressWarnings(value = "OBL_UNSATISFIED_OBLIGATION", justification = "false positive, try-with-resources ensures close is called.")
void persistToFile() throws IOException {
LOG.debug("Thread {} started persisting bucket cache to file", Thread.currentThread().getName());
... | 3.26 |
hbase_BucketCache_freeEntireBuckets_rdh | /**
* This method will find the buckets that are minimally occupied and are not reference counted and
* will free them completely, without any constraint on the access times of the elements. As a
* process it will completely free at most the number of buckets passed; sometimes it might not, due
* to changing refCoun... | 3.26 |
hbase_BucketCache_doDrain_rdh | /**
* Flush the entries in ramCache to IOEngine and add bucket entries to backingMap. Process all
* entries passed in, even on failure, being sure to remove them from ramCache; otherwise we'll
* never undo the references and we'll OOME.
*
* @param entries
* Presumes list passed in here will be processed by this invocation on... | 3.26 |
hbase_BucketCache_startWriterThreads_rdh | /**
* Called by the constructor to start the writer threads. Used by tests that need to override
* starting the threads.
*/
protected void startWriterThreads() {
for (WriterThread thread : writerThreads) {
thread.start();
}
} | 3.26 |
hbase_BucketCache_finalize_rdh | /**
* Needed mostly for UTs that might run in the same VM and create different BucketCache instances
* on different UT methods.
*/
@Override
protected void finalize() {
if ((cachePersister != null) && (!cachePersister.isInterrupted())) {
cachePersister.interrupt();
}
} | 3.26 |
hbase_BucketCache_remove_rdh | /**
* Defines a {@link Consumer} here, because once the removed entry releases its reference count,
* its ByteBuffers may be recycled, and accessing it outside this method will throw an exception.
* The consumer will access the entry to remove before its reference count is released.
* Notice: don't change its refe... | 3.26 |
hbase_BucketCache_freeBucketEntry_rdh | /**
* Actually free the {@link BucketEntry}; this may only be invoked when the
* {@link BucketEntry#refCnt} becomes 0.
*/
void freeBucketEntry(BucketEntry bucketEntry) {
bucketAllocator.freeBlock(bucketEntry.offset(), bucketEntry.getLength());
realCacheSize.add((-1) * bucketEntry.getLength());
} | 3.26 |
hbase_BucketCache_getIOEngineFromName_rdh | /**
* Get the IOEngine from the IO engine name
*
* @return the IOEngine
*/
private IOEngine getIOEngineFromName(String ioEngineName, long capacity, String persistencePath) throws IOException {
if (ioEngineName.startsWith("file:") || ioEngineName.startsWith("files:")) {
// In order to make the usage simple, we ... | 3.26 |
hbase_RpcClientFactory_createClient_rdh | /**
* Creates a new RpcClient by the class defined in the configuration or falls back to
* RpcClientImpl
*
* @param conf
* configuration
* @param clusterId
* the cluster id
* @param localAddr
* client socket bind address.
* @param metrics
* the connection metrics
* @return newly created RpcClient
*... | 3.26 |
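A sketch of the config-driven factory pattern described above, using Hadoop's Configuration.getClass with a fallback default; the key and the types are illustrative, not HBase's real ones:

```java
import org.apache.hadoop.conf.Configuration;

// Read an implementation class from the Configuration and fall back to
// a default when the key is unset.
public class ClientFactorySketch {
  public interface RpcClient { }

  public static class DefaultRpcClient implements RpcClient { }

  static final String CLIENT_IMPL_KEY = "example.rpc.client.impl";

  public static RpcClient createClient(Configuration conf) {
    Class<? extends RpcClient> clazz =
      conf.getClass(CLIENT_IMPL_KEY, DefaultRpcClient.class, RpcClient.class);
    try {
      return clazz.getDeclaredConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException("Could not instantiate " + clazz, e);
    }
  }
}
```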
hbase_RegionPlacementMaintainer_transform_rdh | /**
* Copy a given matrix into a new matrix, transforming each row index and each column index
* according to the randomization scheme that was created at construction time.
*
* @param matrix
* the cost matrix to transform
* @return a new matrix with row and column indices transformed
*/ public float[][] trans... | 3.26 |
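The transformation the javadoc describes is a permutation of row and column indices; a minimal sketch (rowTransform/colTransform are hypothetical names for the randomization scheme):

```java
// rowTransform[i] and colTransform[j] hold the randomized index for
// original row i and column j of the cost matrix.
final class MatrixTransformSketch {
  static float[][] transform(float[][] matrix, int[] rowTransform, int[] colTransform) {
    int rows = matrix.length;
    int cols = matrix[0].length;
    float[][] result = new float[rows][cols];
    for (int i = 0; i < rows; i++) {
      for (int j = 0; j < cols; j++) {
        result[rowTransform[i]][colTransform[j]] = matrix[i][j];
      }
    }
    return result;
  }
}
```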
hbase_RegionPlacementMaintainer_checkDifferencesWithOldPlan_rdh | /**
* Compares two plans and checks whether the locality dropped or increased (prints the information
* as a string); also prints the baseline locality
*
* @param movesPerTable
* - how many primary regions will move per table
* @param regionLocalityMap
* - locality map from FS
* @param newPlan
* - new assi... | 3.26 |
hbase_RegionPlacementMaintainer_m0_rdh | /**
* Copy a given matrix into a new matrix, transforming each row index and each column index
* according to the inverse of the randomization scheme that was created at construction time.
*
* @param matrix
* the cost matrix to be inverted
* @return a new matrix with row and column indices inverted
*/
public f... | 3.26 |
hbase_RegionPlacementMaintainer_printAssignmentPlan_rdh | /**
* Print the assignment plan to the system output stream
*/
public static void printAssignmentPlan(FavoredNodesPlan plan) {
if (plan == null) return;
LOG.info("========== Start to print the assignment plan ================");
// sort the map based on region info
Map<String, List<ServerName... | 3.26 |
hbase_RegionPlacementMaintainer_m1_rdh | /**
* Update the assignment plan into hbase:meta
*
* @param plan
* the assignments plan to be updated into hbase:meta
* @throws IOException
* if cannot update assignment plan in hbase:meta
*/
public void m1(FavoredNodesPlan plan) throws IOException {
try {
LOG.info("Start to update the hbase:met... | 3.26 |
hbase_RegionPlacementMaintainer_verifyRegionPlacement_rdh | /**
* Verify the region placement is consistent with the assignment plan
*/
public List<AssignmentVerificationReport> verifyRegionPlacement(boolean isDetailMode) throws IOException {
System.out.println("Start to verify the region assignment and " + "generate the verification report");
// Get the region assi... | 3.26 |
hbase_RegionPlacementMaintainer_getFavoredNodeList_rdh | /**
*
* @param favoredNodesStr
* The String of favored nodes
* @return the list of ServerName parsed from the String of favored nodes.
*/
public static List<ServerName> getFavoredNodeList(String favoredNodesStr) {
String[] v169 = StringUtils.split(favoredNodesStr, ",");
if (v169 == null) return null... | 3.26 |
hbase_RegionPlacementMaintainer_invertIndices_rdh | /**
* Given an array where each element {@code indices[i]} represents the randomized column index
* corresponding to randomized row index {@code i}, create a new array with the corresponding
* inverted indices.
*
* @param indices
* an array of transformed indices to be inverted
* @return an array of inverted i... | 3.26 |
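Inverting such an index array is a one-pass permutation inversion; a minimal sketch:

```java
// If indices[i] == j, the inverse maps j back to i. This is all the
// javadoc above describes.
final class PermutationInverterSketch {
  static int[] invertIndices(int[] indices) {
    int[] inverted = new int[indices.length];
    for (int i = 0; i < indices.length; i++) {
      inverted[indices[i]] = i;
    }
    return inverted;
  }
}
```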
hbase_RegionPlacementMaintainer_getRegionsMovement_rdh | /**
* Return how many regions will move per table since their primary RS will change
*
* @param newPlan
* - new AssignmentPlan
* @return how many primaries will move per table
*/
public Map<TableName, Integer> getRegionsMovement(FavoredNodesPlan newPlan) throws IOException {
Map<TableName, Integer> v116 = n... | 3.26 |
hbase_RegionPlacementMaintainer_getRegionAssignmentSnapshot_rdh | /**
* Returns the new RegionAssignmentSnapshot
*/
public SnapshotOfRegionAssignmentFromMeta getRegionAssignmentSnapshot() throws IOException {
SnapshotOfRegionAssignmentFromMeta currentAssignmentShapshot = new SnapshotOfRegionAssignmentFromMeta(ConnectionFactory.createConnection(conf));
currentAssignmentShaps... | 3.26 |
hbase_RegionPlacementMaintainer_genAssignmentPlan_rdh | /**
* Generate the assignment plan for the existing table
*
* @param munkresForSecondaryAndTertiary
* if set to true, the assignment plan for the secondary and
* tertiary will be generated with the Munkres algorithm;
* otherwise it will be generated using
* placeSecondaryAndTertiaryRS
*/
private void genAssignm... | 3.26 |
hbase_CompactionConfiguration_getThrottlePoint_rdh | /**
* Returns ThrottlePoint used for classifying small and large compactions
*/
public long getThrottlePoint() {
return throttlePoint;
} | 3.26 |
hbase_CompactionConfiguration_getMinCompactSize_rdh | /**
* Returns lower bound below which compaction is selected without ratio test
*/
public long getMinCompactSize() {
return minCompactSize;
} | 3.26 |
hbase_CompactionConfiguration_getMaxFilesToCompact_rdh | /**
* Returns upper bound on number of files to be included in minor compactions
*/
public int getMaxFilesToCompact() {
return maxFilesToCompact;
} | 3.26 |
hbase_CompactionConfiguration_getMaxCompactSize_rdh | /**
* Returns upper bound on file size to be included in minor compactions
*/
public long getMaxCompactSize() {
return maxCompactSize;
} | 3.26 |
hbase_CompactionConfiguration_getCompactionRatio_rdh | /**
* Returns Ratio used for compaction
*/
public double getCompactionRatio() {
return compactionRatio;
} | 3.26 |
hbase_CompactionConfiguration_setMinFilesToCompact_rdh | /**
* Set lower bound on number of files to be included in minor compactions
*
* @param threshold
* value to set to
*/
public void setMinFilesToCompact(int threshold) {
minFilesToCompact = threshold;
} | 3.26 |
hbase_CompactionConfiguration_getCompactionRatioOffPeak_rdh | /**
* Returns Off peak Ratio used for compaction
*/
public double getCompactionRatioOffPeak() {
return offPeakCompactionRatio;
} | 3.26 |
hbase_CompactionConfiguration_getMinFilesToCompact_rdh | /**
* Returns lower bound on number of files to be included in minor compactions
*/
public int getMinFilesToCompact() {
return minFilesToCompact;
} | 3.26 |
hbase_CellComparatorImpl_getCellComparator_rdh | /**
* Utility method that makes a guess at the comparator to use based on the passed tableName. Use
* only as a last resort, when no comparator is specified.
*
* @return CellComparator to use going off the {@code tableName} passed.
*/
public static CellComparator getCellComparator(byte[] tableName) {
// FYI, TableName.toBytes does ... | 3.26 |
hbase_CellComparatorImpl_m0_rdh | /**
* Compares the family and qualifier part of the cell
*
* @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise
*/
public final int m0(final Cell left, final Cell right) {
int diff = compareFamilies(left, right);
if (diff != 0) {
return diff;
}
return compareQualifiers(left, rig... | 3.26 |
hbase_CellComparatorImpl_compareRows_rdh | /**
* Compares the rows of the left and right cell. For the hbase:meta case this method is overridden
* such that it can handle hbase:meta cells. The caller should ensure using the appropriate
* comparator for hbase:meta.
*
* @return 0 if both cells are equal, 1 if left cell is bigger than right, -1 otherwise
*/
... | 3.26 |
hbase_CellComparatorImpl_compareFamilies_rdh | /**
* This method will be overridden when comparing cells within a store, to bypass family comparison.
*/
protected int compareFamilies(KeyValue left, int leftFamilyPosition, int leftFamilyLength, ByteBufferKeyValue right, int rightFamilyPosition, int rightFamilyLength) {
return ByteBufferUtils.compareTo(left.getFamilyAr... | 3.26 |
hbase_TableHFileArchiveTracker_clearTables_rdh | /**
* Remove the currently archived tables.
* <p>
* Does some intelligent checking to make sure we don't prematurely create an archive tracker.
*/
private void clearTables() {
getMonitor().clearArchive();
} | 3.26 |
hbase_TableHFileArchiveTracker_keepHFiles_rdh | /**
* Determine if the given table should or should not allow its hfiles to be deleted
*
* @param tableName
* name of the table to check
* @return <tt>true</tt> if its store files should be ret... | 3.26 |
hbase_TableHFileArchiveTracker_safeStopTrackingTable_rdh | /**
* Stop tracking a table. Ensures that the table doesn't exist, but if it does, it attempts to add
* the table back via {@link #addAndReWatchTable(String)} - it's a 'safe' removal.
*
* @param tableZnode
* full zookeeper path to the table to be added
* @throws KeeperException
* if an unexpected zk exception... | 3.26 |
hbase_TableHFileArchiveTracker_getMonitor_rdh | /**
* Returns the tracker for which tables should be archived.
*/
public final HFileArchiveTableMonitor getMonitor() {
return this.monitor;
} | 3.26 |
hbase_TableHFileArchiveTracker_start_rdh | /**
* Start monitoring for archive updates
*
* @throws KeeperException
* on failure to find/create nodes
*/
public void start() throws KeeperException {
// if archiving is enabled, then read in the list of tables to archive
LOG.debug("Starting hfile archive tracker...");
this.checkEnabledAndUpdate();... | 3.26 |
hbase_TableHFileArchiveTracker_addAndReWatchTable_rdh | /**
* Add this table to the tracker and then set a watch on that node.
* <p>
* Handles situation where table is deleted in the time between the update and resetting the watch
* by deleting the table via {@link #safeStopTrackingTable(String)}
*
* @param tableZnode
* full zookeeper path to the table to be added... | 3.26 |
hbase_TableHFileArchiveTracker_updateWatchedTables_rdh | /**
* Read the list of children under the archive znode as table names and then set those tables as
* the list of tables that we should archive
*
* @throws KeeperException
* if there is an unexpected zk exception
*/
private void updateWatchedTables() throws KeeperException {
// get the children and watch f... | 3.26 |
hbase_TableHFileArchiveTracker_stop_rdh | /**
* Stop this tracker and the passed zookeeper
*/
public void stop() {
if (this.stopped) {
return;
}
this.stopped = true;
this.watcher.close();
} | 3.26 |
hbase_Queue_add_rdh | // ======================================================================
// Functions to handle procedure queue
// ======================================================================
public void add(Procedure<?> proc, boolean addToFront) {
if (addToFront) {
... | 3.26 |
hbase_Queue_compareKey_rdh | // ======================================================================
// Generic Helpers
// ======================================================================
public int compareKey(TKey cmpKey) {
return key.compareTo(cmpKey);
} | 3.26 |
hbase_Increment_toString_rdh | /**
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("row=");
sb.append(Bytes.toStringBinary(this.row));
if (this.familyMap.isEmpty()) {
sb.append(", no columns set to be incremented");
return sb.toString();
}
sb.append(", families=");
... | 3.26 |
hbase_Increment_setReturnResults_rdh | /**
*
* @param returnResults
* True (default) if the increment operation should return the results. A
* client that is not interested in the result can save network bandwidth
* setting this to false.
*/
@Override
public Increment setReturnResults(boolean returnResults) {
super.setReturnResults(returnRes... | 3.26 |
hbase_Increment_isReturnResults_rdh | /**
* Returns current setting for returnResults
*/
// This method makes public the superclass's protected method.
@Override
public boolean isReturnResults() {
return super.isReturnResults();
} | 3.26 |
hbase_Increment_getTimeRange_rdh | /**
* Gets the TimeRange used for this increment.
*/
public TimeRange getTimeRange() {
return this.tr;
} | 3.26 |
hbase_Increment_add_rdh | /**
* Add the specified KeyValue to this operation.
*
* @param cell
* individual Cell
* @throws java.io.IOException
* e
*/
@Override
public Increment add(Cell cell) throws IOException {
super.add(cell);
return this;
} | 3.26 |
hbase_Increment_getFamilyMapOfLongs_rdh | /**
* Before 0.95, when you called Increment#getFamilyMap(), you got back a map of families to a list
* of Longs. Now, {@link #getFamilyCellMap()} returns a map of families to lists of Cells. This method has
* been added so you can have the old behavior.
*
* @return Map of families to a Map of qualifiers and their Long incr... | 3.26 |
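A sketch of the old-behavior conversion the javadoc describes, decoding each Cell's value as a long keyed by qualifier (uses the public HBase client/util API; the class name is illustrative):

```java
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

// Walk the family-to-cells map and decode each cell's value as a long
// increment, keyed by qualifier.
final class FamilyMapOfLongsSketch {
  static Map<byte[], NavigableMap<byte[], Long>> toLongs(Map<byte[], List<Cell>> familyCellMap) {
    Map<byte[], NavigableMap<byte[], Long>> result = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], List<Cell>> e : familyCellMap.entrySet()) {
      NavigableMap<byte[], Long> qualifiers = new TreeMap<>(Bytes.BYTES_COMPARATOR);
      for (Cell cell : e.getValue()) {
        qualifiers.put(CellUtil.cloneQualifier(cell), Bytes.toLong(CellUtil.cloneValue(cell)));
      }
      result.put(e.getKey(), qualifiers);
    }
    return result;
  }
}
```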
hbase_Increment_hasFamilies_rdh | /**
* Method for checking if any families have been inserted into this Increment
*
* @return true if familyMap is non-empty, false otherwise
*/
public boolean hasFamilies() {
return !this.familyMap.isEmpty();
} | 3.26 |
hbase_Increment_setTimeRange_rdh | /**
* Sets the TimeRange to be used on the Get for this increment.
* <p>
* This is useful when you have counters that only last for specific periods of time (i.e.
* counters that are partitioned by time). By setting the range of valid times for this increment,
* you can potentially gain some performance with a ... | 3.26 |
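A hedged usage sketch for the time-partitioned-counter case the javadoc mentions; row, family, and qualifier names are made up:

```java
import java.io.IOException;

import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.util.Bytes;

// Only cells whose timestamps fall inside [start, end) are considered
// when reading the current value before applying the delta.
public class TimeRangedIncrementExample {
  public static Increment hourlyCounter(long hourStartMs) throws IOException {
    Increment inc = new Increment(Bytes.toBytes("counter-row"));
    inc.addColumn(Bytes.toBytes("f"), Bytes.toBytes("hits"), 1L);
    inc.setTimeRange(hourStartMs, hourStartMs + 3_600_000L); // one hour
    return inc;
  }
}
```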
hbase_Increment_numFamilies_rdh | /**
* Method for retrieving the number of families to increment from
*
* @return number of families
*/
@Override
public int numFamilies() {
return this.familyMap.size();
} | 3.26 |
hbase_AsyncAggregationClient_sumByRegion_rdh | // the map key is the startRow of the region
private static <R, S, P extends Message, Q extends Message, T extends Message> CompletableFuture<NavigableMap<byte[], S>> sumByRegion(AsyncTable<?> table, ColumnInterpreter<R, S, P, Q, T> ci, Scan scan) {
CompletableFuture<NavigableMap<byte[], S>> future = new CompletableFut... | 3.26 |
hbase_InputStreamBlockDistribution_isStreamUnsupported_rdh | /**
* For tests only, returns whether the stream is unsupported
*/
@RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*/src/test/.*")
boolean isStreamUnsupported() {
return streamUnsupported;
} | 3.26 |
hbase_InputStreamBlockDistribution_isEnabled_rdh | /**
* True if we should derive StoreFile HDFSBlockDistribution from the underlying input stream
*/
public static boolean isEnabled(Configuration conf) {
return conf.getBoolean(HBASE_LOCALITY_INPUTSTREAM_DERIVE_ENABLED, DEFAULT_HBASE_LOCALITY_INPUTSTREAM_DERIVE_ENABLED);
} | 3.26 |
hbase_InputStreamBlockDistribution_getHDFSBlockDistribution_rdh | /**
* Get the HDFSBlocksDistribution derived from the StoreFile input stream, re-computing if cache
* is expired.
*/
public synchronized HDFSBlocksDistribution getHDFSBlockDistribution() {
if ((EnvironmentEdgeManager.currentTime() - lastCachedAt) > cachePeriodMs) {
try {
LOG.debug("Refreshing... | 3.26 |
hbase_InputStreamBlockDistribution_setLastCachedAt_rdh | /**
* For tests only, sets lastCachedAt so we can force a refresh
*/
@RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*/src/test/.*")
synchronized void setLastCachedAt(long timestamp) {
lastCachedAt = timestamp;
} | 3.26 |
hbase_InputStreamBlockDistribution_getCachePeriodMs_rdh | /**
* For tests only, returns the configured cache period
*/
@RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*/src/test/.*")
long getCachePeriodMs() {
return cachePeriodMs;
} | 3.26 |
hbase_TablePermission_implies_rdh | /**
* Check if the given action can be performed on the given table:family.
*
* @param table
* table name
* @param family
* family name
* @param action
* one of [Read, Write, Create, Exec, Admin]
* @return true if can, false otherwise
*/
public boolean implies(TableName table, byte[] family, Action action) {
if... | 3.26 |
hbase_TablePermission_tableFieldsEqual_rdh | /**
* Check if the table fields of two table permissions are equal.
*
* @param tp
* to be checked table permission
* @return true if equals, false otherwise
*/
public boolean tableFieldsEqual(TablePermission tp) {
if (tp == null) {
return false;
}
boolean tEq = ((table == null) && (tp.table == null)) ||
... | 3.26 |
hbase_LruCachedBlockQueue_heapSize_rdh | /**
* Total size of all elements in this queue.
*
* @return size of all elements currently in queue, in bytes
*/
@Override
public long heapSize() {
return heapSize;
} | 3.26 |
hbase_LruCachedBlockQueue_poll_rdh | /**
* Returns The next element in this queue, or {@code null} if the queue is empty.
*/
public LruCachedBlock poll() {
return queue.poll();
} | 3.26 |
hbase_LruCachedBlockQueue_add_rdh | /**
* Attempt to add the specified cached block to this queue.
* <p>
* If the queue is smaller than the max size, or if the specified element is ordered before the
* smallest element in the queue, the element will be added to the queue. Otherwise, there is no
* side effect of this call.
*
* @param cb
* block ... | 3.26 |
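One literal reading of that bounded-add policy, sketched with a TreeSet standing in for HBase's min-max heap (heap-size accounting omitted; the real LruCachedBlockQueue differs in detail, and ties are ignored here):

```java
import java.util.Comparator;
import java.util.TreeSet;

// Accept while below maxSize; once full, accept an element only if it
// is ordered before the current smallest, which is dropped to make room.
class BoundedQueueSketch<T> {
  private final TreeSet<T> set;
  private final Comparator<T> cmp;
  private final int maxSize;

  BoundedQueueSketch(int maxSize, Comparator<T> cmp) {
    this.set = new TreeSet<>(cmp);
    this.cmp = cmp;
    this.maxSize = maxSize;
  }

  void add(T element) {
    if (set.size() < maxSize) {
      set.add(element);
    } else if (cmp.compare(element, set.first()) < 0) {
      set.pollFirst();  // drop the current smallest
      set.add(element); // the new element is ordered before it
    }                   // otherwise: no side effect
  }
}
```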
hbase_LruCachedBlockQueue_pollLast_rdh | /**
* Returns The last element in this queue, or {@code null} if the queue is empty.
*/
public LruCachedBlock pollLast() {
return queue.pollLast();
} | 3.26 |
hbase_MetricsSource_setAgeOfLastShippedOpByTable_rdh | /**
* Set the age of the last edit that was shipped, grouped by table
*
* @param timestamp
* write time of the edit
* @param tableName
* String as group and tableName
*/
public void setAgeOfLastShippedOpByTable(long timestamp, String tableName) {
long age = EnvironmentEdgeManager.currentTime() - timestamp;
... | 3.26 |
hbase_MetricsSource_setWALReaderEditsBufferUsage_rdh | /**
* Sets the amount of memory in bytes used in this RegionServer by edits pending replication.
*/
public void setWALReaderEditsBufferUsage(long usageInBytes) {
globalSourceSource.setWALReaderEditsBufferBytes(usageInBytes);
} | 3.26 |
hbase_MetricsSource_incrementFailedBatches_rdh | /**
* Convenience method to update metrics when a batch of operations has failed.
*/
public void incrementFailedBatches() {
singleSourceSource.incrFailedBatches();
globalSourceSource.incrFailedBatches();
} | 3.26 |
hbase_MetricsSource_shipBatch_rdh | /**
* Convenience method to apply changes to metrics due to shipping a batch of logs.
*
* @param batchSize
* the size of the batch that was shipped to sinks.
* @param hfiles
* total number of hfiles shipped to sinks.
*/
public void shipBatch(long batchSize, int sizeInBytes, long hfiles) {
shipBatch(batchSiz... | 3.26 |
hbase_MetricsSource_m1_rdh | /* Sets the age of oldest log file just for source. */
public void m1(long age) {
singleSourceSource.setOldestWalAge(age);
} | 3.26 |
hbase_MetricsSource_incrSizeOfLogQueue_rdh | /**
* Increment size of the log queue.
*/
public void incrSizeOfLogQueue() {
singleSourceSource.incrSizeOfLogQueue(1);
globalSourceSource.incrSizeOfLogQueue(1);
} | 3.26 |
hbase_MetricsSource_refreshAgeOfLastShippedOp_rdh | /**
* Convenience method to use the last given timestamp to refresh the age of the last edit. Used
* when replication fails and we need to keep that metric accurate.
*
* @param walGroupId
* id of the group to update
*/
public void refreshAgeOfLastShippedOp(String walGroupId) {
Long lastTimestamp = this.lastShi... | 3.26 |
hbase_MetricsSource_getWALReaderEditsBufferUsage_rdh | /**
* Returns the amount of memory in bytes used in this RegionServer by edits pending replication.
*/
public long getWALReaderEditsBufferUsage() {
return globalSourceSource.getWALReaderEditsBufferBytes();
} | 3.26 |
hbase_MetricsSource_getAgeOfLastShippedOp_rdh | /**
* Get AgeOfLastShippedOp
*/
public Long getAgeOfLastShippedOp() {
return singleSourceSource.getLastShippedAge();
} | 3.26 |
hbase_MetricsSource_getUncleanlyClosedWALs_rdh | /**
* Get the value of uncleanlyClosedWAL counter
*/
public long getUncleanlyClosedWALs() {
return singleSourceSource.getUncleanlyClosedWALs();
} | 3.26 |