| name (string, length 12-178) | code_snippet (string, length 8-36.5k) | score (float64, 3.26-3.68) |
|---|---|---|
hbase_PrivateCellUtil_writeCellToBuffer_rdh | /**
* Writes a cell to the buffer at the given offset
*
* @param cell
* the cell to be written
* @param buf
* the buffer to which the cell has to be written
* @param offset
* the offset at which the cell should be written
*/
public static void writeCellToBuffer(Cell cell, ByteBuffer buf, int offset) {
... | 3.26 |
hbase_PrivateCellUtil_getValueAsBigDecimal_rdh | /**
* Converts the value bytes of the given cell into a BigDecimal
*
* @return value as BigDecimal
*/
public static BigDecimal getValueAsBigDecimal(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.toBigDecimal(((ByteBufferExtendedCell) (cell)).getValueByteBuffer(), ((ByteBufferExten... | 3.26 |
hbase_PrivateCellUtil_compareQualifier_rdh | /**
* Compare cell's qualifier against given comparator
*
* @param cell
* the cell to use for comparison
* @param comparator
* the {@link CellComparator} to use for comparison
* @return result comparing cell's qualifier
*/
public static int compareQualifier(Cell cell, ByteArrayComparable comparator) {
if (c... | 3.26 |
hbase_PrivateCellUtil_writeFlatKey_rdh | /**
* Writes the Cell's key part as it would have serialized in a KeyValue. The format is <2 bytes
* rk len><rk><1 byte cf len><cf><qualifier><8 bytes
* timestamp><1 byte type>
*/
public static void writeFlatKey(Cell cell, DataOutput out) throws IOException {
short rowL... | 3.26 |
hbase_PrivateCellUtil_isDelete_rdh | /**
* Returns true if the given type is a delete type: a {@link KeyValue.Type#Delete},
* a {@link KeyValue.Type#DeleteFamily} or a {@link KeyValue.Type#DeleteColumn} KeyValue type.
*/
public static boolean isDelete(final byte type) {
return (Type.Delete.getCode() <= type) && (type <= Type.DeleteFamily.getCode());
} | 3.26 |
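Note: the isDelete check above relies on the KeyValue type codes being laid out so that every delete variant falls in the closed range [Delete, DeleteFamily]. A minimal, self-contained sketch of that range check follows; the concrete byte values are assumptions for illustration only, not taken from the snippet.

```java
// Illustrative sketch of the closed-range delete-type check; the byte codes
// below are assumed stand-ins for KeyValue.Type codes, not authoritative values.
public final class DeleteTypeCheckSketch {
  static final byte PUT = 4;
  static final byte DELETE = 8;             // smallest delete code (assumed)
  static final byte DELETE_COLUMN = 12;     // falls inside the range
  static final byte DELETE_FAMILY = 14;     // largest delete code (assumed)

  static boolean isDelete(byte type) {
    // Any code in the inclusive range [DELETE, DELETE_FAMILY] is a delete marker.
    return DELETE <= type && type <= DELETE_FAMILY;
  }

  public static void main(String[] args) {
    System.out.println(isDelete(DELETE));        // true
    System.out.println(isDelete(DELETE_COLUMN)); // true
    System.out.println(isDelete(PUT));           // false
  }
}
```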
hbase_PrivateCellUtil_writeTags_rdh | /**
* Writes the tag from the given cell to the output stream
*
* @param out
* The outputstream to which the data has to be written
* @param cell
* The cell whose contents has to be written
* @param tagsLength
* the tag length
*/
public static void writeTags(OutputStream out, Cell cell, int tagsLength) t... | 3.26 |
hbase_PrivateCellUtil_overlappingKeys_rdh | /**
* Returns true if the first range start1...end1 overlaps with the second range start2...end2,
* assuming the byte arrays represent row keys
*/
public static boolean overlappingKeys(final byte[] start1, final byte[] end1, final byte[] start2, final byte[] end2) {
return (((end2.length == 0) || (start1.length == 0... | 3.26 |
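Note: the truncated overlappingKeys entry above describes the standard range-overlap test, with zero-length arrays meaning an unbounded start or end key. A hedged, dependency-free sketch of that logic, using java.util.Arrays.compareUnsigned in place of HBase's Bytes.compareTo:

```java
import java.util.Arrays;

// Sketch of the documented behaviour: two row-key ranges overlap when each range
// starts before the other ends; empty arrays stand for unbounded keys.
// This is an illustration, not the HBase implementation.
public final class OverlapSketch {
  static boolean overlappingKeys(byte[] start1, byte[] end1, byte[] start2, byte[] end2) {
    boolean firstStartsBeforeSecondEnds =
      end2.length == 0 || start1.length == 0 || Arrays.compareUnsigned(start1, end2) < 0;
    boolean secondStartsBeforeFirstEnds =
      end1.length == 0 || start2.length == 0 || Arrays.compareUnsigned(start2, end1) < 0;
    return firstStartsBeforeSecondEnds && secondStartsBeforeFirstEnds;
  }

  public static void main(String[] args) {
    byte[] a = "a".getBytes(), b = "b".getBytes(), c = "c".getBytes(), d = "d".getBytes();
    System.out.println(overlappingKeys(a, c, b, d)); // true: [a,c) overlaps [b,d)
    System.out.println(overlappingKeys(a, b, c, d)); // false: disjoint ranges
  }
}
```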
hbase_PrivateCellUtil_compareFamily_rdh | /**
* Compare cell's column family against given comparator
*
* @param cell
* the cell to use for comparison
* @param comparator
* the {@link CellComparator} to use for comparison
* @return result comparing cell's column family
*/
public static int compareFamily(Cell cell, ByteArrayComparable comparator) {... | 3.26 |
hbase_PrivateCellUtil_writeQualifierSkippingBytes_rdh | /**
* Writes the qualifier from the given cell to the output stream excluding the common prefix
*
* @param out
* The dataoutputstream to which the data has to be written
* @param cell
* The cell whose contents has to be written
* @param qlength
* the qualifier length
*/
public static void writeQualifierS... | 3.26 |
hbase_PrivateCellUtil_writeRowSkippingBytes_rdh | /**
* Writes the row from the given cell to the output stream excluding the common prefix
*
* @param out
* The dataoutputstream to which the data has to be written
* @param cell
* The cell whose contents has to be written
* @param rlength
* the row length
*/
public static void writeRowSkippingBytes(DataO... | 3.26 |
hbase_PrivateCellUtil_writeRow_rdh | /**
* Writes the row from the given cell to the output stream
*
* @param out
* The outputstream to which the data has to be written
* @param cell
* The cell whose contents has to be written
* @param rlength
* the row length
*/
public static void writeRow(OutputStream out, Cell cell, short rlength) throws... | 3.26 |
hbase_PrivateCellUtil_isDeleteType_rdh | /**
* Returns True if this cell is a {@link KeyValue.Type#Delete} type.
*/
public static boolean isDeleteType(Cell cell) {
return cell.getTypeByte() == Type.Delete.getCode();
} | 3.26 |
hbase_PrivateCellUtil_getValueAsInt_rdh | /**
* Converts the value bytes of the given cell into a int value
*
* @return value as int
*/
public static int getValueAsInt(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
return ByteBufferUtils.toInt(((ByteBufferExtendedCell) (cell)).getValueByteBuffer(), ((ByteBufferExtendedCell) (cell)).getValuePosi... | 3.26 |
hbase_PrivateCellUtil_qualifierStartsWith_rdh | /**
* Finds if the start of the qualifier part of the Cell matches <code>buf</code>
*
* @param left
* the cell with which we need to match the qualifier
* @param startsWith
* the serialized keyvalue format byte[]
* @return true if the qualifier has the same starting characters, false otherwise
*/
public static ... | 3.26 |
hbase_PrivateCellUtil_isDeleteColumnOrFamily_rdh | /**
* Returns True if this cell is a delete family or column type.
*/
public static boolean isDeleteColumnOrFamily(Cell cell) {
int t = cell.getTypeByte();
return (t == Type.DeleteColumn.getCode()) || (t == Type.DeleteFamily.getCode());
} | 3.26 |
hbase_CellCounter_createSubmittableJob_rdh | /**
* Sets up the actual job.
*
* @param conf
* The current configuration.
* @param args
* The command line parameters.
* @return The newly created job.
* @throws IOException
* When setting up the job fails.
*/
public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException ... | 3.26 |
hbase_AsyncRpcRetryingCaller_preProcessError_rdh | // Sub classes can override this method to change the error type, to control the retry logic.
// For example, during rolling upgrading, if we call this newly added method, we will get a
// UnsupportedOperationException(wrapped by a DNRIOE), and sometimes we may want to fallback to
// use the old method first, so the su... | 3.26 |
hbase_DefaultMemStore_snapshot_rdh | /**
* Creates a snapshot of the current memstore. Snapshot must be cleared by call to
* {@link #clearSnapshot(long)}
*/
@Override
public MemStoreSnapshot snapshot() {
// If snapshot currently has entries, then flusher failed or didn't call
// cleanup. Log a warning.
if (!this.snapshot.isEmpty()) {
... | 3.26 |
hbase_DefaultMemStore_getScanners_rdh | /**
* This method is protected under {@link HStore#lock} read lock. <br/>
* Scanners are ordered from 0 (oldest) to newest in increasing order.
*/
@Override
public List<KeyValueScanner> getScanners(long readPt) throws IOException {
List<KeyValueScanner> list = new ArrayList<>();
addToScanners(getActive(), re... | 3.26 |
hbase_DefaultMemStore_main_rdh | /**
* Code to help figure if our approximation of object heap sizes is close enough. See hbase-900.
* Fills memstores then waits so user can heap dump and bring up resultant hprof in something like
* jprofiler which allows you get 'deep size' on objects.
*
* @param args
* main args
*/
public static void main(S... | 3.26 |
hbase_DefaultMemStore_getNextRow_rdh | /**
*
* @param cell
* Find the row that comes after this one. If null, we return the first.
* @return Next row or null if none found.
*/
Cell getNextRow(final Cell cell) {
return getLowest(getNextRow(cell, this.getActive().getCellSet()), getNextRow(cell, this.snapshot.getCellSet()));
} | 3.26 |
hbase_SyncReplicationReplayWALManager_addUsedPeerWorker_rdh | /**
* Will only be called when loading procedures, where we need to construct the used worker set for
* each peer.
*/
public void addUsedPeerWorker(String peerId, ServerName worker) {
usedWorkersByPeer.get(peerId).used(worker);
} | 3.26 |
hbase_SyncReplicationReplayWALManager_acquirePeerWorker_rdh | /**
* Get a worker for replaying remote wal for a given peer. If no worker is available, i.e., all the
* region servers have been used by others, a {@link ProcedureSuspendedException} will be thrown
* to suspend the procedure. And it will be woken up later when there are available workers,
* either by others release a w... | 3.26 |
hbase_MetaRegionLocationCache_loadMetaLocationsFromZk_rdh | /**
* Populates the current snapshot of meta locations from ZK. If no meta znodes exist, it registers
* a watcher on base znode to check for any CREATE/DELETE events on the children.
*
* @param retryCounter
* controls the number of retries and sleep between retries.
*/
private void loadMetaLocationsFromZk(Retry... | 3.26 |
hbase_MetaRegionLocationCache_isValidMetaPath_rdh | /**
* Helper to check if the given 'path' corresponds to a meta znode. This listener is only
* interested in changes to meta znodes.
*/
private boolean isValidMetaPath(String path) {
return watcher.getZNodePaths().isMetaZNodePath(path);
} | 3.26 |
hbase_MetaRegionLocationCache_getMetaRegionLocations_rdh | /**
* Returns Optional list of HRegionLocations for meta replica(s), null if the cache is empty.
*/
public List<HRegionLocation> getMetaRegionLocations() {
ConcurrentNavigableMap<Integer, HRegionLocation> snapshot ... | 3.26 |
hbase_MetaRegionLocationCache_getMetaRegionLocation_rdh | /**
* Gets the HRegionLocation for a given meta replica ID. Renews the watch on the znode for future
* updates.
*
* @param replicaId
* ReplicaID of the region.
* @return HRegionLocation for the meta replica.
* @throws KeeperException
* if there is any issue fetching/parsing the serialized data.
*/
private ... | 3.26 |
hbase_KeyValueUtil_previousKey_rdh | /**
* ************* next/previous *********************************
*/
/**
* Decrement the timestamp. For tests (currently wasteful) Remember timestamps are sorted reverse
* chronologically.
*
* @return previous key
*/
public static KeyValue previousKey(final KeyValue in) {
return createFirstOnRow(CellUtil.clon... | 3.26 |
hbase_KeyValueUtil_keyLength_rdh | /**
* Returns the number of bytes this cell's key part would use if serialized as in
* {@link KeyValue}. Key includes rowkey, family, qualifier, timestamp and type.
*
* @return the key length
*/
public static int keyLength(final Cell cell) {
return keyLength(cell.getRowLength(), cell.getFamilyLength(), ... | 3.26 |
hbase_KeyValueUtil_appendTo_rdh | /**
* Copy the Cell content into the passed buf in KeyValue serialization format.
*/
public static int appendTo(Cell cell, ByteBuffer buf, int offset, boolean withTags) {
offset = ByteBufferUtils.putInt(buf, offset, keyLength(cell));// Key length
offset = ByteBufferUtils.putInt(buf, offset, cell.getValueLen... | 3.26 |
hbase_KeyValueUtil_m2_rdh | /**
* Write out a KeyValue in the manner in which we used to when KeyValue was a Writable.
*
* @return Length written on stream
* @see #create(DataInput) for the inverse function
*/
public static long m2(final KeyValue kv, final DataOutput out) throws IOException {
// This is how the old Writables write used to ... | 3.26 |
hbase_KeyValueUtil_appendToByteArray_rdh | /**
* ************** copy key and value ********************
*/
public static int appendToByteArray(Cell cell, byte[] output, int offset, boolean withTags) {
int pos = offset;
pos = Bytes.putInt(output, pos, keyLength(cell));
pos = Bytes.putInt(output, pos, cell.getValueLength());
pos = append... | 3.26 |
hbase_KeyValueUtil_createFirstOnRow_rdh | /**
* Create a KeyValue for the specified row, family and qualifier that would be smaller than all
* other possible KeyValues that have the same row, family, qualifier. Used for seeking.
*
* @param buffer
* the buffer to use for the new <code>KeyValue</code> object
* @param boffset
* buffer offset
* @param ... | 3.26 |
hbase_KeyValueUtil_copyToNewKeyValue_rdh | /**
* ************** copy the cell to create a new keyvalue ********************
*/
public static KeyValue copyToNewKeyValue(final Cell cell) {
byte[] bytes = copyToNewByteArray(cell);
KeyValue kvCell = new KeyValue(bytes, 0, bytes.length);
kvCell.setSequenceId(cell.getSequenceId());
return kvCell;
} | 3.26 |
hbase_KeyValueUtil_createLastOnRow_rdh | /**
* Creates a KeyValue that is last on the specified row id. That is, every other possible KeyValue
* for the given row would compareTo() less than the result of this call.
*
* @param row
* row key
* @return Last possible KeyValue on passed <code>row</code>
*/
public static KeyValue createLastOnRow(final by... | 3.26 |
hbase_KeyValueUtil_m0_rdh | /**
* ************** length ********************
*/
public static int m0(short rlen, byte flen, int qlen, int vlen, int tlen, boolean withTags) {
if (withTags) {
return ((int) (KeyValue.getKeyValueDataStructureSize(rlen, flen, qlen, vlen, tlen)));
}
return ((int) (KeyValue.getKeyValueDataStructur... | 3.26 |
hbase_KeyValueUtil_m1_rdh | /**
* Create a KeyValue for the specified row, family and qualifier that would be smaller than all
* other possible KeyValues that have the same row,family,qualifier. Used for seeking.
*
* @param row
* - row key (arbitrary byte array)
* @param family
* - family name
* @param qualifier
* - column qualifie... | 3.26 |
hbase_KeyValueUtil_copyKeyToNewByteBuffer_rdh | /**
* The position will be set to the beginning of the new ByteBuffer
*
* @return the Bytebuffer containing the key part of the cell
*/
public static ByteBuffer copyKeyToNewByteBuffer(final Cell cell) {
byte[] bytes = new byte[keyLength(cell)];
appendKeyTo(cell, bytes, 0);
ByteBuffer buffer = ByteBu... | 3.26 |
hbase_KeyValueUtil_createKeyValueFromKey_rdh | /**
* Return a KeyValue made of a byte array that holds the key-only part. Needed to convert hfile
* index members to KeyValues.
*/
public static KeyValue createKeyValueFromKey(final byte[] b, final int o, final int l) {
byte[] newb = new byte[l + KeyValue.ROW_OFFSET];
System.arraycopy(b, o, newb, KeyValue.ROW_OFFSE... | 3.26 |
hbase_KeyValueUtil_nextShallowCopy_rdh | /**
* ************** iterating ******************************
*/
/**
* Creates a new KeyValue object positioned in the supplied ByteBuffer and sets the ByteBuffer's
* position to the start of the next KeyValue. Does not allocate a new array or copy data.
*/
public static KeyValue nextShallowCopy(final ByteBuffer b... | 3.26 |
hbase_CachedMobFile_getReferenceCount_rdh | /**
* Gets the reference of the current mob file. Internal usage, currently it's for testing.
*
* @return The reference of the current mob file.
*/
public long getReferenceCount() {
return this.referenceCount.longValue();
} | 3.26 |
hbase_CachedMobFile_close_rdh | /**
* Decreases the reference of the underlying reader for the mob file. It's not thread-safe. Use
* MobFileCache.closeFile() instead. This underlying reader isn't closed until the reference is 0.
*/
@Override
public void close() throws IOException {
long refs = referenceCount.decrementAndGet();
if (refs ==... | 3.26 |
hbase_CachedMobFile_open_rdh | /**
* Opens the mob file if it's not opened yet and increases the reference. It's not thread-safe.
* Use MobFileCache.openFile() instead. The reader of the mob file is just opened when it's not
* opened no matter how many times this open() method is invoked. The reference is a counter of
* how many times this rea... | 3.26 |
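Note: taken together, the three CachedMobFile entries above (getReferenceCount, close, open) describe a reference-counting protocol: open() bumps the counter, close() drops it, and the underlying reader is released only when the count reaches zero. A self-contained sketch of that protocol follows; the Reader type and field names are illustrative assumptions, not the HBase classes.

```java
import java.util.concurrent.atomic.AtomicLong;

// Sketch of the reference-counting protocol described above: not thread-safe on its
// own (mirroring the docs), and the resource is released only when no refs remain.
final class RefCountedFileSketch {
  interface Reader extends AutoCloseable { }

  private final AtomicLong referenceCount = new AtomicLong(0);
  private Reader reader; // illustrative stand-in for the underlying mob file reader

  void open(Reader readerIfNotYetOpen) {
    if (reader == null) {
      reader = readerIfNotYetOpen; // the reader is only opened once
    }
    referenceCount.incrementAndGet();
  }

  void close() throws Exception {
    long refs = referenceCount.decrementAndGet();
    if (refs <= 0 && reader != null) {
      reader.close(); // release only when the last reference is gone
      reader = null;
    }
  }

  long getReferenceCount() {
    return referenceCount.get();
  }
}
```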
hbase_IncrementalBackupManager_getLogFilesForNewBackup_rdh | /**
* For each region server: get all log files newer than the last timestamps but not newer than the
* newest timestamps.
*
* @param olderTimestamps
* the timestamp for each region server of the last backup.
* @param newestTimestamps
* the timestamp for each region server tha... | 3.26 |
hbase_IncrementalBackupManager_getIncrBackupLogFileMap_rdh | /**
* Obtain the list of logs that need to be copied out for this incremental backup. The list is set
* in BackupInfo.
*
* @return The new HashMap of RS log time stamps after the log roll for this incremental backup.
* @throws IOException
* exception
*/
public Map<String, Long> getIncrBackupLogFileMap() throws ... | 3.26 |
hbase_AsyncRegionLocationCache_getAll_rdh | /**
* Returns all cached RegionLocations
*/
public Collection<RegionLocations> getAll() {
return Collections.unmodifiableCollection(cache.values());
} | 3.26 |
hbase_AsyncRegionLocationCache_cleanProblematicOverlappedRegions_rdh | /**
* When caching a location, the region may have been the result of a merge. Check to see if the
* region's boundaries overlap any other cached locations in a problematic way. Those would have
* been merge parents which no longer exist. We need to proactively clear them out to avoid a case
* where a merged region... | 3.26 |
hbase_AsyncRegionLocationCache_findForRow_rdh | /**
* Finds the RegionLocations for the region with the greatest startKey less than or equal to the
* given row
*
* @param row
* row to find locations
*/
public RegionLocations findForRow(byte[] row, int replicaId) {
Map.Entry<byte[], RegionLocations> entry = cache.floorEntry(row);
if (entry == null)... | 3.26 |
hbase_AsyncRegionLocationCache_size_rdh | /**
* Returns the size of the region locations cache
*/
public int size() {
return cache.size();
} | 3.26 |
hbase_AsyncRegionLocationCache_add_rdh | /**
* Add the given locations to the cache, merging with existing if necessary. Also cleans out any
* previously cached locations which may have been superseded by this one (i.e. in case of merged
* regions). See {@link #cleanProblematicOverlappedRegions(RegionLocations)}
*
* @param locs
* the locations to cach... | 3.26 |
hbase_AsyncRegionLocationCache_removeForServer_rdh | /**
* Removes serverName from all locations in the cache, fully removing any RegionLocations which
* are empty after removing the server from it.
*
* @param serverName
* server to remove from locations
*/
public synchronized void removeForServer(ServerName serverName) {
for (Map.Entry<byte[], RegionLocation... | 3.26 |
hbase_AsyncRegionLocationCache_get_rdh | /**
* Gets the RegionLocations for a given region's startKey. This is a direct lookup, if the key
* does not exist in the cache it will return null.
*
* @param startKey
* region start key to directly look up
*/
public RegionLocations get(byte[] startKey) {
return cache.get(startKey);
} | 3.26 |
hbase_AsyncRegionLocationCache_findForBeforeRow_rdh | /**
* Finds the RegionLocations for the region with the greatest startKey strictly less than the
* given row
*
* @param row
* row to find locations
*/
public RegionLocations findForBeforeRow(byte[] row, int replicaId) {
boolean isEmptyStopRow = isEmptyStopRow(row);
Map.Entry<byte[], RegionLocations> ent... | 3.26 |
hbase_AsyncRegionLocationCache_remove_rdh | /**
* Removes the location from the cache if it exists and can be removed.
*
* @return true if entry was removed
*/
public synchronized boolean remove(HRegionLocation loc) {
byte[] startKey = loc.getRegion().getStartKey();
RegionLocations oldLocs = cache.get(startKey);
if (oldLocs == null) {
ret... | 3.26 |
hbase_Superusers_isSuperUser_rdh | /**
* Check if the current user is a super user
*
* @return true if current user is a super user, false otherwise.
* @param user
* to check
*/
public static boolean isSuperUser(String user) {
return superUsers.contains(user) || superGroups.contains(user);
} | 3.26 |
hbase_Superusers_initialize_rdh | /**
* Should be called only once to pre-load list of super users and super groups from Configuration.
* This operation is idempotent.
*
* @param conf
* configuration to load users from
* @throws IOException
* if unable to initialize lists of superusers or super groups
* @throws IllegalStateException
* if... | 3.26 |
hbase_CombinedBlockCache_getFullyCachedFiles_rdh | /**
* Returns the list of fully cached files
*/
@Override
public Optional<Map<String, Pair<String, Long>>> getFullyCachedFiles() {
return this.l2Cache.getFullyCachedFiles();
} | 3.26 |
hbase_ThriftMetrics_exception_rdh | /**
* Increment the count for a specific exception type. This is called for each exception type that
* is returned to the thrift handler.
*
* @param rawThrowable
* type of exception
*/
public void exception(Throwable rawThrowable) {
source.exception();
Throwable throwable = unwrap(rawThrowable);
/**... | 3.26 |
hbase_Operation_toJSON_rdh | /**
* Produces a JSON object sufficient for description of a query in a debugging or logging context.
*
* @return the produced JSON object, as a string
*/
public String toJSON() throws IOException {
return toJSON(DEFAULT_MAX_COLS);
} | 3.26 |
hbase_Operation_toMap_rdh | /**
* Produces a Map containing a full summary of a query.
*
* @return a map containing parameters of a query (i.e. rows, columns...)
*/
public Map<String, Object> toMap() {
return toMap(DEFAULT_MAX_COLS);
} | 3.26 |
hbase_Operation_toString_rdh | /**
* Produces a string representation of this Operation. It defaults to a JSON representation, but
* falls back to a string representation of the fingerprint and details in the case of a JSON
* encoding failure.
*/
@Override
public String toString() {
return toString(DEFAULT_MAX_COLS);
} | 3.26 |
hbase_MetricsREST_incrementSucessfulGetRequests_rdh | /**
*
* @param inc
* How much to add to sucessfulGetCount.
*/
public void incrementSucessfulGetRequests(final int inc) {
source.incrementSucessfulGetRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementSucessfulAppendRequests_rdh | /**
*
* @param inc
* How much to add to sucessfulAppendCount.
*/
public synchronized void incrementSucessfulAppendRequests(final int inc) {
source.incrementSucessfulAppendRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementSucessfulIncrementRequests_rdh | /**
*
* @param inc
* How much to add to sucessfulIncrementCount.
*/
public synchronized void incrementSucessfulIncrementRequests(final int inc) {
source.incrementSucessfulIncrementRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementFailedDeleteRequests_rdh | /**
*
* @param inc
* How much to add to failedDeleteCount.
*/
public void incrementFailedDeleteRequests(final int inc) {
source.incrementFailedDeleteRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementFailedPutRequests_rdh | /**
*
* @param inc
* How much to add to failedPutCount.
*/
public void incrementFailedPutRequests(final int inc) {
source.incrementFailedPutRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementFailedIncrementRequests_rdh | /**
*
* @param inc
* How much to add to failedIncrementCount.
*/
public void incrementFailedIncrementRequests(final int inc) {
source.incrementFailedIncrementRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementFailedAppendRequests_rdh | /**
*
* @param inc
* How much to add to failedAppendCount.
*/
public void incrementFailedAppendRequests(final int inc) {
source.incrementFailedAppendRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementSucessfulPutRequests_rdh | /**
*
* @param inc
* How much to add to sucessfulPutCount.
*/
public void incrementSucessfulPutRequests(final int inc) {
source.incrementSucessfulPutRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementSucessfulScanRequests_rdh | /**
*
* @param inc
* How much to add to sucessfulScanCount.
*/
public synchronized void incrementSucessfulScanRequests(final int inc) {
source.incrementSucessfulScanRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementFailedGetRequests_rdh | /**
*
* @param inc
* How much to add to failedGetCount.
*/
public void incrementFailedGetRequests(final int inc) {
source.incrementFailedGetRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementFailedScanRequests_rdh | /**
*
* @param inc
* How much to add to failedScanCount.
*/
public void incrementFailedScanRequests(final int inc) {
source.incrementFailedScanRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementRequests_rdh | /**
*
* @param inc
* How much to add to requests.
*/
public void incrementRequests(final int inc) {
source.incrementRequests(inc);
} | 3.26 |
hbase_MetricsREST_incrementSucessfulDeleteRequests_rdh | /**
*
* @param inc
* How much to add to sucessfulDeleteCount.
*/
public void incrementSucessfulDeleteRequests(final int inc) {
source.incrementSucessfulDeleteRequests(inc);
} | 3.26 |
hbase_MemStoreSizing_decMemStoreSize_rdh | /**
* Returns The new dataSize ONLY as a convenience
*/
default long decMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta, int cellsCountDelta) {
return incMemStoreSize(-dataSizeDelta, -heapSizeDelta, -offHeapSizeDelta, -cellsCountDelta);
} | 3.26 |
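Note: decMemStoreSize above is a default-method delegation, an increment with all deltas negated. A minimal interface sketch of that pattern; only the two method names come from the snippet, everything else is illustrative.

```java
// Illustrative sketch: the decrement is expressed as an increment with negated deltas.
interface MemStoreSizingSketch {
  long incMemStoreSize(long dataDelta, long heapDelta, long offHeapDelta, int cellsDelta);

  default long decMemStoreSize(long dataDelta, long heapDelta, long offHeapDelta, int cellsDelta) {
    return incMemStoreSize(-dataDelta, -heapDelta, -offHeapDelta, -cellsDelta);
  }
}
```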
hbase_QuotaUtil_enableTableIfNotEnabled_rdh | /**
* Method to enable a table, if not already enabled. This method suppresses
* {@link TableNotDisabledException} and {@link TableNotFoundException}, if thrown while enabling
* the table.
*
* @param conn
* connection to re-use
* @param tableName
* name of the table to be enabled
*/
public static void enab... | 3.26 |
hbase_QuotaUtil_isQuotaEnabled_rdh | /**
* Returns true if the support for quota is enabled
*/
public static boolean isQuotaEnabled(final Configuration conf) {
return conf.getBoolean(QUOTA_CONF_KEY, QUOTA_ENABLED_DEFAULT);
} | 3.26 |
hbase_QuotaUtil_disableTableIfNotDisabled_rdh | /**
* Method to disable a table, if not already disabled. This method suppresses
* {@link TableNotEnabledException}, if thrown while disabling the table.
*
* @param conn
* connection to re-use
* @param tableName
* table name which has moved into space quota violation
*/
public static void disableTableIfNotDi... | 3.26 |
hbase_QuotaUtil_doPut_rdh | /* ========================================================================= HTable helpers */
private static void doPut(final Connection connection, final Put put) throws IOException {
try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
table.put(put);
}
} | 3.26 |
hbase_QuotaUtil_calculateMutationSize_rdh | /* ========================================================================= Data Size Helpers */
public static long calculateMutationSize(final Mutation mutation) {
long size = 0;
for (Map.Entry<byte[], List<Cell>> entry : mutation.getFamilyCellMap().entrySet()) {
for (Cell cell : entry.getValue()) {
... | 3.26 |
hbase_QuotaUtil_updateClusterQuotaToMachineQuota_rdh | /**
* Convert cluster scope quota to machine scope quota
*
* @param quotas
* the original quota
* @param factor
* factor used to divide cluster limiter to machine limiter
* @return the converted quota whose quota limiters all in machine scope
*/
private static Quotas updateClusterQuotaToMachineQuota(Quotas ... | 3.26 |
hbase_ConfigurationManager_notifyAllObservers_rdh | /**
* The conf object has been repopulated from disk, and we have to notify all the observers that
* have expressed interest in being notified of such changes.
*/
public void notifyAllObservers(Configuration conf) {
LOG.info("Starting to notify all observers that config changed.");
synchronized(configurationObservers) {
for... | 3.26 |
hbase_ConfigurationManager_containsObserver_rdh | /**
* Returns true if contains the observer, for unit test only
*/
public boolean containsObserver(ConfigurationObserver observer) {
synchronized(configurationObservers) {
return configurationObservers.contains(observer);
}
} | 3.26 |
hbase_ConfigurationManager_deregisterObserver_rdh | /**
* Deregister an observer class
*
* @param observer
* to be deregistered.
*/
public void deregisterObserver(ConfigurationObserver observer) {
synchronized(configurationObservers) {
configurationObservers.remove(observer);
if (observer instanceof PropagatingConfigurationObserver) {
... | 3.26 |
hbase_ConfigurationManager_registerObserver_rdh | /**
* Register an observer class
*
* @param observer
* observer to be registered.
*/
public void registerObserver(ConfigurationObserver observer) {
synchronized(configurationObservers) {
configurationObservers.add(observer);
if (observer instanceof PropagatingConfigurationObserver) {
... | 3.26 |
hbase_ConfigurationManager_getNumObservers_rdh | /**
* Returns the number of observers.
*/
public int getNumObservers() {
synchronized(configurationObservers) {
return configurationObservers.size();
}
} | 3.26 |
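Note: the ConfigurationManager entries above (notifyAllObservers, containsObserver, deregisterObserver, registerObserver, getNumObservers) all synchronize on the same observer set. A hedged sketch of that bookkeeping pattern; the Observer interface and method bodies here are illustrative, not the HBase API.

```java
import java.util.HashSet;
import java.util.Set;

// Sketch of observer bookkeeping guarded by a single lock: register, deregister,
// count and notify all synchronize on the same set, as the entries above describe.
final class ConfigObserverSketch {
  interface Observer { void onConfigurationChange(String newValue); }

  private final Set<Observer> observers = new HashSet<>();

  void register(Observer o)   { synchronized (observers) { observers.add(o); } }
  void deregister(Observer o) { synchronized (observers) { observers.remove(o); } }
  int  count()                { synchronized (observers) { return observers.size(); } }

  void notifyObservers(String newValue) {
    synchronized (observers) {
      for (Observer o : observers) {
        o.onConfigurationChange(newValue); // each observer reacts to the new value
      }
    }
  }
}
```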
hbase_CloseChecker_isSizeLimit_rdh | /**
* Check periodically to see if a system stop is requested, each time the written bytes reach the size limit.
*
* @return if true, system stop.
*/
public boolean isSizeLimit(Store store, long bytesWritten) {
if (closeCheckSizeLimit <= 0) {
return false;
}
bytesWrittenProgressForCloseCheck += bytesWritten... | 3.26 |
hbase_CloseChecker_isTimeLimit_rdh | /**
* Check periodically to see if a system stop is requested, based on the elapsed time.
*
* @return if true, system stop.
*/
public boolean isTimeLimit(Store store, long now) {
if (closeCheckTimeLimit <= 0) {
return false;
}
final long elapsedMillis = now - lastCloseCheckMillis;
if (elapsedMillis <=... | 3.26 |
hbase_MultiTableSnapshotInputFormatImpl_setInput_rdh | /**
* Configure conf to read from snapshotScans, with snapshots restored to a subdirectory of
* restoreDir.
* <p/>
* Sets: {@link #RESTORE_DIRS_KEY}, {@link #SNAPSHOT_TO_SCANS_KEY}
*/
public void setInput(Configuration conf, Map<String, Collection<Scan>> snapshotScans, Path restoreDir) throws IOException {
Pat... | 3.26 |
hbase_MultiTableSnapshotInputFormatImpl_getSnapshotDirs_rdh | /**
* Retrieve the directories into which snapshots have been restored from
* ({@link #RESTORE_DIRS_KEY})
*
* @param conf
* Configuration to extract restore directories from
* @return the directories into which snapshots have been restored from
*/
public Map<String, Path> getSnapshotDirs(Configuration conf) th... | 3.26 |
hbase_MultiTableSnapshotInputFormatImpl_setSnapshotToScans_rdh | /**
* Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY})
*/
public void setSnapshotToScans(Configuration conf, Map<String, Collection<Scan>> snapshotScans) throws IOException {
// flatten out snapshotScans for serialization to the job conf
List<Map.Entry<String, String>> snapshotToSeria... | 3.26 |
hbase_MultiTableSnapshotInputFormatImpl_getSplits_rdh | /**
* Return the list of splits extracted from the scans/snapshots pushed to conf by
* {@link #setInput(Configuration, Map, Path)}
*
* @param conf
* Configuration to determine splits from
* @return Return the list of splits extracted from the scans/snapshots pushed to conf
*/
... | 3.26 |
hbase_MultiTableSnapshotInputFormatImpl_restoreSnapshots_rdh | /**
* Restore each (snapshot name, restore directory) pair in snapshotToDir
*
* @param conf
* configuration to restore with
* @param snapshotToDir
* mapping from snapshot names to restore directories
* @param fs
* filesystem to do snapshot restoration on
*/
public void restoreSnapshots(Configuration con... | 3.26 |
hbase_MultiTableSnapshotInputFormatImpl_getSnapshotsToScans_rdh | /**
* Retrieve the snapshot name -> list<scan> mapping pushed to configuration by
* {@link #setSnapshotToScans(Configuration, Map)}
*
* @param conf
* Configuration to extract name -> list<scan> mappings from.
* @return the snapshot name -> list<scan> mapping pushed to configuration
*... | 3.26 |
hbase_MultiTableSnapshotInputFormatImpl_generateSnapshotToRestoreDirMapping_rdh | /**
* Generate a random path underneath baseRestoreDir for each snapshot in snapshots and return a
* map from the snapshot to the restore directory.
*
* @param snapshots
* collection of snapshot names to restore
* @param baseRestoreDir
* base directory under which all snapshots in snapshots will be restored
... | 3.26 |
hbase_MemStoreCompactor_start_rdh | /**
* ---------------------------------------------------------------------- The request to dispatch
* the compaction asynchronous task. The method returns true if compaction was successfully
* dispatched, or false if there is already an ongoing compaction or no segments to compact.
*/
public boolean start() throws... | 3.26 |
hbase_MemStoreCompactor_stop_rdh | /**
* ---------------------------------------------------------------------- The request to cancel
* the compaction asynchronous task The compaction may still happen if the request was sent too
* late Non-blocking request
*/
public void stop() {
isInterrupted.compareAndSet(false, true);
} | 3.26 |
hbase_MemStoreCompactor_releaseResources_rdh | /**
* ---------------------------------------------------------------------- Reset the interruption
* indicator and clear the pointers in order to allow good garbage collection
*/
private void releaseResources() {
isInterrupted.set(false);
versionedList = null;
} | 3.26 |
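Note: the MemStoreCompactor stop() and releaseResources() entries above describe cooperative cancellation through an AtomicBoolean: stop() only requests cancellation, the worker polls the flag, and the flag is reset once resources are released. A minimal sketch of that pattern; the step-based task body is an assumption for illustration.

```java
import java.util.concurrent.atomic.AtomicBoolean;

// Sketch of cooperative cancellation: stop() is non-blocking and may arrive too late
// to prevent work that is already past the last check, mirroring the docs above.
final class CancellableTaskSketch {
  private final AtomicBoolean isInterrupted = new AtomicBoolean(false);

  void stop() {
    isInterrupted.compareAndSet(false, true); // request cancellation, do not wait
  }

  void run(Runnable... steps) {
    for (Runnable step : steps) {
      if (isInterrupted.get()) {
        break; // honour a stop() issued between steps
      }
      step.run();
    }
    releaseResources();
  }

  private void releaseResources() {
    isInterrupted.set(false); // reset the flag so the next run starts clean
  }
}
```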
hbase_MemStoreCompactor_doCompaction_rdh | /**
* ---------------------------------------------------------------------- The worker thread
* performs the compaction asynchronously. The solo (per compactor) thread only reads the
* compaction pipeline. There is at most one thread per memstore instance.
*/
private void doCompaction() {
ImmutableSegment re... | 3.26 |
hbase_MemStoreCompactor_createSubstitution_rdh | /**
* ---------------------------------------------------------------------- Creation of the
* ImmutableSegment either by merge or copy-compact of the segments of the pipeline, based on the
* Compactor Iterator. The new ImmutableSegment is returned.
*/
private ImmutableSegment createSubstitution(MemStoreCompaction... | 3.26 |