| name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68) |
|---|---|---|
hbase_StripeStoreFileManager_rowEquals_rdh | /**
* Compare two keys for equality.
*/
private final boolean rowEquals(byte[] k1, byte[] k2) {
return Bytes.equals(k1, 0, k1.length, k2, 0, k2.length);
} | 3.26 |
hbase_StripeStoreFileManager_findStripeIndexByEndRow_rdh | /**
* Finds the stripe index by end row.
*/
private final int findStripeIndexByEndRow(byte[] endRow) {
assert !isInvalid(endRow);
if (isOpen(endRow))
return state.f0.length;
return Arrays.binarySearch(state.f0, endRow, Bytes.BYTES_COMPARATOR);
} | 3.26 |
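A pure-JDK sketch of the `Arrays.binarySearch` contract the method above relies on: a hit returns the stripe index, a miss returns `-(insertionPoint) - 1`. The stripe boundaries are hypothetical and the comparator is inlined to mirror `Bytes.BYTES_COMPARATOR` (unsigned lexicographic order):

```java
import java.util.Arrays;
import java.util.Comparator;

public class EndRowSearchSketch {
  // Unsigned lexicographic comparison, mirroring Bytes.BYTES_COMPARATOR.
  static final Comparator<byte[]> CMP = (a, b) -> {
    int n = Math.min(a.length, b.length);
    for (int i = 0; i < n; i++) {
      int d = (a[i] & 0xff) - (b[i] & 0xff);
      if (d != 0) {
        return d;
      }
    }
    return a.length - b.length;
  };

  public static void main(String[] args) {
    byte[][] endRows = { { 10 }, { 20 }, { 30 } }; // sorted stripe end rows
    // Exact match: returns the stripe index.
    System.out.println(Arrays.binarySearch(endRows, new byte[] { 20 }, CMP)); // 1
    // Miss: returns -(insertionPoint) - 1, here -(2) - 1 = -3.
    System.out.println(Arrays.binarySearch(endRows, new byte[] { 25 }, CMP)); // -3
  }
}
```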
hbase_StripeStoreFileManager_processNewCandidateStripes_rdh | /**
* See {@link #addCompactionResults(Collection, Collection)} - updates the stripe list with new
* candidate stripes/removes old stripes; produces new set of stripe end rows.
*
* @param newStripes
* New stripes - files by end row.
*/
private void processNewCandidateStripes(TreeMap<byte[], HStoreFile> newStrip... | 3.26 |
hbase_StripeStoreFileManager_updateCandidateFilesForRowKeyBefore_rdh | /**
* See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} and
* {@link StoreFileManager#updateCandidateFilesForRowKeyBefore(Iterator, KeyValue, Cell)} for
 * details on these methods.
*/
@Override
public Iterator<HStoreFile> updateCandidateFilesForRowKeyBefore(Iterator<HStoreFile> candidateFiles, ... | 3.26 |
hbase_StripeStoreFileManager_isInvalid_rdh | /**
* Checks whether the key is invalid (e.g. from an L0 file, or non-stripe-compacted files).
*/
private static final boolean isInvalid(byte[] key) {
// No need to use Arrays.equals because INVALID_KEY is null
return key == INVALID_KEY;
} | 3.26 |
hbase_StripeStoreFileManager_getStripeFilesSize_rdh | /**
* Gets the total size of all files in the stripe.
*
* @param stripeIndex
* Stripe index.
* @return Size.
*/
private long getStripeFilesSize(int stripeIndex) {
  long result = 0;
  for (HStoreFile sf : state.stripeFiles.get(stripeIndex)) {
result += sf.getReader().length();
}
return result;
} | 3.26 |
hbase_StripeStoreFileManager_getStripeCopy_rdh | /**
*
* @param index
* Index of the stripe we need.
* @return A lazy stripe copy from current stripes.
*/
private final ArrayList<HStoreFile> getStripeCopy(int index) {
List<HStoreFile> stripeCopy = this.stripeFiles.get(index);
ArrayList<HStoreFile> v81 = null;
if (stripeCopy instanceof ImmutableList<?>) {
v81
... | 3.26 |
hbase_StripeStoreFileManager_nonOpenRowCompare_rdh | /**
* Compare two keys. Keys must not be open (isOpen(row) == false).
*/
private final int nonOpenRowCompare(byte[] k1, byte[] k2) {
assert (!isOpen(k1)) && (!isOpen(k2));
return Bytes.compareTo(k1, k2);
} | 3.26 |
hbase_StripeStoreFileManager_loadUnclassifiedStoreFiles_rdh | /**
* Loads initial store files that were picked up from some physical location pertaining to this
* store (presumably). Unlike adding files after compaction, assumes empty initial sets, and is
* forgiving with regard to stripe constraints - at worst, many/all files will go to level 0.
*
* @param storeFiles
* S... | 3.26 |
hbase_StripeStoreFileManager_getCandidateFilesForRowKeyBefore_rdh | /**
* See {@link StoreFileManager#getCandidateFilesForRowKeyBefore(KeyValue)} for details on this
 * method.
*/
@Override
public Iterator<HStoreFile> getCandidateFilesForRowKeyBefore(final KeyValue targetKey) {
KeyBeforeConcatenatedLists v3 = new KeyBeforeConcatenatedLists();
// Order matters for this call.
... | 3.26 |
hbase_StripeStoreFileManager_isOpen_rdh | /**
* Checks whether the key indicates an open interval boundary (i.e. infinity).
*/
private static final boolean isOpen(byte[] key) {
return (key != null) && (key.length == 0);
} | 3.26 |
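Taken together with `isInvalid` above, the stripe boundary conventions are: `null` (`INVALID_KEY`) marks a key from a non-stripe file such as L0, while an empty array marks an open (infinite) boundary. A tiny illustration with a hypothetical row key:

```java
byte[] invalid = null;           // isInvalid(invalid) == true (INVALID_KEY is null)
byte[] open = new byte[0];       // isOpen(open) == true: open/infinite boundary
byte[] row = "row1".getBytes();  // an ordinary, closed boundary
```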
hbase_RawBytes_encode_rdh | /**
* Write {@code val} into {@code dst}, respecting {@code voff} and {@code vlen}.
*
* @param dst
* the {@link PositionedByteRange} to write to
* @param val
* the value to write to {@code dst}
* @param voff
* the offset in {@code dst} where to write {@code val} to
* @param vlen
* the length of {@code... | 3.26 |
hbase_RawBytes_decode_rdh | /**
* Read a {@code byte[]} from the buffer {@code src}.
*
* @param src
* the {@link PositionedByteRange} to read the {@code byte[]} from
* @param length
* the length to read from the buffer
* @return the {@code byte[]} read from the buffer
*/
public byte[] decode(PositionedByteRange src, int length) {
... | 3.26 |
hbase_RecordFilter_newBuilder_rdh | /* For FilterBuilder */
public static FilterBuilder newBuilder(Field field) {
return new FilterBuilder(field, false);
} | 3.26 |
hbase_RecordFilter_parse_rdh | /* Parse a filter string and build a RecordFilter instance. */
public static RecordFilter parse(String filterString, List<Field> fields, boolean ignoreCase) {
int index = 0;
boolean not = isNot(filterString);
if (not) {
index += 1;
}
StringBuilder fieldString ... | 3.26 |
hbase_FileChangeWatcher_setState_rdh | /**
* Sets the state to <code>newState</code>.
*
* @param newState
* the new state.
*/
private synchronized void setState(State newState) {
state = newState;
this.notifyAll();
} | 3.26 |
hbase_FileChangeWatcher_waitForState_rdh | /**
* Blocks until the current state becomes <code>desiredState</code>. Currently only used by tests,
* thus package-private.
*
* @param desiredState
* the desired state.
* @throws InterruptedException
* if the current thread gets interrupted.
*/
synchronized void waitForState(State desiredState) throws Int... | 3.26 |
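The snippet above is truncated; a minimal sketch of the guarded-wait body it presumably uses, pairing with `setState()` above (which calls `notifyAll()`):

```java
// Classic guarded wait: loop until the condition holds; wait() releases the
// monitor and is woken by setState(...) -> notifyAll().
synchronized void waitForState(State desiredState) throws InterruptedException {
  while (state != desiredState) {
    wait();
  }
}
```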
hbase_FileChangeWatcher_stop_rdh | /**
* Tells the background thread to stop. Does not wait for it to exit.
*/
public void stop() {
if (compareAndSetState(new State[]{ State.RUNNING, State.STARTING }, State.STOPPING)) {
watcherThread.interrupt();
}
} | 3.26 |
hbase_FileChangeWatcher_getState_rdh | /**
* Returns the current {@link FileChangeWatcher.State}.
*
* @return the current state.
*/
public synchronized State getState() {
return state;
} | 3.26 |
hbase_FileChangeWatcher_start_rdh | /**
* Tells the background thread to start. Does not wait for it to be running. Calling this method
* more than once has no effect.
*/
public void start() {
if (!compareAndSetState(State.NEW, State.STARTING)) {
// If previous state was not NEW, start() has already been called.
return;
}
t... | 3.26 |
hbase_ValueFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link ValueFilter}
*
* @param pbBytes
* A pb serialized {@link ValueFilter} instance
* @return An instance of {@link ValueFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static ValueFilt... | 3.26 |
hbase_ValueFilter_toByteArray_rdh | /**
 * Returns the filter serialized using pb.
*/
@Override
public byte[] toByteArray() {
FilterProtos.ValueFilter.Builder builder = FilterProtos.ValueFilter.newBuilder();
builder.setCompareFilter(super.convert());
return builder.build().toByteArray();
} | 3.26 |
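A hedged round-trip through the two methods above. The `ValueFilter(CompareOperator, ByteArrayComparable)` constructor and imports from `org.apache.hadoop.hbase.filter` are assumptions not shown in the snippets:

```java
ValueFilter filter =
    new ValueFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("v1")));
byte[] pb = filter.toByteArray();             // pb-serialized form with the CompareFilter state
ValueFilter copy = ValueFilter.parseFrom(pb); // may throw DeserializationException
// copy now carries the same op/comparator; areSerializedFieldsEqual(copy) verifies this in tests
```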
hbase_ValueFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof ValueFilter)) {... | 3.26 |
hbase_Reference_createBottomReference_rdh | /**
 * Returns a {@link Reference} that points at the bottom half of an hfile
*/
public static Reference createBottomReference(final byte[] splitRow) {
return new Reference(splitRow, Range.bottom);
} | 3.26 |
hbase_Reference_toByteArray_rdh | /**
* Use this when writing to a stream and you want to use the pb mergeDelimitedFrom (w/o the
* delimiter, pb reads to EOF which may not be what you want).
*
* @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
*/
byte[] toByteArray() throws IOException {
return ProtobufUtil.prepen... | 3.26 |
hbase_Reference_createTopReference_rdh | /**
 * Returns a {@link Reference} that points at the top half of an hfile
*/
public static Reference createTopReference(final byte[] splitRow) {
return new Reference(splitRow, Range.top);
} | 3.26 |
hbase_Reference_getFileRegion_rdh | /**
 * Returns the region ({@code Range.top} or {@code Range.bottom}) this reference covers.
 */
public Range getFileRegion() {
return this.region;
} | 3.26 |
hbase_Reference_read_rdh | /**
* Read a Reference from FileSystem.
*
* @return New Reference made from passed <code>p</code>
*/
public static Reference read(final FileSystem fs, final Path p) throws IOException {
InputStream in = fs.open(p);
try {
// I need to be able to move back in the stream if this is not a pb seriali... | 3.26 |
hbase_Reference_toString_rdh | /**
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return "" + this.region;
} | 3.26 |
hbase_Reference_getSplitKey_rdh | /**
 * Returns the split key.
 */
public byte[] getSplitKey() {
  return f0;
} | 3.26 |
hbase_TableRegionModel_toString_rdh | /* (non-Javadoc)
@see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(getName());
sb.append(" [\n id=");
sb.append(id);
sb.append("\n startKey='");
sb.append(Bytes.toString(startKey));
sb.append("'\n endKey='");
sb.... | 3.26 |
hbase_TableRegionModel_m0_rdh | /**
*
* @param startKey
* the start key
*/
public void m0(byte[] startKey) {
this.startKey = startKey;
} | 3.26 |
hbase_TableRegionModel_getEndKey_rdh | /**
* Returns the end key
*/
@XmlAttribute
public byte[] getEndKey() {
return endKey;
} | 3.26 |
hbase_TableRegionModel_setLocation_rdh | /**
*
* @param location
* the name and port of the region server hosting the region
*/
public void setLocation(String location) {
this.location = location;
} | 3.26 |
hbase_TableRegionModel_setId_rdh | /**
*
* @param id
* the region's encoded id
*/
public void setId(long id) {
this.id = id;
} | 3.26 |
hbase_TableRegionModel_getStartKey_rdh | /**
* Returns the start key
*/
@XmlAttribute
public byte[] getStartKey() {
return startKey;
} | 3.26 |
hbase_TableRegionModel_setName_rdh | /**
*
* @param name
* region printable name
*/
public void setName(String name) {
String[] split = name.split(",");
this.table = split[0];
this.startKey = Bytes.toBytes(split[1]);
String tail = split[2];
split = tail.split("\\.");
id = Long.parseLong(split[0]);
} | 3.26 |
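A worked example of the split performed above, using a hypothetical printable region name of the form `<table>,<startKey>,<id>.<encodedName>.`:

```java
String name = "myTable,rowA,1391162600000.abc123.";  // hypothetical region name
String[] split = name.split(",");
String table = split[0];                              // "myTable"
byte[] startKey = Bytes.toBytes(split[1]);            // bytes of "rowA"
String tail = split[2];                               // "1391162600000.abc123."
long id = Long.parseLong(tail.split("\\.")[0]);       // 1391162600000
```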
hbase_TableRegionModel_getName_rdh | /**
* Returns the region name
*/
@XmlAttribute
public String getName() {
  byte[] tableNameAsBytes = Bytes.toBytes(this.table);
  TableName tableName = TableName.valueOf(tableNameAsBytes);
  byte[] nameAsBytes = RegionInfo.createRegionName(tableName, this.startKey, this.id, !tableName.isSystemTable());
  return B... | 3.26 |
hbase_TableRegionModel_getLocation_rdh | /**
* Returns the name and port of the region server hosting the region
*/
@XmlAttribute
public String getLocation() {
return location;
} | 3.26 |
hbase_TableRegionModel_setEndKey_rdh | /**
*
* @param endKey
* the end key
*/
public void setEndKey(byte[] endKey) {
this.endKey = endKey;
} | 3.26 |
hbase_TableRegionModel_getId_rdh | /**
* Returns the encoded region id
*/
@XmlAttribute
public long getId() {
return id;
} | 3.26 |
hbase_VisibilityClient_listLabels_rdh | /**
* Retrieve the list of visibility labels defined in the system.
*
* @param connection
* The Connection instance to use.
* @param regex
* The regular expression to filter which labels are returned.
* @return labels The list of visibility labels defined in the system.
*/
public static ListLabelsResponse l... | 3.26 |
hbase_VisibilityClient_getAuths_rdh | /**
* Get the authorization for a given user
*
* @param connection
* the Connection instance to use
* @param user
* the user
* @return labels the given user is globally authorized for
*/
public static GetAuthsResponse getAuths(Connection connection, final String user) throws Throwable {
try (Table tab... | 3.26 |
hbase_VisibilityClient_addLabels_rdh | /**
* Utility method for adding labels to the system.
*/
public static VisibilityLabelsResponse addLabels(Connection connection, final String[] labels) throws Throwable {
try (Table table = connection.getTable(LABELS_TABLE_NAME)) {
Batch.Call<VisibilityLabelsService,
VisibilityLabelsResponse> call... | 3.26 |
hbase_VisibilityClient_clearAuths_rdh | /**
* Removes given labels from user's globally authorized list of labels.
*/
public static VisibilityLabelsResponse clearAuths(Connection connection, final String[] auths, final String user) throws Throwable {
return setOrClearAuths(connection, auths, user, false);
} | 3.26 |
hbase_VisibilityClient_addLabel_rdh | /**
* Utility method for adding label to the system.
*/
public static VisibilityLabelsResponse addLabel(Connection connection, final String label) throws Throwable
{
return addLabels(connection, new String[]{ label });
} | 3.26 |
hbase_VisibilityClient_setAuths_rdh | /**
* Sets given labels globally authorized for the user.
*/
public static VisibilityLabelsResponse setAuths(Connection connection, final String[] auths, final String user) throws Throwable {
return setOrClearAuths(connection, auths, user, true);
} | 3.26 |
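A hedged end-to-end usage sketch combining the helpers above. `conf`, the enclosing `throws Throwable` (all these helpers declare it), and a superuser connection with the visibility labels feature enabled are assumptions:

```java
// Define a label, grant it to a user, read it back, then revoke it.
try (Connection conn = ConnectionFactory.createConnection(conf)) {
  VisibilityClient.addLabel(conn, "secret");
  VisibilityClient.setAuths(conn, new String[] { "secret" }, "alice");
  GetAuthsResponse auths = VisibilityClient.getAuths(conn, "alice");
  VisibilityClient.clearAuths(conn, new String[] { "secret" }, "alice");
}
```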
hbase_DeleteTableProcedure_cleanRegionsInMeta_rdh | /**
* There may be items for this table still up in hbase:meta in the case where the info:regioninfo
* column was empty because of some write error. Remove ALL rows from hbase:meta that have to do
* with this table.
* <p/>
* See HBASE-12980.
*/
private static void cleanRegionsInMeta(final MasterProcedureEnv env, ... | 3.26 |
hbase_TableName_createTableNameIfNecessary_rdh | /**
* Check that the object does not exist already. There are two reasons for creating the objects
* only once: 1) With 100K regions, the table names take ~20MB. 2) Equals becomes much faster as
* it's resolved with a reference and an int comparison.
*/
private static TableName createTableNameIfNecessary(ByteBuff... | 3.26 |
hbase_TableName_toBytes_rdh | /**
 * Returns a pointer to the TableName as String bytes.
 */
public byte[] toBytes() {
return name;
} | 3.26 |
hbase_TableName_isLegalNamespaceName_rdh | /**
* Valid namespace characters are alphabetic characters, numbers, and underscores.
*/
public static void isLegalNamespaceName(final byte[] namespaceName, final int start, final int end) {
if ((end - start) < 1) {
throw new IllegalArgumentException("Namespace name must not be empty");
}
Stri... | 3.26 |
hbase_TableName_valueOf_rdh | /**
* Construct a TableName
*
* @throws IllegalArgumentException
* if fullName equals old root or old meta. Some code depends on
* this.
*/
public static TableName valueOf(String name) {
for (TableName tn : tableCache) {
if (name.equals(tn.getNameAsString())) {
return tn;
}
... | 3.26 |
hbase_TableName_isLegalFullyQualifiedTableName_rdh | /**
 * Check that the passed byte array, "tableName", is a legal user-space table name.
*
* @return Returns passed <code>tableName</code> param
* @throws IllegalArgumentException
 * if the passed tableName is null or is made of other than 'word'
* characters or underscores: i.e.
* <code>[\p{IsAlphabetic}\p{Digit}.-:]</co... | 3.26 |
hbase_TableName_getADummyTableName_rdh | /**
 * It is used to create table names for the old META and ROOT tables. These tables are not really
* legal tables. They are not added into the cache.
*
* @return a dummy TableName instance (with no validation) for the passed qualifier
*/
private static TableName getADummyTableName(String qualifier) {
return new ... | 3.26 |
hbase_TableName_isLegalTableQualifierName_rdh | /**
* Qualifier names can only contain 'word' characters <code>[\p{IsAlphabetic}\p{Digit}]</code> or
* '_', '.' or '-'. The name may not start with '.' or '-'.
*
* @param qualifierName
* byte array containing the qualifier name
* @param start
* start index
* @param end
* end index (exclusive)
*/
public ... | 3.26 |
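A small sketch exercising the rules above through the `(byte[], int, int)` form shown in the snippet; the example names are hypothetical:

```java
// Word characters plus '_', '.', '-' are fine; a leading '.' or '-' is not.
byte[] ok = Bytes.toBytes("web_logs-2024.v1");
TableName.isLegalTableQualifierName(ok, 0, ok.length);      // passes
byte[] bad = Bytes.toBytes(".starts_with_dot");
try {
  TableName.isLegalTableQualifierName(bad, 0, bad.length);  // throws
} catch (IllegalArgumentException e) {
  // names may not start with '.' or '-'
}
```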
hbase_RawBytesFixedLength_encode_rdh | /**
 * Write {@code val} into {@code dst}, respecting {@code voff} and {@code vlen}.
*/
public int encode(PositionedByteRange dst, byte[] val, int voff, int vlen) {
return ((RawBytes) (base)).encode(dst, val, voff, vlen);
} | 3.26 |
hbase_RawBytesFixedLength_decode_rdh | /**
* Read a {@code byte[]} from the buffer {@code src}.
*/
public byte[] decode(PositionedByteRange src, int length) {
return ((RawBytes) (base)).decode(src, length);
} | 3.26 |
hbase_RowResource_checkAndDelete_rdh | /**
* Validates the input request parameters, parses columns from CellSetModel, and invokes
* checkAndDelete on HTable.
*
* @param model
* instance of CellSetModel
* @retur... | 3.26 |
hbase_RowResource_increment_rdh | /**
* Validates the input request parameters, parses columns from CellSetModel, and invokes Increment
* on HTable.
*
* @param model
* instance of CellSetModel
* @return Response 200 OK, 304 Not modified, 400 Bad request
*/
Response increment(final CellSetModel model) {
Table table = null;
Increment inc... | 3.26 |
hbase_RowResource_append_rdh | /**
* Validates the input request parameters, parses columns from CellSetModel, and invokes Append on
* HTable.
*
* @param model
* instance of CellSetModel
* @return Response 200 OK, 304 Not modified, 400 Bad request
... | 3.26 |
hbase_RowResource_updateBinary_rdh | // This currently supports only update of one row at a time.
Response updateBinary(final byte[] message, final HttpHeaders headers, final boolean replace) {
servlet.getMetrics().incrementRequests(1);
if (servlet.isReadOnly()) {
servlet.getMetrics().incrementFailedPutRequests(1);
return Respo... | 3.26 |
hbase_RowResource_checkAndPut_rdh | /**
* Validates the input request parameters, parses columns from CellSetModel, and invokes
* checkAndPut on HTable.
*
* @param model
* instance of CellSetModel
* @return Response 200 OK, 304 Not modified, 400 Bad request
*/
Response checkAndPut(final CellSetModel model) {
Table table = null;
try {
... | 3.26 |
hbase_CleanerChore_checkAndDeleteFiles_rdh | /**
 * Run the given files through each of the cleaners to see if they should be deleted, deleting
 * them if necessary.
*
* @param files
* List of FileStatus for the files to check (and possibly delete)
* @return true iff successfully deleted all files
 */
private boolean checkAndDeleteFiles(List<FileStatus> files) {... | 3.26 |
hbase_CleanerChore_traverseAndDelete_rdh | /**
 * Attempts to clean up a directory (its subdirectories and files) in a
* {@link java.util.concurrent.ThreadPoolExecutor} concurrently. We can get the final result by
* calling result.get().
*/
private void traverseAndDelete(Path dir, boolean root, CompletableFuture<Boolean> result) {
try {
// Step.1: List... | 3.26 |
hbase_CleanerChore_isEmptyDirDeletable_rdh | /**
 * Check if an empty directory with no subdirs or subfiles can be deleted
*
* @param dir
* Path of the directory
* @return True if the directory can be deleted, otherwise false
*/
private boolean isEmptyDirDeletable(Path dir) {
for (T cleaner : cleanersChain) {
if (cleaner.isStopped() || this.getStopper().isS... | 3.26 |
hbase_CleanerChore_m0_rdh | /**
* Sort the given list in (descending) order of the space each element takes
*
* @param dirs
* the list to sort, element in it should be directory (not file)
*/
private void m0(List<FileStatus> dirs) {
if ((dirs == null) || (dirs.size() < 2)) {
  // no need to sort for empty or single directory
  return;
}
dirs.s... | 3.26 |
hbase_CleanerChore_deleteAction_rdh | /**
* Perform a delete on a specified type.
*
* @param deletion
* a delete
* @param type
* possible values are 'files', 'subdirs', 'dirs'
* @return true if it deleted successfully, false otherwise
 */
private boolean deleteAction(Action<Boolean> deletion, String type, Path dir) {
  boolean deleted;
  try {
    LOG.t... | 3.26 |
hbase_CleanerChore_calculatePoolSize_rdh | /**
* Calculate size for cleaner pool.
*
* @param poolSize
* size from configuration
* @return size of pool after calculation
 */
static int calculatePoolSize(String poolSize) {
if (poolSize.matches("[1-9][0-9]*")) {
// If poolSize is an integer, return it directly,
    // but at most the number... | 3.26 |
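The body above is truncated; a hedged reconstruction of what the comment implies, where a plain integer is capped at the core count and (assumption) a fractional string is treated as a ratio of available processors:

```java
static int calculatePoolSizeSketch(String poolSize) {
  int cores = Runtime.getRuntime().availableProcessors();
  if (poolSize.matches("[1-9][0-9]*")) {
    // Integer: use it directly, but at most the number of processors.
    return Math.min(Integer.parseInt(poolSize), cores);
  }
  // Assumption: a value like "0.5" means "half the available cores".
  return Math.max(1, (int) (cores * Double.parseDouble(poolSize)));
}
```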
hbase_CleanerChore_triggerCleanerNow_rdh | /**
* Trigger the cleaner immediately and return a CompletableFuture for getting the result. Return
* {@code true} means all the old files have been deleted, otherwise {@code false}.
*/
public synchronized CompletableFuture<Boolean> triggerCleanerNow() throws InterruptedException {
for (;;) {
if (f0 != null) {
retu... | 3.26 |
hbase_CleanerChore_deleteFiles_rdh | /**
* Delete the given files
*
* @param filesToDelete
* files to delete
* @return number of deleted files
*/
protected int deleteFiles(Iterable<FileStatus> filesToDelete) {
int deletedFileCount = 0;
for (FileStatus file : filesToDelete) {
Path filePath = file.getPath();
LOG.trace("Removing {} from archive", fil... | 3.26 |
hbase_CleanerChore_shouldExclude_rdh | /**
 * Check if a path should be excluded from cleaning
*/
private boolean shouldExclude(FileStatus f) {
if (!f.isDirectory()) {
return false;
}
if ((excludeDirs != null) && (!excludeDirs.isEmpty())) {
for (String dirPart : excludeDirs) {
// since we make excludeDirs end with '/',
// if a path contains() the dirPart,... | 3.26 |
hbase_CleanerChore_initCleanerChain_rdh | /**
* Validate the file to see if it even belongs in the directory. If it is valid, then the file
* will go through the cleaner delegates, but otherwise the file is just deleted.
*
* @param file
* full {@link Path} of the file to be checked
* @return <tt>true</tt> if the file is valid, <tt>false</tt> otherwise
... | 3.26 |
hbase_CleanerChore_newFileCleaner_rdh | /**
* A utility method to create new instances of LogCleanerDelegate based on the class name of the
* LogCleanerDelegate.
*
* @param className
* fully qualified class name of the LogCleanerDelegate
* @param conf
* used configuration
* @return the new instance
*/
private T newFileCleaner(String className, C... | 3.26 |
hbase_CandidateGenerator_pickRandomRegion_rdh | /**
 * From a list of regions, pick a random one. Null can be returned, which
 * {@link StochasticLoadBalancer#balanceCluster(Map)} recognizes as a signal to try a region move
 * rather than a swap.
*
* @param cluster
* The state of the cluster
* @param server
* index of the server
* @param chanceOfNoSwap
* Chanc... | 3.26 |
hbase_Call_setTimeout_rdh | /**
* called from timeoutTask, prevent self cancel
*/
public void setTimeout(IOException error) {
synchronized(this) {
if (done) {
return;
}
this.done = true;
this.error = error;
}
callback.run(this);
} | 3.26 |
hbase_Call_setException_rdh | /**
* Set the exception when there is an error. Notify the caller the call is done.
*
* @param error
* exception thrown by the call; either local or remote
 */
public void setException(IOException error) {
synchronized(this) {
if (done) {
  return;
}
this.done = true;
this.error ... | 3.26 |
hbase_Call_m0_rdh | /**
* Set the return value when there is no error. Notify the caller the call is done.
*
* @param response
* return value of the call.
* @param cells
* Can be null
*/
public void m0(Message response, final CellScanner cells) {
synchronized(this) {
if (done) {
return;
}
... | 3.26 |
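`setTimeout`, `setException`, and `m0` above all share one completion pattern: record the outcome and flip `done` under the lock, then run the callback outside the synchronized block so user code cannot deadlock on the Call's monitor. A generic sketch of that shape (`complete` is a hypothetical name):

```java
private void complete(Message response, IOException error) {
  synchronized (this) {
    if (done) {
      return;              // first completion wins; later attempts are no-ops
    }
    done = true;
    this.response = response;
    this.error = error;
  }
  callback.run(this);      // deliberately outside the lock
}
```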
hbase_Call_toShortString_rdh | /**
* Builds a simplified {@link #toString()} that includes just the id and method name.
*/
public String toShortString() {
return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).append("id", id).append("methodName", md.getName()).toString();
} | 3.26 |
hbase_MetricsTableAggregateSourceImpl_getMetrics_rdh | /**
* Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all
* expectations of java programmers. Instead of returning anything Hadoop metrics expects
* getMetrics to push the metrics into the collector.
*
* @param collector
* the collector
* @param all
* get all the metrics ... | 3.26 |
hbase_RegionGroupingProvider_getStrategy_rdh | /**
 * Instantiate a strategy from a config property. Requires conf to have already been set (as well
 * as anything the provider might need to read).
*/
RegionGroupingStrategy getStrategy(final Configuration conf, final String key, final String defaultValue) throws IOException {
Class<? extends RegionGroupingStr... | 3.26 |
hbase_LoadBalancerFactory_getDefaultLoadBalancerClass_rdh | /**
* The default {@link LoadBalancer} class.
*
* @return The Class for the default {@link LoadBalancer}.
*/
public static Class<? extends LoadBalancer> getDefaultLoadBalancerClass() {
return StochasticLoadBalancer.class;
} | 3.26 |
hbase_LoadBalancerFactory_getLoadBalancer_rdh | /**
* Create a loadbalancer from the given conf.
*
* @return A {@link LoadBalancer}
*/
public static LoadBalancer getLoadBalancer(Configuration conf) {
  // Create the balancer
Class<? extends LoadBalancer> v0 = conf.getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, getDefaultLoadBalancerClass(), LoadBalancer.... | 3.26 |
hbase_AbstractByteRange_deepCopyToNewArray_rdh | //
// methods for duplicating the current instance
//
@Override
public byte[] deepCopyToNewArray() {
byte[] result = new byte[f1];
System.arraycopy(bytes, f0, result, 0, f1);
return result;
} | 3.26 |
hbase_AbstractByteRange_getVLong_rdh | // Copied from com.google.protobuf.CodedInputStream v2.5.0 readRawVarint64
@Override
public long getVLong(int index) {
int shift = 0;
long result = 0;
while (shift < 64) {
final byte b = get(index++);
result |= ((long) (b & 0x7f)) << shift;
if ((b & 0x80) == 0) {
break;
... | 3.26 |
hbase_AbstractByteRange_compareTo_rdh | /**
* Bitwise comparison of each byte in the array. Unsigned comparison, not paying attention to
* java's signed bytes.
*/
@Override
public int compareTo(ByteRange other) {
return Bytes.compareTo(bytes, f0, f1, other.getBytes(), other.getOffset(), other.getLength());
} | 3.26 |
hbase_AbstractByteRange_getBytes_rdh | //
// methods for managing the backing array and range viewport
//
@Override
public byte[] getBytes() {
return bytes;
} | 3.26 |
hbase_AbstractByteRange_getVLongSize_rdh | // end of copied from protobuf
public static int getVLongSize(long val) {
  int rPos = 0;
while ((val & (~0x7f)) != 0) {
val >>>= 7;
rPos++;
}
return rPos + 1;
} | 3.26 |
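A self-contained round trip through the varint format that `getVLong` and `getVLongSize` above operate on: 7 payload bits per byte, high bit set on every byte except the last. The encoder/decoder names are hypothetical:

```java
import java.io.ByteArrayOutputStream;

public class VarintSketch {
  static byte[] encodeVLong(long val) {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    while ((val & ~0x7fL) != 0) {
      out.write((int) ((val & 0x7f) | 0x80)); // more bytes follow
      val >>>= 7;
    }
    out.write((int) val);                     // last byte: high bit clear
    return out.toByteArray();
  }

  static long decodeVLong(byte[] buf) {       // same loop as getVLong above
    long result = 0;
    int index = 0;
    for (int shift = 0; shift < 64; shift += 7) {
      byte b = buf[index++];
      result |= ((long) (b & 0x7f)) << shift;
      if ((b & 0x80) == 0) {
        break;
      }
    }
    return result;
  }

  public static void main(String[] args) {
    byte[] enc = encodeVLong(300);            // {(byte) 0xAC, 0x02}
    System.out.println(enc.length);           // 2, matching getVLongSize(300)
    System.out.println(decodeVLong(enc));     // 300
  }
}
```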
hbase_AbstractByteRange_get_rdh | //
// methods for retrieving data
//
@Override
public byte get(int index) {
return bytes[f0 + index];
} | 3.26 |
hbase_AbstractByteRange_isEmpty_rdh | /**
* Returns true when {@code range} is of zero length, false otherwise.
*/
public static boolean isEmpty(ByteRange range) {
return (range == null) || (range.getLength() == 0);
} | 3.26 |
hbase_AbstractByteRange_hashCode_rdh | //
// methods used for comparison
//
@Override
public int hashCode() {
if (isHashCached()) {
// hash is already calculated and cached
return hash;
}
if (this.isEmpty()) {
  // return 0 for empty ByteRange
hash = 0;
return hash;
}
int off = f0;
hash = 0;
for (... | 3.26 |
hbase_MemcachedBlockCache_evictBlocksByHfileName_rdh | /**
* This method does nothing so that memcached can handle all evictions.
*/
@Override
public int evictBlocksByHfileName(String hfileName) {
return 0;
} | 3.26 |
hbase_Log4jUtils_getMethod_rdh | // load class when calling to avoid introducing class not found exception on log4j when loading
// this class even without calling any of the methods below.
private static Method getMethod(String methodName, Class<?>... args) {
try {
Class<?> clazz = Class.forName(INTERNAL_UTILS_CLASS_NAME);
re... | 3.26 |
hbase_Log4jUtils_enableDebug_rdh | /**
* Switches the logger for the given class to DEBUG level.
*
* @param clazz
* The class for which to switch to debug logging.
*/
public static void enableDebug(Class<?> clazz) {
setLogLevel(clazz.getName(), "DEBUG");
} | 3.26 |
hbase_Log4jUtils_disableZkAndClientLoggers_rdh | /**
* Disables Zk- and HBase client logging
*/
public static void disableZkAndClientLoggers() {
// disable zookeeper log to avoid it messing up command output
setLogLevel("org.apache.zookeeper", "OFF");
// disable hbase zookeeper tool log to avoid it messing up command output
setLogLevel("org.apache.hadoop.... | 3.26 |
hbase_FileIOEngine_read_rdh | /**
* Transfers data from file to the given byte buffer
*
* @param be
* an {@link BucketEntry} which maintains an (offset, len, refCnt)
* @return the {@link Cacheable} with block data inside.
* @throws IOException
 * if any IO error happens.
*/
@Override
public Cacheable read(BucketEntry be) throws IOExceptio... | 3.26 |
hbase_FileIOEngine_shutdown_rdh | /**
* Close the file
*/
@Override
public void shutdown() {
for (int i = 0; i < filePaths.length; i++) {
try {
if (fileChannels[i] != null) {
fileChannels[i].close();
}
if (rafs[i] != null) {
  rafs[i].close();
}
} catch (IOException ex) {
... | 3.26 |
hbase_FileIOEngine_getAbsoluteOffsetInFile_rdh | /**
* Get the absolute offset in given file with the relative global offset.
*
* @return the absolute offset
 */
private long getAbsoluteOffsetInFile(int fileNum, long globalOffset) {
return globalOffset - (fileNum * sizePerFile);
} | 3.26 |
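A worked example of the offset arithmetic above, with a hypothetical `sizePerFile`; picking the file in the first place is the inverse, an integer division:

```java
long sizePerFile = 1024L * 1024 * 1024;                     // hypothetical: 1 GiB per file
long globalOffset = 3L * sizePerFile + 42;                  // somewhere in the 4th file
int fileNum = (int) (globalOffset / sizePerFile);           // 3
long offsetInFile = globalOffset - (fileNum * sizePerFile); // 42, as in the method
```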
hbase_FileIOEngine_m0_rdh | /**
* Transfers data from the given byte buffer to file
*
* @param srcBuffer
* the given byte buffer from which bytes are to be read
* @param offset
 * The offset in the file at which the first byte is to be written
*/
@Override
public void m0(ByteBuffer srcBuffer, long offset) throws IOException {
m0(ByteBuff... | 3.26 |
hbase_FileIOEngine_isPersistent_rdh | /**
* File IO engine is always able to support persistent storage for the cache
*/
@Override
public boolean isPersistent() {
return true;
} | 3.26 |
hbase_FileIOEngine_sync_rdh | /**
* Sync the data to file after writing
*/
@Override
public void sync() throws IOException {
for (int i = 0; i < fileChannels.length; i++) {
try {
if (fileChannels[i] != null) {
fileChannels[i].force(true);
}
} catch (IOException ie) {
LOG... | 3.26 |