| name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68) |
|---|---|---|
hbase_RestCsrfPreventionFilter_isBrowser_rdh | /**
* This method interrogates the User-Agent String and returns whether it refers to a browser. If
* it is not a browser, the requirement for the CSRF header will not be enforced; if it is a
* browser, the requirement will be enforced.
* <p>
* A User-Agent String is considered to be a browser if it matches any... | 3.26 |
hbase_CoprocessorHost_getCoprocessors_rdh | /**
* Used to create a parameter to the HServerLoad constructor so that HServerLoad can provide
* information about the coprocessors loaded by this regionserver. (HBASE-4070: Improve region
* server metrics to report loaded coprocessors to master).
*/
public Set<String> getCoprocessors() {
Set<String> returnVal... | 3.26 |
hbase_CoprocessorHost_findCoprocessor_rdh | /**
* Find coprocessors by full class name or simple name.
*/
public C findCoprocessor(String className) {
for (E env : coprocEnvironments) {
if (env.getInstance().getClass().getName().equals(className) || env.getInstance().getClass().getSimpleName().equals(className)) {
return env.getInstan... | 3.26 |
hbase_CoprocessorHost_findCoprocessors_rdh | /**
* Find the list of coprocessors that extend/implement the given class/interface
*
* @param cls
* the class/interface to look for
* @return the list of coprocessors, or null if not found
*/
public <T extends C> List<T> findCoprocessors(Class<T> cls) {
ArrayList<T> ret = new ArrayList<>();
for (E env : ... | 3.26 |
hbase_CoprocessorHost_findCoprocessorEnvironment_rdh | /**
* Find a coprocessor environment by class name
*
* @param className
* the class name
* @return the coprocessor, or null if not found
*/
public E findCoprocessorEnvironment(String className) {
for (E env : coprocEnvironments) {
if (env.getInstance().getClass().getName().equals(className) || env.... | 3.26 |
hbase_CoprocessorHost_execOperationWithResult_rdh | // Functions to execute observer hooks and handle results (if any)
// ////////////////////////////////////////////////////////////////////////////////////////
/**
* Do not call with an observerOperation that is null! Have the caller check.
*/
protected <O, R> R execOperationWithResult(final ObserverOperationWithResul... | 3.26 |
hbase_CoprocessorHost_getExternalClassLoaders_rdh | /**
* Retrieves the set of classloaders used to instantiate Coprocessor classes defined in external
* jar files.
*
* @return A set of ClassLoader instances
*/
Set<ClassLoader> getExternalClassLoaders() {
Set<ClassLoader> externalClassLoaders = new HashSet<>();
final ClassLoader systemClassLoader = this.get... | 3.26 |
hbase_CoprocessorHost_handleCoprocessorThrowable_rdh | /**
* This is used by coprocessor hooks which are declared to throw IOException (or its subtypes).
* For such hooks, we should handle throwable objects depending on the Throwable's type. Those
* which are instances of IOException should be passed on to the client. This... | 3.26 |
hbase_CoprocessorHost_execShutdown_rdh | /**
* Coprocessor classes can be configured in any order; based on that, priority is set and they
* are chained in sorted order. Should be used for preStop*() hooks, i.e. when the
* master/regionserver is going down.
* This function first calls coprocessor methods (using ObserverOperatio... | 3.26 |
hbase_CoprocessorHost_callObserver_rdh | /**
* In case of coprocessors which have many kinds of observers (e.g., {@link RegionCoprocessor}
* has BulkLoadObserver, RegionObserver, etc), some implementations may not need all observers,
* in which case they will return null for that observer's getter. We simply ignore such cases.
*/
@Override
void callObse... | 3.26 |
hbase_CoprocessorHost_loadSystemCoprocessors_rdh | /**
* Load system coprocessors once only. Read the class names from configuration. Called by
* constructor.
*/
protected void loadSystemCoprocessors(Configuration conf, String confKey) {
boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, DEFAULT_COPROCESSORS_ENABLED);
if (!coprocess... | 3.26 |
hbase_CoprocessorHost_checkAndLoadInstance_rdh | /**
*
* @param implClass
* Implementation class
* @param priority
* priority
* @param conf
* configuration
* @throws java.io.IOException
* Exception
*/
public E checkAndLoadInstance(Class<?> implClass, int priority, Configuration conf) throws IOException {
// create the instance
C impl;
t... | 3.26 |
hbase_CoprocessorDescriptorBuilder_of_rdh | /**
* Used to build the {@link CoprocessorDescriptor}
*/
@InterfaceAudience.Public
public final class CoprocessorDescriptorBuilder {
public static CoprocessorDescriptor of(String className) {
return new CoprocessorDescriptorBuilder(className).build();
} | 3.26 |
hbase_HFileBlockIndex_getNonRootSize_rdh | /**
* Returns the size of this chunk if stored in the non-root index block format
*/
@Override
public int getNonRootSize() {return (Bytes.SIZEOF_INT// Number of entries
+
(Bytes.SIZEOF_INT * (blockKeys.size() + 1)))// Secondary index
+ curTotalNonRootEntrySize;// All entries
} | 3.26 |
hbase_HFileBlockIndex_getEntryBySubEntry_rdh | /**
* Finds the entry corresponding to the deeper-level index block containing the given
* deeper-level entry (a "sub-entry"), assuming a global 0-based ordering of sub-entries.
* <p>
* <i> Implementation note. </i> We are looking for i such that numSubEntriesAt[i - 1] <= k <
* numSubEntriesAt[i], because a deeper... | 3.26 |
hbase_HFileBlockIndex_getNumLevels_rdh | /**
* Returns the number of levels in this block index.
*/
public int getNumLevels() {
return numLevels;
} | 3.26 |
hbase_HFileBlockIndex_addEntry_rdh | /**
* Add one index entry to the current leaf-level block. When the leaf-level block gets large
* enough, it will be flushed to disk as an inline block.
*
* @param firstKey
* the first key of the data block
* @param blockOffset
* the offset of the data block
* @param blockDataSize
* the on-disk size of t... | 3.26 |
hbase_HFileBlockIndex_add_rdh | /**
* The same as {@link #add(byte[], long, int, long)} but does not take the key/value into
* account. Used for single-level indexes.
*
* @see #add(byte[], long, int, long)
*/
@Override
public void add(byte[] firstKey, long blockOffset, int onDiskDataSize) {
add(firstKey, blockOffset, onDiskDataSize, -1);... | 3.26 |
hbase_HFileBlockIndex_shouldWriteBlock_rdh | /**
* Whether there is an inline block ready to be written. In general, we write a leaf-level
* index block as an inline block as soon as its size as serialized in the non-root format
* reaches a certain threshold.
*/
@Override
public boolean shouldWriteBlock(boolean closing) {
if (singleLevelOnly) {
throw new ... | 3.26 |
hbase_HFileBlockIndex_readRootIndex_rdh | /**
* Read in the root-level index from the given input stream. Must match what was written into
* the root level by {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the offset
* that function returned.
*
* @param blk
* the HFile block
* @param numEntries
* the number of root-level index ent... | 3.26 |
hbase_HFileBlockIndex_m2_rdh | /**
* Read in the root-level index from the given input stream. Must match what was written into
* the root level by {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the offset
* that function returned.
*
* @param in
* the buffered input stream or wrapped byte input stream
* @param numEntries
... | 3.26 |
hbase_HFileBlockIndex_getTotalUncompressedSize_rdh | /**
* The total uncompressed size of the root index block, intermediate-level index blocks, and
* leaf-level index blocks.
*
* @return the total uncompressed size of all index blocks
*/
public long getTotalUncompressedSize() {
return totalBlockUncompressedSize;
} | 3.26 |
hbase_HFileBlockIndex_ensureSingleLevel_rdh | /**
*
* @throws IOException
* if we happened to write a multi-level index.
*/
public void ensureSingleLevel() throws IOException {
if (numLevels > 1) {
throw new IOException((((("Wrote a "... | 3.26 |
hbase_HFileBlockIndex_getRootBlockCount_rdh | /**
* Returns the number of root-level blocks in this block index
*/
public int getRootBlockCount() {
return rootCount;
} | 3.26 |
hbase_HFileBlockIndex_writeInlineBlock_rdh | /**
* Write out the current inline index block. Inline blocks are non-root blocks, so the non-root
* index format is used.
*/
@Override
public void writeInlineBlock(DataOutput out) throws IOException {
if (singleLevelOnly)
throw new UnsupportedOperationException(INLINE_BLOCKS_NOT_ALLOWED);
// Write ... | 3.26 |
hbase_HFileBlockIndex_m3_rdh | /**
* Writes the root level and intermediate levels of the block index into the output stream,
* generating the tree from bottom up. Assumes that the leaf level has been inline-written to
* the disk if there is enough dat... | 3.26 |
hbase_HFileBlockIndex_blockWritten_rdh | /**
* Called after an inline block has been written so that we can add an entry referring to that
* block to the parent-level index.
*/
@Override
public void blockWritten(long offset, int onDiskSize, int uncompressedSize) {
// Add leaf index block size
totalBlockOnDiskSize += onDiskSize;
totalBlockUnco... | 3.26 |
hbase_HFileBlockIndex_getNonRootIndexedKey_rdh | /**
* The indexed key at the ith position in the nonRootIndex. The position starts at 0.
*
* @param i
* the ith position
* @return The indexed key at the ith position in the nonRootIndex.
*/
static byte[] getNonRootIndexedKey(ByteBuff nonRootIndex, int i) {
int v26 = nonRootIndex.getInt(0);
if ((i < 0) ... | 3.26 |
hbase_HFileBlockIndex_getRootSize_rdh | /**
* Returns the size of this chunk if stored in the root index block format
*/
@Override
public int getRootSize() {
return curTotalRootSize;
} | 3.26 |
hbase_HFileBlockIndex_getNumEntries_rdh | /**
* Returns the number of entries in this block index chunk
*/
public int getNumEntries() {
return blockKeys.size();
} | 3.26 |
hbase_HFileBlockIndex_getRootBlockKey_rdh | /**
* from 0 to {@link #getRootBlockCount() - 1}
*/
public Cell getRootBlockKey(int i) {
return seeker.getRootBlockKey(i);
} | 3.26 |
hbase_HFileBlockIndex_getNumRootEntries_rdh | /**
* Returns how many block index entries there are in the root level
*/
public final int getNumRootEntries() {
return rootChunk.getNumEntries();
} | 3.26 |
hbase_HFileBlockIndex_writeSingleLevelIndex_rdh | /**
* Writes the block index data as a single level only. Does not do any block framing.
*
* @param out
* the buffered output stream to write the index to. Typically a stream
* writing into an {@link HFile} block.
* @param description
* a short description of the index being written. Used in a log message.... | 3.26 |
hbase_HFileBlockIndex_seekToDataBlock_rdh | /**
* Return the data block which contains this key. This function will only be called when the
* HFile version is larger than 1.
*
* @param key
* the key we are looking for
* @param currentBlock
* the current block, to avoid re-reading the same block
* @param expectedDataBlockEncoding
* the data block e... | 3.26 |
hbase_HFileBlockIndex_getRootBlockOffset_rdh | /**
*
* @param i
* from 0 to {@link #getRootBlockCount() - 1}
*/
public long getRootBlockOffset(int i) {
return blockOffsets[i];
} | 3.26 |
hbase_HFileBlockIndex_ensureNonEmpty_rdh | /**
* Returns true if the block index is empty.
*/
public abstract boolean isEmpty();
/**
* Verifies that the block index is non-empty and throws an {@link IllegalStateException}
* otherwise.
*/
public void ensureNonEmpty() {
if (isEmpty()) {
throw new IllegalStateException("Block index is empty or ... | 3.26 |
hbase_HFileBlockIndex_getMidKeyMetadata_rdh | /**
* Used when writing the root block index of a multi-level block index. Serializes additional
* information that allows the mid-key to be identified efficiently.
*
* @return a few serialized fields for finding the mid-key
* @throws IOException
* if metadata for computing the mid-key could not be created
*/
@Override
public... | 3.26 |
hbase_BalanceResponse_isBalancerRan_rdh | /**
* Returns true if the balancer ran, otherwise false. The balancer may not run for a variety of
* reasons, such as: another balance is running, there are regions in transition, the cluster is
* in maintenance mode, etc.
*/
public boolean isBalancerRan() {
return balancerRan;
} | 3.26 |
hbase_BalanceResponse_newBuilder_rdh | /**
* Creates a new {@link BalanceResponse.Builder}
*/
public static Builder newBuilder() {
return new Builder();
} | 3.26 |
hbase_BalanceResponse_getMovesExecuted_rdh | /**
* The number of moves actually executed by the balancer if it ran. This will be zero if
* {@link #getMovesCalculated()} is zero or if {@link BalanceRequest#isDryRun()} was true. It may
* also not be equal to {@link #getMovesCalculated()} if the balancer was interrupted midway
* through executing the moves due t... | 3.26 |
hbase_BalanceResponse_build_rdh | /**
* Build the {@link BalanceResponse}
*/
public BalanceResponse build() {
return new BalanceResponse(balancerRan, movesCalculated, movesExecuted);
} | 3.26 |
hbase_BalanceResponse_setBalancerRan_rdh | /**
* Set true if the balancer ran, otherwise false. The balancer may not run in some
* circumstances, such as if a balance is already running or there are regions already in
* transition.
*
* @param balancerRan
* true if balancer ran, false otherwise
*/
public Builder setBalancerRan(boolean balancerRan) {
... | 3.26 |
hbase_BalanceResponse_setMovesCalculated_rdh | /**
* Set how many moves were calculated by the balancer. This will be zero if the cluster is
* already balanced.
*
* @param movesCalculated
* moves calculated by the balance run
*/
public Builder setMovesCalculated(int movesCalculated) {
this.movesCalculated = movesCalculated;
return this;
} | 3.26 |
hbase_BalanceResponse_setMovesExecuted_rdh | /**
* Set how many of the calculated moves were actually executed by the balancer. This should be
* zero if the balancer is run with {@link BalanceRequest#isDryRun()}. It may also not equal
* movesCalculated if the balancer ran out of time while executing the moves.
*
* @param movesExecuted
* moves executed by ... | 3.26 |
hbase_BalanceResponse_getMovesCalculated_rdh | /**
* The number of moves calculated by the balancer if {@link #isBalancerRan()} is true. This will
* be zero if no better balance could be found.
*/
public int getMovesCalculated() {
return movesCalculated;
} | 3.26 |
hbase_FilterList_getOperator_rdh | /**
* Get the operator.
*/
public Operator getOperator() {
return operator;
} | 3.26 |
hbase_FilterList_addFilter_rdh | /**
* Add a filter.
*
* @param filter
* another filter
*/
public void addFilter(Filter filter) {
addFilter(Collections.singletonList(filter));
} | 3.26 |
hbase_FilterList_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() throws IOException {
FilterProtos.FilterList.Builder builder = FilterProtos.FilterList.newBuilder();
builder.setOperator(FilterList.Operator.valueOf(operator.name()));
ArrayList<Filter> filters = filterListBase.getFilter... | 3.26 |
hbase_FilterList_parseFrom_rdh | /**
* Parse a serialized representation of {@link FilterList}
*
* @param pbBytes
* A pb serialized {@link FilterList} instance
* @return An instance of {@link FilterList} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public static FilterList pa... | 3.26 |
hbase_FilterList_getFilters_rdh | /**
* Get the filters.
*/
public List<Filter> getFilters() {
return filterListBase.getFilters();
} | 3.26 |
hbase_FilterList_filterRowCells_rdh | /**
* Filters that never filter by modifying the returned List of Cells can inherit this
* implementation that does nothing. {@inheritDoc }
*/
@Override
public void filterRowCells(List<Cell> cells) throws IOException {
filterListBase.filterRowCells(cells);
} | 3.26 |
hbase_FilterList_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter other) {
if (other == this) {
return true;
}
if (!(other instanceof FilterList)) {
return... | 3.26 |
hbase_ReportMakingVisitor_isTableDisabled_rdh | /**
* Returns True if table is disabled or disabling; defaults false!
*/
boolean isTableDisabled(RegionInfo ri) {
if (ri == null) {
return false;
}
if (this.services == null) {
return false;
}
if (this.services.getTableStateManager() == null) {
return false;
}
Ta... | 3.26 |
hbase_ReportMakingVisitor_getReport_rdh | /**
* Do not call until after {@link #close()}. Will throw a {@link RuntimeException} if you do.
*/
CatalogJanitorReport getReport() {
if (!this.closed) {
throw new RuntimeException("Report not ready until after close()");
}
return this.report;
} | 3.26 |
hbase_ReportMakingVisitor_isTableTransition_rdh | /**
* Returns True iff first row in hbase:meta or if we've broached a new table in hbase:meta
*/
private boolean isTableTransition(RegionInfo ri) {
return (this.previous == null) || (!this.previous.getTable().equals(ri.getTable()));
} | 3.26 |
hbase_ReportMakingVisitor_checkServer_rdh | /**
* Run through referenced servers and save off unknown and the dead.
*/
private void checkServer(RegionLocations locations) {
if (this.services == null) {
// Can't do this test if no services.
return;
... | 3.26 |
hbase_GlobalMetricRegistriesAdapter_init_rdh | /**
* Make sure that this global MetricSource for hbase-metrics module based metrics is initialized.
* This should be called only once.
*/
public static GlobalMetricRegistriesAdapter init() {
return new GlobalMetricRegistriesAdapter();
} | 3.26 |
hbase_HBaseSnapshotException_getSnapshotDescription_rdh | /**
* Returns the description of the snapshot that is being failed
*/
public SnapshotDescription getSnapshotDescription() {
return this.description;
} | 3.26 |
hbase_TableSplit_readFields_rdh | /**
* Reads the values of each field.
*
* @param in
* The input to read from.
* @throws IOException
* When reading the input fails.
*/
@Override
public void readFields(DataInput in) throws IOException {
Version version = Version.UNVERSIONED;
// TableSplit was not versioned in the beginning.
// In order to in... | 3.26 |
hbase_TableSplit_getEncodedRegionName_rdh | /**
* Returns the region's encoded name.
*
* @return The region's encoded name.
*/
public String getEncodedRegionName() {
return encodedRegionName;
} | 3.26 |
hbase_TableSplit_getLength_rdh | /**
* Returns the length of the split.
*
* @return The length of the split.
* @see org.apache.hadoop.mapreduce.InputSplit#getLength()
*/
@Override
public long getLength() {
return length;
} | 3.26 |
hbase_TableSplit_write_rdh | /**
* Writes the field values to the output.
*
* @param out
* The output to write to.
* @throws IOException
* When writing the values to the output fails.
*/
@Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, VERSION.code);
Bytes.writeByteArray(out, tableName.getNam... | 3.26 |
hbase_TableSplit_getEndRow_rdh | /**
* Returns the end row.
*
* @return The end row.
*/
public byte[] getEndRow() {
return endRow;
} | 3.26 |
hbase_TableSplit_toString_rdh | /**
* Returns the details about this instance as a string.
*
* @return The values of this instance as a string.
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("Split(");
sb.append("tablename=").append(tableName);
// null scan input is rep... | 3.26 |
hbase_TableSplit_getStartRow_rdh | /**
* Returns the start row.
*
* @return The start row.
*/
public byte[] getStartRow() {
return startRow;
} | 3.26 |
hbase_TableSplit_compareTo_rdh | /**
* Compares this split against the given one.
*
* @param split
* The split to compare to.
* @return The result of the comparison.
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override
public int compareTo(TableSplit split) {
// If the table name of the two splits is the same then compare sta... | 3.26 |
hbase_TableSplit_getLocations_rdh | /**
* Returns the region's location as an array.
*
* @return The array containing the region location.
* @see org.apache.hadoop.mapreduce.InputSplit#getLocations()
*/
@Override
public String[] getLocations() {
return new String[] { regionLocation };
} | 3.26 |
hbase_TableSplit_getScan_rdh | /**
* Returns a Scan object from the stored string representation.
*
* @return Returns a Scan object based on the stored scanner.
* @throws IOException
* throws IOException if deserialization fails
*/
public Scan getScan() throws IOException {
return TableMapReduceUtil.convertStringToScan(this.scan);
} | 3.26 |
hbase_TableSplit_getRegionLocation_rdh | /**
* Returns the region location.
*
* @return The region's location.
*/
public String getRegionLocation() {
return regionLocation;
} | 3.26 |
hbase_TableSplit_getTableName_rdh | /**
* Returns the table name converted to a byte array.
*
* @see #getTable()
* @return The table name.
*/
public byte[] getTableName() {
return tableName.getName();
} | 3.26 |
hbase_TableSplit_getTable_rdh | /**
* Returns the table name.
*
* @return The table name.
*/
public TableName getTable() {
// Usually a method returning a TableName would be named getTableName, but we can't use that
// name here because a (since deprecated) getTableName returning a byte[] already exists.
return tableName;
} | 3.26 |
hbase_FSTableDescriptors_createTableDescriptor_rdh | /**
* Create new TableDescriptor in HDFS. Happens when we are creating table. If forceCreation is
* true, any existing table descriptor will be overwritten.
*
* @return True if we successfully created file.
*/
public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation) thr... | 3.26 |
hbase_FSTableDescriptors_m2_rdh | /**
* Deletes files matching the table info file pattern within the given directory whose sequenceId
* is at most the given max sequenceId.
*/
private static void m2(FileSystem fs, Path dir, int maxSequenceId) throws IOException {
FileStatus[] status = CommonFSUtils.listStatus(fs, dir, T... | 3.26 |
hbase_FSTableDescriptors_getTableDir_rdh | /**
* Return the table directory in HDFS
*/
private Path getTableDir(TableName tableName) {
return CommonFSUtils.getTableDir(rootdir, tableName);
} | 3.26 |
hbase_FSTableDescriptors_remove_rdh | /**
* Removes the table descriptor from the local cache and returns it. If not in read only mode, it
* also deletes the entire table directory(!) from the FileSystem.
*/
@Override
public TableDescriptor remove(final TableName tablename) throws IOException {
if (fsreadonly) {
throw new NotImplementedExcep... | 3.26 |
hbase_FSTableDescriptors_getAll_rdh | /**
* Returns a map from table name to table descriptor for all tables.
*/
@Override
public Map<String, TableDescriptor> getAll() throws IOException {
Map<String, TableDescriptor> tds = new ConcurrentSkipListMap<>();
if (fsvisited) {
for (Map.Entry<TableName, TableDescriptor> entry : this.f1.entrySet()) {
... | 3.26 |
hbase_FSTableDescriptors_getTableInfoFileName_rdh | /**
* Returns the name of the tableinfo file.
*/
@RestrictedApi(explanation = "Should only be called in tests or self", link = "", allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java")
static String getTableInfoFileName(int sequenceId, byte[] content) {
return (((TABLEINFO_FILE_PREFIX + ".") + formatTableInfoS... | 3.26 |
hbase_FSTableDescriptors_get_rdh | /**
* Get the current table descriptor for the given table, or null if none exists.
* <p/>
* Uses a local cache of the descriptor but still checks the filesystem on each call if
* {@link #fsvisited} is not {@code true}, i.e., we haven't done a full scan yet, to see if a newer
* file has been created since the cache... | 3.26 |
hbase_FSTableDescriptors_getTableDescriptorFromFs_rdh | /**
* Returns the latest table descriptor for the table located at the given directory directly from
* the file system if it exists.
*/
public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
throws IOException {
return getTableDescriptorFromFs(fs, tableDir, true).map(Pair::getSec... | 3.26 |
hbase_FSTableDescriptors_writeTableDescriptor_rdh | /**
* Attempts to write a new table descriptor to the given table's directory. It begins at the
* currentSequenceId + 1 and tries 10 times to find a new sequence number not already in use.
* <p/>
* Removes the current descriptor file if passed in.
*
* @return Descriptor file or null if we failed write.
*/
privat... | 3.26 |
hbase_FSTableDescriptors_m0_rdh | /**
* Returns the current sequence id and file length or 0 if none found.
*
* @param p
* Path to a <code>.tableinfo</code> file.
*/
@RestrictedApi(explanation = "Should only be called in tests or self", link = "", allowedOnPath = ".*/src/test/.*|.*/FSTableDescriptors\\.java")
static SequenceIdAndFileLength m0(P... | 3.26 |
hbase_FSTableDescriptors_getByNamespace_rdh | /**
* Find descriptors by namespace.
*
* @see #get(org.apache.hadoop.hbase.TableName)
*/
@Override
public Map<String, TableDescriptor> getByNamespace(String name) throws IOException {
Map<String, TableDescriptor> htds = new TreeMap<>();
List<Path> tableDirs = FSUtils.getLocalTableDirs(fs, CommonFSUtils.get... | 3.26 |
hbase_ProcedureUtil_convertToProcedure_rdh | /**
* Helper to convert the protobuf procedure.
* <p/>
* Used by ProcedureStore implementations.
* <p/>
* TODO: OPTIMIZATION: some of the field never change during the execution (e.g. className,
* procId, parentId, ...). We can split in 'data' and 'state', and the store may take advantage of
* it by storing the ... | 3.26 |
hbase_ProcedureUtil_convertToProtoResourceType_rdh | // ==========================================================================
// convert from LockedResource object
// ==========================================================================
public static LockedResourceType convertToProtoResourceType(LockedResourceType resourceType) {
return LockServiceProtos.Locked... | 3.26 |
hbase_ProcedureUtil_createRetryCounter_rdh | /**
* Get a retry counter for getting the backoff time. We will use the
* {@link ExponentialBackoffPolicyWithLimit} policy, and the base unit is 1 second, max sleep time
* is 10 minutes by default.
* <p/>
* For UTs, you can set the {@link #PROCEDURE_RETRY_SLEEP_INTERVAL_MS} and
* {@link #PROCEDURE_RETRY_MAX_SLEEP... | 3.26 |
hbase_Import_instantiateFilter_rdh | /**
* Create a {@link Filter} to apply to all incoming keys ({@link KeyValue KeyValues}) to
* optionally exclude them from the job output
*
* @param conf
* {@link Configuration} from which to load the filter
* @return the filter to use for the task, or <tt>null</tt> if no filter should be used
* @throws Illega... | 3.26 |
hbase_Import_m1_rdh | // helper: create a new KeyValue based on CF rename map
private static Cell m1(Cell kv, Map<byte[], byte[]> cfRenameMap) {
if (cfRenameMap != null) {
// If there's a rename mapping for this CF, create a new KeyValue
byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv));
if (newCfName != null) {
List<Tag> tags =... | 3.26 |
hbase_Import_map_rdh | /**
*
* @param row
* The current table row key.
* @param value
* The columns.
* @param context
* The current context.
* @throws IOException
* When something is broken with the data.
*/
@Override
public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException {
try {
... | 3.26 |
hbase_Import_main_rdh | /**
* Main entry point.
*
* @param args
* The command line parameters.
* @throws Exception
* When running the job fails.
*/
public static void main(String[] args) throws Exception {
int errCode = ToolRunner.run(HBaseConfiguration.create(), new Import(), args);
System.exit(errCode);
} | 3.26 |
hbase_Import_createCfRenameMap_rdh | // helper: make a map from sourceCfName to destCfName by parsing a config key
private static Map<byte[], byte[]> createCfRenameMap(Configuration conf) {
Map<byte[], byte[]> cfRenameMap = null;
String allMappingsPropVal = conf.get(CF_RENAME_PROP);
if (allMappingsPropVal != null) {
// The conf value format should be sou... | 3.26 |
hbase_Import_createSubmittableJob_rdh | /**
* Sets up the actual job.
*
* @param conf
* The current configuration.
* @param args
* The command line parameters.
* @return The newly created job.
* @throws IOException
* When setting up the job fails.
*/
public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException... | 3.26 |
hbase_Import_addFilterAndArguments_rdh | /**
* Add a Filter to be instantiated on import
*
* @param conf
* Configuration to update (will be passed to the job)
* @param clazz
* {@link Filter} subclass to instantiate on the server.
* @param filterArgs
* List of arguments to pass to the filter on instantiation
*/
public static void addFilterAndArg... | 3.26 |
hbase_Import_usage_rdh | /* @param errorMsg Error message. Can be null. */
private static void usage(final String errorMsg) {
if ((errorMsg != null) && (errorMsg.length() > 0)) {
System.err.println("ERROR: " + errorMsg);
}
System.err.println("Usage: Import [options] <tablename> <inputdir>");
System.err.println("By default Import will load data... | 3.26 |
hbase_Import_configureCfRenaming_rdh | /**
* <p>
* Sets a configuration property with key {@link #CF_RENAME_PROP} in conf that tells the mapper
* how to rename column families.
* <p>
* Alternately, instead of calling this function, you could set the configuration key
* {@link #CF_RENAME_PROP} yourself. The value should look like
*
* <pre>
* srcCf1:... | 3.26 |
hbase_Import_flushRegionsIfNecessary_rdh | /**
* If the durability is set to {@link Durability#SKIP_WAL} and the data is imported to hbase, we
* need to flush all the regions of the table as the data is held in memory and is also not
* present in the Write Ahead Log to replay in scenarios of a crash. This method flushes all the
* regions of the table in the... | 3.26 |
hbase_ColumnPaginationFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.ColumnPaginationFilter.Builder builder = FilterProtos.ColumnPaginationFilter.newBuilder();
builder.setLimit(this.limit);
if (this.offset >= 0) {
builder.setOffset(this.offset);
}
if (this.columnOffset != null) {... | 3.26 |
hbase_ColumnPaginationFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof ColumnPaginationFilter)) {
return false;
}
ColumnP... | 3.26 |
hbase_ColumnPaginationFilter_parseFrom_rdh | /**
* Parse a serialized representation of {@link ColumnPaginationFilter}
*
* @param pbBytes
* A pb serialized {@link ColumnPaginationFilter} instance
* @return An instance of {@link ColumnPaginationFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByte... | 3.26 |
hbase_Lz4Codec_getBufferSize_rdh | // Package private
static int getBufferSize(Configuration conf) {
return conf.getInt(LZ4_BUFFER_SIZE_KEY, conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT));
} | 3.26 |
hbase_AggregateImplementation_getMin_rdh | /**
* Gives the minimum for a given combination of column qualifier and column family, in the given
* row range as defined in the Scan object. In its current implementation, it takes one column
* family and one column qualifier (if provided). In case of null column qualifier, minimum value
* for the entire column f... | 3.26 |
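
The snippets above document several public client APIs that are easiest to understand from short usage sketches. The sketches below are illustrative only: any class or method name that does not appear in the snippets is an assumption, noted per example.

Building on `hbase_CoprocessorDescriptorBuilder_of_rdh`: a minimal sketch of declaring a coprocessor on a table descriptor. The observer class name `org.example.MyRegionObserver` is hypothetical, and `TableDescriptorBuilder.setCoprocessor(CoprocessorDescriptor)` is assumed from the HBase 2.x client API.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.CoprocessorDescriptor;
import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorDescriptorSketch {
  public static TableDescriptor describe() {
    // of() builds a CoprocessorDescriptor with default priority, jar path and properties.
    CoprocessorDescriptor cd =
      CoprocessorDescriptorBuilder.of("org.example.MyRegionObserver"); // hypothetical class
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo")).setCoprocessor(cd).build();
  }
}
```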
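`hbase_HFileBlockIndex_getNonRootSize_rdh` encodes the layout of a non-root index block: a 4-byte entry count, a secondary index of `n + 1` 4-byte offsets, then the serialized entries. A standalone sketch of the same arithmetic (the helper name `nonRootSize` is ours, not HBase's):

```java
public class NonRootSizeSketch {
  /** Mirrors getNonRootSize(): entry count + secondary offset index + entry bytes. */
  static int nonRootSize(int numEntries, int totalEntryBytes) {
    final int SIZEOF_INT = 4; // Bytes.SIZEOF_INT
    return SIZEOF_INT                    // number of entries
      + SIZEOF_INT * (numEntries + 1)    // secondary index: one offset per entry plus an end marker
      + totalEntryBytes;                 // the serialized entries themselves
  }

  public static void main(String[] args) {
    // 3 entries totalling 120 bytes: 4 + 4 * 4 + 120 = 140 bytes.
    System.out.println(nonRootSize(3, 120));
  }
}
```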
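The `BalanceResponse` rows describe a conventional builder. A minimal sketch of assembling a response, assuming the class lives in `org.apache.hadoop.hbase.client` as in recent HBase releases:

```java
import org.apache.hadoop.hbase.client.BalanceResponse;

public class BalanceResponseSketch {
  public static BalanceResponse report() {
    return BalanceResponse.newBuilder()
      .setBalancerRan(true)    // the balancer actually ran
      .setMovesCalculated(12)  // moves in the computed plan
      .setMovesExecuted(12)    // moves completed; may be lower if interrupted
      .build();
  }
}
```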
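The `FilterList` and `ColumnPaginationFilter` rows compose naturally on the client side. A sketch of combining them on a `Scan` (the `KeyOnlyFilter` is our choice of a second filter, not taken from the snippets):

```java
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter;

public class FilterListSketch {
  public static Scan buildScan() {
    // MUST_PASS_ALL: a cell is kept only if every child filter accepts it.
    FilterList filters = new FilterList(FilterList.Operator.MUST_PASS_ALL);
    filters.addFilter(new KeyOnlyFilter());               // return keys only, no values
    filters.addFilter(new ColumnPaginationFilter(10, 0)); // at most 10 columns per row, from offset 0
    return new Scan().setFilter(filters);
  }
}
```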
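Finally, `hbase_Import_configureCfRenaming_rdh` explains that the mapper reads its column-family rename rules from the `CF_RENAME_PROP` configuration key. A sketch of setting it programmatically; the `Map<String, String>`-based signature is assumed from the snippet's description:

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.Import;

public class CfRenameSketch {
  public static Configuration renamed() {
    Configuration conf = HBaseConfiguration.create();
    Map<String, String> renameMap = new HashMap<>();
    renameMap.put("oldFamily", "newFamily");     // source CF -> destination CF
    Import.configureCfRenaming(conf, renameMap); // writes CF_RENAME_PROP into conf
    return conf;
  }
}
```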