name stringlengths 12 178 | code_snippet stringlengths 8 36.5k | score float64 3.26 3.68 |
|---|---|---|
hbase_SnapshotOfRegionAssignmentFromMeta_getTableToRegionMap_rdh | /**
* Get regions for tables
*
* @return a mapping from table to regions
*/public Map<TableName, List<RegionInfo>> getTableToRegionMap() {
return tableToRegionMap;
} | 3.26 |
hbase_SnapshotOfRegionAssignmentFromMeta_initialize_rdh | /**
* Initialize the region assignment snapshot by scanning the hbase:meta table
*/
public void initialize() throws IOException {LOG.info("Start to scan the hbase:meta for the current region assignment " + "snappshot");
// Scan hbase:meta to pick up user regions
try (Table v7 = connection.getTable(TableName.M... | 3.26 |
hbase_SnapshotOfRegionAssignmentFromMeta_getRegionToRegionServerMap_rdh | /**
* Get region to region server map
*
* @return region to region server map
*/
public Map<RegionInfo, ServerName> getRegionToRegionServerMap() {
return regionToRegionServerMap;
} | 3.26 |
hbase_SnapshotOfRegionAssignmentFromMeta_getRegionNameToRegionInfoMap_rdh | /**
* Get the regioninfo for a region
*
* @return the regioninfo
*/
public Map<String, RegionInfo> getRegionNameToRegionInfoMap() {
return this.regionNameToRegionInfoMap;
} | 3.26 |
hbase_WALSplitUtil_moveAsideBadEditsFile_rdh | /**
* Move aside a bad edits file.
*
* @param fs
* the file system used to rename bad edits file.
* @param edits
* Edits file to move aside.
* @return The name of the moved aside file.
*/
public static Path moveAsideBadEditsFile(final FileSystem fs, final Path edits) throws IOException {
Path moveAsideN... | 3.26 |
hbase_WALSplitUtil_finishSplitLogFile_rdh | /**
* Completes the work done by splitLogFile by archiving logs
* <p>
* It is invoked by SplitLogManager once it knows that one of the SplitLogWorkers have completed
* the splitLogFile() part. If the master crashes then this function might get called multiple
* times.
* <p>
*/
public static void finishSplitLogFi... | 3.26 |
hbase_WALSplitUtil_getCompletedRecoveredEditsFilePath_rdh | /**
* Get the completed recovered edits file path, renaming it to be by last edit in the file from
* its first edit. Then we could use the name to skip recovered edits when doing
* HRegion#replayRecoveredEditsIfAny(Map, CancelableProgressable, MonitoredTask).
*
* @return dstPath take file's last edit log seq num a... | 3.26 |
hbase_WALSplitUtil_archive_rdh | /**
 * Moves processed logs to an oldLogDir after successful processing. Moves corrupted logs (any log
 * that couldn't be successfully parsed) to corruptDir (.corrupt) for later investigation
*/
static void archive(final Path wal, final boolean corrupt, final Path oldWALDir, final FileSystem walFS, final Configuration ... | 3.26 |
hbase_WALSplitUtil_getMutationsFromWALEntry_rdh | /**
* This function is used to construct mutations from a WALEntry. It also reconstructs WALKey &
* WALEdit from the passed in WALEntry
*
* @param logEntry
* pair of WALKey and WALEdit instance stores WALKey and WALEdit instances
* extracted from the passed in WALEntry.
* @return list of Pair<Mutation... | 3.26 |
hbase_WALSplitUtil_hasRecoveredEdits_rdh | /**
* Check whether there is recovered.edits in the region dir
*
* @param conf
* conf
* @param regionInfo
* the region to check
* @return true if recovered.edits exist in the region dir
*/
public static boolean hasRecoveredEdits(final Configuration conf, final RegionInfo regionInfo) throws IOException {
... | 3.26 |
hbase_WALSplitUtil_moveWAL_rdh | /**
* Move WAL. Used to move processed WALs to archive or bad WALs to corrupt WAL dir. WAL may have
* already been moved; makes allowance.
*/
public static void moveWAL(FileSystem fs, Path p, Path targetDir) throws IOException {
if (fs.exists(p)) {
if (!CommonFSUtils.renameAndSetModifyTime(fs, p, targetD... | 3.26 |
hbase_WALSplitUtil_writeRegionSequenceIdFile_rdh | /**
* Create a file with name as region's max sequence id
*/
public static void writeRegionSequenceIdFile(FileSystem walFS, Path regionDir, long newMaxSeqId) throws IOException {
FileStatus[] files = getSequenceIdFiles(walFS, regionDir);long maxSeqId = getMaxSequenceId(files);
if (ma... | 3.26 |
hbase_WALSplitUtil_getMaxRegionSequenceId_rdh | /**
* Get the max sequence id which is stored in the region directory. -1 if none.
*/
public static long getMaxRegionSequenceId(FileSystem walFS, Path regionDir) throws IOException {
return getMaxSequenceId(getSequenceIdFiles(walFS, regionDir));
} | 3.26 |
hbase_WALSplitUtil_getSplitEditFilesSorted_rdh | /**
* Returns sorted set of edit files made by splitter, excluding files with '.temp' suffix.
*
* @param walFS
* WAL FileSystem used to retrieving split edits files.
* @param regionDir
* WAL region dir to look for recovered edits files under.
* @return Files in passed <code>regionDir</code> as a sorted set.
... | 3.26 |
hbase_WALSplitUtil_tryCreateRecoveredHFilesDir_rdh | /**
* Return path to recovered.hfiles directory of the region's column family: e.g.
* /hbase/some_table/2323432434/cf/recovered.hfiles/. This method also ensures existence of
* recovered.hfiles directory under the region's column family, creating it if necessary.
*
* @param rootFS
* the root file system
* @par... | 3.26 |
hbase_WALSplitUtil_isSequenceIdFile_rdh | /**
* Is the given file a region open sequence id file.
*/
public static boolean isSequenceIdFile(final Path file) {
return file.getName().endsWith(SEQUENCE_ID_FILE_SUFFIX) || file.getName().endsWith(OLD_SEQUENCE_ID_FILE_SUFFIX);
} | 3.26 |
hbase_WALSplitUtil_getRegionSplitEditsPath_rdh | /**
* Path to a file under RECOVERED_EDITS_DIR directory of the region found in <code>logEntry</code>
* named for the sequenceid in the passed <code>logEntry</code>: e.g.
* /hbase/some_table/2323432434/recovered.edits/2332. This method also ensures existence of
* RECOVERED_EDITS_DIR under the region creating it if ... | 3.26 |
hbase_ColumnPrefixFilter_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() {
FilterProtos.ColumnPrefixFilter.Builder builder = FilterProtos.ColumnPrefixFilter.newBuilder();
if (this.prefix != null)
builder.setPrefix(UnsafeByteOperations.unsafeWrap(this.prefix));
return builder.build().toByteArray();
... | 3.26 |
hbase_ColumnPrefixFilter_parseFrom_rdh | /**
* Parses a serialized representation of the {@link ColumnPrefixFilter}
*
* @param pbBytes
* A pb serialized {@link ColumnPrefixFilter} instance
* @return An instance of {@link ColumnPrefixFilter} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
... | 3.26 |
hbase_ColumnPrefixFilter_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof ColumnPrefixFilter)) {
return false;
}
ColumnPrefixFilter oth... | 3.26 |
hbase_HFileContentValidator_validateHFileContent_rdh | /**
* Check HFile contents are readable by HBase 2.
*
* @param conf
* used configuration
* @return number of HFiles corrupted HBase
* @throws IOException
* if a remote or network exception occurs
*/
private boolean validateHFileContent(Configuration conf) throws IOException {
FileSystem fileSystem = Com... | 3.26 |
hbase_SecureBulkLoadManager_isFile_rdh | /**
* Check if the path is referencing a file. This is mainly needed to avoid symlinks.
*
* @return true if the p is a file
*/
private boolean isFile(Path p) throws IOException { FileStatus status = srcFs.getFileStatus(p);
boolean isFile = !status.isDirectory();
try {
isFile = isFile && (!((Boolean)... | 3.26 |
hbase_BinaryComponentComparator_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {
if (other == this) {
return true;
}
if (!(other inst... | 3.26 |
hbase_BinaryComponentComparator_m0_rdh | /**
* Parse a serialized representation of {@link BinaryComponentComparator}
*
* @param pbBytes
* A pb serialized {@link BinaryComponentComparator} instance
* @return An instance of {@link BinaryComponentComparator} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @se... | 3.26 |
hbase_BinaryComponentComparator_toByteArray_rdh | /**
* Returns The comparator serialized using pb
*/
@Override
public byte[]
toByteArray() {
ComparatorProtos.BinaryComponentComparator.Builder builder = ComparatorProtos.BinaryComponentComparator.newBuilder();
builder.setValue(ByteString.copyFrom(this.value));
builder.setOffset(this.offset);
return bu... | 3.26 |
hbase_BlockType_isData_rdh | /**
* Returns whether this block type is encoded or unencoded data block
*/
public final boolean isData() {return (this == DATA) || (this == ENCODED_DATA);
} | 3.26 |
hbase_BlockType_getId_rdh | /**
* Use this instead of {@link #ordinal()}. They work exactly the same, except DATA and
* ENCODED_DATA get the same id using this method (overridden for {@link #ENCODED_DATA}).
*
* @return block type id from 0 to the number of block types - 1
*/
public int getId() {
// Default implementation, can be overridd... | 3.26 |
hbase_BlockType_readAndCheck_rdh | /**
* Reads a magic record of the length {@link #MAGIC_LENGTH} from the given stream and expects it
* to match this block type.
*/
public void readAndCheck(DataInputStream in) throws IOException {
byte[] buf = new byte[MAGIC_LENGTH];
in.readFully(buf);
if (Bytes.compareTo(buf, f0) != 0) {
throw ... | 3.26 |
hbase_BlockType_isIndex_rdh | /**
* Returns whether this block category is index
*/
public final boolean isIndex() {
return this.getCategory() ==
BlockCategory.INDEX;
} | 3.26 |
hbase_BlockType_expectSpecific_rdh | /**
* Throws an exception if the block category passed is the special category meaning "all
* categories".
*/
public void expectSpecific() {if (this == ALL_CATEGORIES) {
throw new IllegalArgumentException(("Expected a specific block " + "category but got ") + this);
}
} | 3.26 |
hbase_BlockType_isBloom_rdh | /**
* Returns whether this block category is bloom filter
*/
public final boolean isBloom() {
return this.getCategory() == BlockCategory.BLOOM;
} | 3.26 |
hbase_BlockType_put_rdh | /**
* Put the magic record out to the specified byte array position.
*
* @param bytes
* the byte array
* @param offset
* position in the array
* @return incremented offset
*/
// System.arraycopy is static native. We can't do anything about this until minimum JDK is 9.
@SuppressWarnings("UnsafeFinalization")... | 3.26 |
hbase_ZKReplicationStorageBase_toByteArray_rdh | /**
* Serialized protobuf of <code>state</code> with pb magic prefix prepended suitable for use as
* content of a peer-state znode under a peer cluster id as in
* /hbase/replication/peers/PEER_ID/peer-state.
*/
protected static byte[] toByteArray(final ReplicationProtos.ReplicationState.State state) {
Replicat... | 3.26 |
hbase_ChunkCreator_getChunk_rdh | /**
* Poll a chunk from the pool, reset it if not null, else create a new chunk to return if we
* have not yet created max allowed chunks count. When we have already created max allowed
* chunks and no free chunks as of now, return null. It is the responsibility of the caller to
* make a chunk then. Note: Chunks re... | 3.26 |
hbase_ChunkCreator_createChunk_rdh | /**
* Creates the chunk either onheap or offheap
*
* @param pool
* indicates if the chunks have to be created which will be used by the Pool
* @param chunkType
* whether the requested chunk is data chunk or index chunk.
* @param size
* the size of the chunk to be allocated, in bytes
* @return the chunk
... | 3.26 |
hbase_ChunkCreator_putbackChunks_rdh | /**
* Add the chunks to the pool, when the pool achieves the max size, it will skip the remaining
* chunks
*/
private void putbackChunks(Chunk c) {
int v10 = this.maxCount - reclaimedChunks.size();
if ((c.isFromPool() && (c.size == chunkSize)) && (v10 > 0)) {
reclaimedChunks.add(c);
} else {
// remove the ch... | 3.26 |
hbase_ChunkCreator_numberOfMappedChunks_rdh | // the chunks in the chunkIdMap may already be released so we shouldn't rely
// on this counting for strong correctness. This method is used only in testing.
int numberOfMappedChunks() {
return this.chunkIdMap.size();
} | 3.26 |
hbase_ChunkCreator_getJumboChunk_rdh | /**
* Creates and inits a chunk of a special size, bigger than a regular chunk size. Such a chunk
* will never come from pool and will always be on demand allocated.
*
* @return the chunk that was initialized
* @param jumboSize
* the special size to be used
*/
Chunk getJumboChunk(int jumboSize) {
int allocSize... | 3.26 |
hbase_ChunkCreator_createChunkForPool_rdh | // Chunks from pool are created covered with strong references anyway.
private Chunk createChunkForPool(ChunkType chunkType, int
chunkSize) {
if ((chunkSize != dataChunksPool.getChunkSize()) && (chunkSize != indexChunksPool.getChunkSize())) {
return null;
}
return createChunk(true, chunkType, chunkSize);
} | 3.26 |
hbase_ChunkCreator_initialize_rdh | /**
* Initializes the instance of ChunkCreator
*
* @param chunkSize
* the chunkSize
* @param offheap
* indicates if the chunk is to be created offheap or not
* @param globalMemStoreSize
* the global memstore size
* @param poolSizePercentage
* pool size perce... | 3.26 |
hbase_WALObserver_postWALRoll_rdh | /**
* Called after rolling the current WAL
*
* @param oldPath
* the path of the wal that we replaced
* @param newPath
* the path of the wal we have created and now is the current
*/
default void postWALRoll(ObserverContext<? extends WALCoprocessorEnvironment> ctx, Path oldPath, Path newPath) throws IOExcept... | 3.26 |
hbase_WALObserver_preWALRoll_rdh | /**
* Called before rolling the current WAL
*
* @param oldPath
* the path of the current wal that we are replacing
* @param newPath
* the path of the wal we are going to create
*/
default void preWALRoll(ObserverContext<? extends WALCoprocessorEnvironment> ctx, Path oldPath, Path newPath) throws IOException ... | 3.26 |
hbase_SubstringComparator_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {
if (other == this) {
return true;
}
if (!(other instanceof SubstringCo... | 3.26 |
hbase_SubstringComparator_parseFrom_rdh | /**
* Parse a serialized representation of {@link SubstringComparator}
*
* @param pbBytes
* A pb serialized {@link SubstringComparator} instance
* @return An instance of {@link SubstringComparator} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/... | 3.26 |
hbase_SubstringComparator_toByteArray_rdh | /**
* Returns The comparator serialized using pb
*/
@Override
public byte[] toByteArray() {
ComparatorProtos.SubstringComparator.Builder builder = ComparatorProtos.SubstringComparator.newBuilder();
builder.setSubstr(this.substr); return builder.build().toByteArray();
} | 3.26 |
hbase_TsvImporterMapper_setup_rdh | /**
* Handles initializing this class with objects specific to it (i.e., the parser). Common
 * initialization that might be leveraged by a subclass is done in <code>doSetup</code>. Hence a
* subclass may choose to override this method and call <code>doSetup</code> as well before
 * handling its own custom params.
... | 3.26 |
hbase_TsvImporterMapper_doSetup_rdh | /**
* Handles common parameter initialization that a subclass might want to leverage.
*/
protected void doSetup(Context context) {
Configuration conf = context.getConfiguration();
// If a custom separator has been used,
// decode it back from Base64 encoding.
f1 = conf.get(ImportTsv.SEPARATOR_CONF_KEY... | 3.26 |
hbase_TsvImporterMapper_map_rdh | /**
* Convert a line of TSV text into an HBase table row.
*/
@Override
public void map(LongWritable offset,
Text value, Context context) throws IOException {
byte[]
lineBytes = value.getBytes();
try {
ImportTsv.TsvParser.ParsedLine parsed = parser.parse(lineBytes, value.getLength());
Immut... | 3.26 |
hbase_MetricsHeapMemoryManager_increaseAboveHeapOccupancyLowWatermarkCounter_rdh | /**
* Increase the counter for heap occupancy percent above low watermark
*/
public void increaseAboveHeapOccupancyLowWatermarkCounter() {
source.increaseAboveHeapOccupancyLowWatermarkCounter();
} | 3.26 |
hbase_MetricsHeapMemoryManager_setCurMemStoreSizeGauge_rdh | /**
* Set the current global memstore size used gauge
*
* @param memStoreSize
* the current memory usage in memstore, in bytes.
*/
public void setCurMemStoreSizeGauge(final long memStoreSize) {
source.setCurMemStoreSizeGauge(memStoreSize);
} | 3.26 |
hbase_MetricsHeapMemoryManager_increaseTunerDoNothingCounter_rdh | /**
* Increase the counter for tuner neither expanding memstore global size limit nor expanding
* blockcache max size.
*/
public void increaseTunerDoNothingCounter() {
source.increaseTunerDoNothingCounter();
} | 3.26 |
hbase_MetricsHeapMemoryManager_updateUnblockedFlushCount_rdh | /**
* Update/Set the unblocked flush count histogram/gauge
*
* @param unblockedFlushCount
* the number of unblocked memstore flush since last tuning.
*/
public void updateUnblockedFlushCount(final long unblockedFlushCount) {
source.updateUnblockedFlushCount(unblockedFlushCount);
} | 3.26 |
hbase_MetricsHeapMemoryManager_updateBlockedFlushCount_rdh | /**
* Update/Set the blocked flush count histogram/gauge
*
* @param blockedFlushCount
* the number of blocked memstore flush since last tuning.
*/
public void updateBlockedFlushCount(final long blockedFlushCount) {
source.updateBlockedFlushCount(blockedFlushCount);
} | 3.26 |
hbase_MetricsHeapMemoryManager_setCurMemStoreOffHeapSizeGauge_rdh | /**
* Set the current global memstore off-heap size gauge
*
* @param memStoreOffHeapSize
* the current memory off-heap size in memstore, in bytes.
*/
public void setCurMemStoreOffHeapSizeGauge(final long memStoreOffHeapSize) {
source.setCurMemStoreOffHeapSizeGauge(memStoreOffHeapSize);
} | 3.26 |
hbase_MetricsHeapMemoryManager_updateMemStoreDeltaSizeHistogram_rdh | /**
* Update the increase/decrease memstore size histogram
*
* @param memStoreDeltaSize
* the tuning result of memstore.
*/
public void updateMemStoreDeltaSizeHistogram(final int memStoreDeltaSize) {
source.updateMemStoreDeltaSizeHistogram(memStoreDeltaSize);
} | 3.26 |
hbase_MetricsHeapMemoryManager_updateBlockCacheDeltaSizeHistogram_rdh | /**
* Update the increase/decrease blockcache size histogram
*
* @param blockCacheDeltaSize
* the tuning result of blockcache.
*/
public void updateBlockCacheDeltaSizeHistogram(final int blockCacheDeltaSize) {
source.updateBlockCacheDeltaSizeHistogram(blockCacheDeltaSize); } | 3.26 |
hbase_MetricsHeapMemoryManager_setCurBlockCacheSizeGauge_rdh | /**
* Set the current blockcache size used gauge
*
* @param blockCacheSize
* the current memory usage in blockcache, in bytes.
*/
public void setCurBlockCacheSizeGauge(final long blockCacheSize) {
source.setCurBlockCacheSizeGauge(blockCacheSize);
} | 3.26 |
hbase_MetricsHeapMemoryManager_setCurMemStoreOnHeapSizeGauge_rdh | /**
* Set the current global memstore on-heap size gauge
*
* @param memStoreOnHeapSize
* the current memory on-heap size in memstore, in bytes.
*/
public void setCurMemStoreOnHeapSizeGauge(final long
memStoreOnHeapSize) {
source.setCurMemStoreOnHeapSizeGauge(memStoreOnHeapSize);
} | 3.26 |
hbase_CompactionTool_compactStoreFiles_rdh | /**
* Execute the actual compaction job. If the compact once flag is not specified, execute the
* compaction until no more compactions are needed. Uses the Configuration settings provided.
*/
private void compactStoreFiles(final Path tableDir, final TableDescriptor htd, final RegionInfo hri, final String familyName,... | 3.26 |
hbase_CompactionTool_getSplits_rdh | /**
* Returns a split for each store files directory using the block location of each file as
* locality reference.
*/
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {
List<InputSplit> splits = new ArrayList<>();
List<FileStatus> files = listStatus(job);
Text key = new Tex... | 3.26 |
hbase_CompactionTool_compact_rdh | /**
* Execute the compaction on the specified path.
*
* @param path
* Directory path on which to run compaction.
* @param compactOnce
* Execute just a single step of compaction.
* @param major
* Request major compaction.
*/
public void compact(final Path path, final boolean compactOnce, final boolean ma... | 3.26 |
hbase_CompactionTool_createInputFile_rdh | /**
* Create the input file for the given directories to compact. The file is a TextFile with each
 * line corresponding to a store files directory to compact.
*/
public static List<Path> createInputFile(final FileSystem fs, final FileSystem stagingFs, final Path path, final Set<Path> toCompactDirs) throws IOExceptio... | 3.26 |
hbase_CompactionTool_getStoreDirHosts_rdh | /**
* return the top hosts of the store files, used by the Split
*/
private static String[] getStoreDirHosts(final FileSystem fs, final Path path) throws IOException {
FileStatus[] files = CommonFSUtils.listStatus(fs, path);
if (files
== null) {
return new String[]{ };
}
HDFSBlocksDistribution hdfsBlocksDistri... | 3.26 |
hbase_CompactionTool_doClient_rdh | /**
* Execute compaction, from this client, one path at the time.
*/
private int doClient(final FileSystem fs, final Set<Path> toCompactDirs, final boolean compactOnce, final
boolean
major) throws IOException {
CompactionWorker worker = new CompactionWorker(fs, getConf());
for (Path path : toCompactDirs) {
worker... | 3.26 |
hbase_MasterFileSystem_createInitialFileSystemLayout_rdh | /**
* Create initial layout in filesystem.
* <ol>
* <li>Check if the meta region exists and is readable, if not create it. Create hbase.version and
* the hbase:meta directory if not one.</li>
* </ol>
* Idempotent.
*/
private void createInitialFileSystemLayout() throws IOException {
final String[] protected... | 3.26 |
hbase_MasterFileSystem_getClusterId_rdh | /**
* Returns The unique identifier generated for this cluster
*/
public ClusterId getClusterId() {
return clusterId;
} | 3.26 |
hbase_MasterFileSystem_getWALRootDir_rdh | /**
* Returns HBase root log dir.
*/
public Path getWALRootDir() {
return this.walRootDir;
} | 3.26 |
hbase_MasterFileSystem_getRootDir_rdh | /**
* Returns HBase root dir.
*/
public Path getRootDir() {
return this.rootdir;
} | 3.26 |
hbase_MasterFileSystem_getRegionDir_rdh | /**
* Returns the directory for a give {@code region}.
*/
public Path getRegionDir(RegionInfo region) {
return FSUtils.getRegionDirFromRootDir(getRootDir(), region);
} | 3.26 |
hbase_MasterFileSystem_getTempDir_rdh | /**
* Returns HBase temp dir.
*/public Path getTempDir() {
return this.tempdir;
} | 3.26 |
hbase_MasterFileSystem_checkStagingDir_rdh | /**
* Check permissions for bulk load staging directory. This directory has special hidden
* permissions. Create it if necessary.
*/
private void checkStagingDir() throws IOException {
Path
p = new Path(this.rootdir, HConstants.BULKLOAD_STAGING_DIR_NAME);
try {
if (!this.fs.exists(p)) {
if (!this.fs.mkdirs(p, Hidden... | 3.26 |
hbase_MasterFileSystem_checkSubDir_rdh | /**
* Make sure the directories under rootDir have good permissions. Create if necessary.
*/
private void checkSubDir(final Path p, final String dirPermsConfName) throws IOException {
FileSystem fs = p.getFileSystem(conf);
FsPermission dirPerms = new FsPermission(conf.get(dirPermsConfName, "700"));
if (!fs.exists(p... | 3.26 |
hbase_MasterFileSystem_checkTempDir_rdh | /**
* Make sure the hbase temp directory exists and is empty. NOTE that this method is only executed
* once just after the master becomes the active one.
*/
void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs) throws IOException {
// If the temp directory exists, clear the content (left o... | 3.26 |
hbase_NullComparator_parseFrom_rdh | /**
* Parse the serialized representation of {@link NullComparator}
*
* @param pbBytes
* A pb serialized {@link NullComparator} instance
* @return An instance of {@link NullComparator} made from <code>bytes</code>
* @throws DeserializationException
* if an error occurred
* @see #toByteArray
*/
public stati... | 3.26 |
hbase_NullComparator_toByteArray_rdh | /**
* Returns The comparator serialized using pb
*/
@Override
public byte[] toByteArray() {ComparatorProtos.NullComparator.Builder builder = ComparatorProtos.NullComparator.newBuilder();
return builder.build().toByteArray();
} | 3.26 |
hbase_NullComparator_areSerializedFieldsEqual_rdh | /**
* Returns true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {
if (other == this) {
return true;
}
if (!(other instanceof NullComparat... | 3.26 |
hbase_RegionServerRpcQuotaManager_checkQuota_rdh | /**
* Check the quota for the current (rpc-context) user. Returns the OperationQuota used to get the
* available quota and to report the data/usage of the operation.
*
* @param region
* the region where the operation will be performed
* @param numWrites
* number of writes to perform
* @param numReads
* n... | 3.26 |
hbase_RegionServerRpcQuotaManager_getQuota_rdh | /**
* Returns the quota for an operation.
*
* @param ugi
* the user that is executing the operation
* @param table
* the table where the operation will be executed
* @return the OperationQuota
*/
public OperationQuota getQuota(final UserGroupInformation ugi, final TableName table) {
if ((isQuotaEnabled()
&&... | 3.26 |
hbase_RESTServer_loginServerPrincipal_rdh | // login the server principal (if using secure Hadoop)
private static Pair<FilterHolder,
Class<? extends ServletContainer>> loginServerPrincipal(UserProvider userProvider, Configuration conf) throws Exception {
Class<? extends ServletContainer> v5 = ServletContainer.class;
if (userProvider.isHadoopSecurityEnabl... | 3.26 |
hbase_RESTServer_main_rdh | /**
* The main method for the HBase rest server.
*
* @param args
* command-line arguments
* @throws Exception
* exception
*/
public static void main(String[] args) throws Exception {
LOG.info(("***** STARTING service '" + RESTServer.class.getSimpleName()) + "' *****");
VersionInfo.logVersion();
... | 3.26 |
hbase_RESTServer_run_rdh | /**
* Runs the REST server.
*/public synchronized void run() throws Exception
{
Pair<FilterHolder, Class<? extends ServletContainer>> pair = loginServerPrincipal(userProvider, conf);
FilterHolder authFilter = pair.getFirst();
Class<? extends ServletContainer> containerClass = pair.getSecond();
RESTSe... | 3.26 |
hbase_ZKConfig_makeZKProps_rdh | /**
* Make a Properties object holding ZooKeeper config. Parses the corresponding config options from
* the HBase XML configs and generates the appropriate ZooKeeper properties.
*
* @param conf
* Configuration to read from.
* @return Properties holding mappings representing ZooKeeper config file.
*/
public sta... | 3.26 |
hbase_ZKConfig_standardizeZKQuorumServerString_rdh | /**
* Standardize the ZK quorum string: make it a "server:clientport" list, separated by ','
*
* @param quorumStringInput
* a string contains a list of servers for ZK quorum
* @param clientPort
* the default client port
* @return the string for a list of "server:port" separated by ","
*/
public static Str... | 3.26 |
hbase_ZKConfig_makeZKPropsFromHbaseConfig_rdh | /**
* Make a Properties object holding ZooKeeper config. Parses the corresponding config options from
* the HBase XML configs and generates the appropriate ZooKeeper properties.
*
* @param conf
* Configuration to read from.
* @return Properties holding mappings representing ZooKeeper... | 3.26 |
hbase_ZKConfig_getZKQuorumServersStringFromHbaseConfig_rdh | /**
* Return the ZK Quorum servers string given the specified configuration
*
* @return Quorum servers String
*/
private static String getZKQuorumServersStringFromHbaseConfig(Configuration conf) {
String defaultClientPort = Integer.toString(conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEE... | 3.26 |
hbase_ZKConfig_transformClusterKey_rdh | /**
* Separate the given key into the three configurations it should contain: hbase.zookeeper.quorum,
* hbase.zookeeper.client.port and zookeeper.znode.parent
*
* @return the three configuration in the described order
*/
public static ZKClusterKey transformClusterKey(String key) throws IOException {
List<String> v... | 3.26 |
hbase_ZKConfig_buildZKQuorumServerString_rdh | /**
* Build the ZK quorum server string with "server:clientport" list, separated by ','
*
* @param serverHosts
* a list of servers for ZK quorum
* @param clientPort
* the default client port
* @return the string for a list of "server:port" separated by ","
*/
public static String buildZKQuorumServerString(S... | 3.26 |
hbase_ZKConfig_getZooKeeperClusterKey_rdh | /**
* Get the key to the ZK ensemble for this configuration and append a name at the end
*
* @param conf
* Configuration to use to build the key
* @param name
* Name that should be appended at the end if not empty or null
* @return ensemble key with a name (if any)
*/
public static String getZooKeeperClust... | 3.26 |
hbase_ZKConfig_m0_rdh | /**
* Get the key to the ZK ensemble for this configuration without adding a name at the end
*
* @param conf
* Configuration to use to build the key
* @return ensemble key without a name
*/
public static String m0(Configuration conf) {
return getZooKeeperClusterKey(conf, null);
} | 3.26 |
hbase_ZKConfig_getZKQuorumServersString_rdh | /**
* Return the ZK Quorum servers string given the specified configuration.
*
* @return Quorum servers
*/
public static String getZKQuorumServersString(Configuration conf) {
m2(HConstants.ZK_CFG_PROPERTY_PREFIX, conf);
return getZKQuorumServersStringFromHbaseConfig(conf);
} | 3.26 |
hbase_ZKConfig_validateClusterKey_rdh | /**
* Verifies that the given key matches the expected format for a ZooKeeper cluster key. The Quorum
* for the ZK cluster can have one the following formats (see examples below):
* <ol>
* <li>s1,s2,s3 (no client port in the list, the client port could be obtained from
* clientPort)</li>
* <li>s1:p1,s2:p2,s3:p3 (... | 3.26 |
hbase_Interns_tag_rdh | /**
* Get a metrics tag
*
* @param info
* of the tag
* @param value
* of the tag
* @return an interned metrics tag
*/
public static MetricsTag tag(MetricsInfo info, String value) {Map<String, MetricsTag> map = tagCache.getUnchecked(info);
MetricsTag tag = map.get(value);
if (tag == null) {
... | 3.26 |
hbase_Interns_m0_rdh | /**
* Get a metrics tag
*
* @param name
* of the tag
* @param description
* of the tag
* @param value
* of the tag
* @return an interned metrics tag
*/
public static MetricsTag m0(String name, String description, String value) {return tag(info(name, description), value);
} | 3.26 |
hbase_LogEventHandler_persistAll_rdh | /**
* Add all in memory queue records to system table. The implementors can use system table or
* direct HDFS file or ZK as persistence system.
*/
void persistAll(NamedQueuePayload.NamedQueueEvent namedQueueEvent, Connection connection) {
namedQueueServices.get(namedQueueEvent).persistAll(connection);
} | 3.26 |
hbase_LogEventHandler_clearNamedQueue_rdh | /**
* Cleans up queues maintained by services.
*
* @param namedQueueEvent
* type of queue to clear
* @return true if queue is cleaned up, false otherwise
*/
boolean clearNamedQueue(NamedQueuePayload.NamedQueueEvent namedQueueEvent) {
return namedQueueServices.get(namedQueueEvent).clearNamedQueue();
} | 3.26 |
hbase_LogEventHandler_onEvent_rdh | /**
* Called when a publisher has published an event to the {@link RingBuffer}. This is generic
* consumer of disruptor ringbuffer and for each new namedQueue that we add, we should also
* provide specific consumer logic here.
*
* @param event
* published to the {@link RingBuffer}
* @param sequence
* of the... | 3.26 |
hbase_AsyncScanSingleRegionRpcRetryingCaller_destroy_rdh | // return the current state, and set the state to DESTROYED.
ScanControllerState destroy() {
ScanControllerState state = this.state;
this.state = ScanControllerState.DESTROYED;
return state;
} | 3.26 |
hbase_AsyncScanSingleRegionRpcRetryingCaller_prepare_rdh | // return false if the scan has already been resumed. See the comment above for ScanResumerImpl
// for more details.
synchronized boolean prepare(ScanResponse resp, int numberOfCompleteRows) {
if
(state ==
ScanResumerState.RESUMED) {
// user calls resume before we actually suspend the scan, just con... | 3.26 |
hbase_AsyncScanSingleRegionRpcRetryingCaller_start_rdh | /**
* Now we will also fetch some cells along with the scanner id when opening a scanner, so we also
* need to process the ScanResponse for the open scanner request. The HBaseRpcController for the
* open scanner request is also needed because we may have some data in the CellScanner which is
* contained in the cont... | 3.26 |
hbase_Union4_decodeD_rdh | /**
* Read an instance of the fourth type parameter from buffer {@code src}.
*/
public D decodeD(PositionedByteRange src) {
return ((D) (decode(src)));
} | 3.26 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.