| name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68) |
|---|---|---|
hbase_ServerCommandLine_logHBaseConfigs_rdh | /**
* Print into log some of the important hbase attributes.
*/
private static void logHBaseConfigs(Configuration conf) {
final String[] keys = new String[]{ // Expand this list as you see fit.
"hbase.tmp.dir", HConstants.HBASE_DIR, HConstants.CLUSTER_DISTRIBUTED, HConstants.ZOOKEEPER_QUORUM };
for (Stri... | 3.26 |
hbase_ServerCommandLine_logJVMInfo_rdh | /**
* Log information about the currently running JVM.
*/
public static void logJVMInfo() {
// Print out vm stats before starting up.
RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
if (runtime != null) {
LOG.info("vmName=" + runtime.getVmName() +
", vmVendor=" ... | 3.26 |
hbase_BrokenStoreFileCleaner_isCompactedFile_rdh | // Compacted files can still have readers and are cleaned by a separate chore, so they have to
// be skipped here
private boolean isCompactedFile(FileStatus file, HStore store) {
return store.getStoreEngine().getStoreFileManager().getCompactedfiles().stream().anyMatch(sf -> sf.getPath().equals(file.getPath()));
} | 3.26 |
hbase_NamespacesInstanceResource_post_rdh | /**
* Build a response for POST create namespace with properties specified.
*
* @param model
* properties used for create.
* @param uriInfo
* (JAX-RS context variable) request URL
* @return response code.
*/
@POST
@Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
public R... | 3.26 |
hbase_NamespacesInstanceResource_createOrUpdate_rdh | // Do the actual namespace create or alter.
private Response createOrUpdate(final NamespacesInstanceModel model, final UriInfo uriInfo, final Admin admin, final boolean updateExisting) {
NamespaceDescriptor.Builder builder = NamespaceDescriptor.create(namespace);
builder.addConfiguration(model.getProperties());... | 3.26 |
hbase_NamespacesInstanceResource_deleteNoBody_rdh | /**
* Build a response for DELETE delete namespace.
*
* @param message
* value not used.
* @param headers
* value not used.
* @return response code.
*/
@DELETE
public Response deleteNoBody(final byte[] message, @Context final UriInfo uriInfo,
@Context final HttpHeaders headers) {
if (LOG.isTraceEnabled... | 3.26 |
hbase_NamespacesInstanceResource_processUpdate_rdh | // Check that POST or PUT is valid and then update namespace.
private Response processUpdate(NamespacesInstanceModel model, final boolean updateExisting, final UriInfo uriInfo) {
if (LOG.isTraceEnabled()) {
LOG.trace((updateExisting ? "PUT " : "POST ") + uriInfo.getAbsolutePath());
}
if (model == nu... | 3.26 |
hbase_NamespacesInstanceResource_getNamespaceInstanceResource_rdh | /**
* Dispatch to NamespaceInstanceResource for getting list of tables.
*/
@Path("tables")
public NamespacesInstanceResource getNamespaceInstanceResource(@PathParam("tables")
... | 3.26 |
hbase_CachedClusterId_setClusterId_rdh | /**
* Succeeds only once, when setting to a non-null value. Overwrites are not allowed.
*/
private void setClusterId(ClusterId id) {
if ((id == null) || isClusterIdSet.get()) {
return;
}
clusterId = id;
isClusterIdSet.set(true);
} | 3.26 |
hbase_CachedClusterId_getClusterId_rdh | /**
* Returns a cached copy of the cluster ID. null if the cache is not populated.
*/
private String getClusterId() {
if (!isClusterIdSet.get()) {
return null;
}
// It is ok to read without a lock since clusterId is immutable once set.
return clusterId.toString();
} | 3.26 |
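Taken together, `setClusterId` and `getClusterId` form a set-once publication idiom guarded by an `AtomicBoolean`. A minimal self-contained sketch of that idiom, assuming the field types implied by the snippets (the `volatile` on `clusterId` is an added assumption for safe publication; the real class may arrange visibility differently):

```java
import java.util.concurrent.atomic.AtomicBoolean;

// Sketch of the set-once pattern above; the class name is hypothetical and
// the field names follow the snippets.
class SetOnceClusterId {
    private final AtomicBoolean isClusterIdSet = new AtomicBoolean(false);
    private volatile String clusterId; // immutable once set

    void setClusterId(String id) {
        if (id == null || isClusterIdSet.get()) {
            return; // overwrites are not allowed
        }
        clusterId = id;           // write the value first...
        isClusterIdSet.set(true); // ...then publish the flag
    }

    String getClusterId() {
        // A reader that observes the flag also observes the value written before it.
        return isClusterIdSet.get() ? clusterId : null;
    }
}
```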
hbase_HFileArchiveUtil_getTableArchivePath_rdh | /**
* Get the path to the table archive directory based on the configured archive directory.
* <p>
* Assumes the table has already been archived.
*
* @param conf
* {@link Configuration} to read the archive directory property. Can be null
* @param tableName
* Name of the table to be archived. Cannot be... | 3.26 |
hbase_HFileArchiveUtil_getTableName_rdh | /* @return table name given archive file path */
public static TableName getTableName(Path archivePath) {
Path p = archivePath;
String tbl = null;
// namespace is the 4th parent of file
for (int i = 0; i < 5; i++) {
if (p == null)
return null;
if (i == 3)
tbl = p.get... | 3.26 |
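The loop body is truncated above, so here is a hypothetical standalone equivalent, assuming the usual archive layout `.../archive/data/<namespace>/<table>/<region>/<family>/<hfile>`: the table directory is the 3rd parent of the file and the namespace the 4th, as the comment says.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;

final class ArchivePathSketch {
    // Hypothetical re-implementation, not the exact HBase loop.
    static TableName tableNameFromArchiveFile(Path file) {
        Path p = file;
        for (int i = 0; i < 3 && p != null; i++) {
            p = p.getParent(); // climb through family and region to the table dir
        }
        if (p == null || p.getParent() == null) {
            return null; // path too shallow to contain namespace and table
        }
        return TableName.valueOf(p.getParent().getName(), p.getName());
    }
}
```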
hbase_HFileArchiveUtil_getArchivePath_rdh | /**
* Get the full path to the archive directory on the configured
* {@link org.apache.hadoop.hbase.master.MasterFileSystem}
*
* @param conf
* to look for archive directory name and root directory. Cannot be null. Notes for
* testing: requires a FileSystem root directory to be specified.
* @return the full {... | 3.26 |
hbase_ShutdownHook_install_rdh | /**
* Install a shutdown hook that calls stop on the passed Stoppable and then thread joins against
* the passed <code>threadToJoin</code>. When this thread completes, it then runs the hdfs thread
* (This install removes the hdfs shutdown hook keeping a handle on it to run it after
* <code>threadToJoin</code> has s... | 3.26 |
hbase_ShutdownHook_main_rdh | /**
* Main to test basic functionality. Run with clean hadoop 0.20 and hadoop 0.21 and cloudera
* patched hadoop to make sure our shutdown hook handling works for all combinations. Pass
* '-Dhbase.shutdown.hook=false' to test turning off the running of shutdown hooks.
*/
public static void main(final String[]
ar... | 3.26 |
hbase_ReplicationSyncUp_listRegionServers_rdh | // Find region servers under wal directory
// Here we only care about the region servers which may still be alive, as we need to add
// replications for them if missing. The dead region servers which have already been processed
// fully do not need to add their replication queues again, as the operation has already bee... | 3.26 |
hbase_ReplicationSyncUp_claimReplicationQueues_rdh | // When using this tool, usually the source cluster is unhealthy, so we should try to claim the
// replication queues for the dead region servers first and then replicate the data out.
private void claimReplicationQueues(ReplicationSourceManager mgr, Set<ServerName> regionServers) throws ReplicationException, KeeperEx... | 3.26 |
hbase_ReplicationSyncUp_main_rdh | /**
* Main program
*/
public static void main(String[] args) throws Exception {
int ret = ToolRunner.run(HBaseConfiguration.create(), new ReplicationSyncUp(), args);
System.exit(ret);
} | 3.26 |
hbase_AbstractProtobufWALReader_getCodecClsName_rdh | /**
* Returns the cell codec classname
*/
public String getCodecClsName() {
return codecClsName;
} | 3.26 |
hbase_AbstractProtobufWALReader_isWALTrailer_rdh | /**
* This is used to determine whether we have already reached the WALTrailer. As the size and magic
* are at the end of the WAL file, it is possible that these two options are missing while
* writing, so we will consider there is no trailer. And when we actually reach the WALTrailer, we
* will try to decode it as... | 3.26 |
hbase_AbstractProtobufWALReader_getWriterClsNames_rdh | /**
* Returns names of the accepted writer classes
*/
public List<String> getWriterClsNames() {
return WRITER_CLS_NAMES;
} | 3.26 |
hbase_AbstractProtobufWALReader_setTrailerIfPresent_rdh | /**
* To check whether a trailer is present in a WAL, it seeks to position (fileLength -
* PB_WAL_COMPLETE_MAGIC.size() - Bytes.SIZEOF_INT). It reads the int value to know the size of
* the trailer, and checks whether the trailer is present at the end or not by comparing the last
* PB_WAL_COMPLETE_MAGIC.size() byte... | 3.26 |
hbase_MiniBatchOperationInProgress_m0_rdh | /**
* Returns The number of operations(Mutations) involved in this batch.
*/
public int m0()
{
return this.lastIndexExclusive - this.firstIndex;
} | 3.26 |
hbase_MiniBatchOperationInProgress_getOperation_rdh | /**
* Returns The operation(Mutation) at the specified position.
*/
public T getOperation(int index) {
return operations[getAbsoluteIndex(index)];
} | 3.26 |
hbase_MiniBatchOperationInProgress_getOperationStatus_rdh | /**
* Returns the status code for the operation(Mutation) at the specified position.
*/
public OperationStatus getOperationStatus(int index) {
return this.retCodeDetails[getAbsoluteIndex(index)];
} | 3.26 |
hbase_MiniBatchOperationInProgress_addOperationsFromCP_rdh | /**
* Add more Mutations corresponding to the Mutation at the given index to be committed atomically
* in the same batch. These mutations are applied to the WAL and applied to the memstore as well.
* The timestamp of the cells in the given Mutations MUST be obtained from the original mutation.
* <b>Note:</b> The du... | 3.26 |
hbase_MiniBatchOperationInProgress_setWalEdit_rdh | /**
* Sets the walEdit for the operation(Mutation) at the specified position.
*/
public void setWalEdit(int index, WALEdit walEdit) {
this.walEditsFromCoprocessors[getAbsoluteIndex(index)] = walEdit;
} | 3.26 |
hbase_MiniBatchOperationInProgress_setOperationStatus_rdh | /**
* Sets the status code for the operation(Mutation) at the specified position. By setting this
* status, {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} can make HRegion to skip
* Mutations.
*/
public void setOperationStatus(int index, OperationStatus opStatus) {
this.retCodeDetails[getAbsoluteInd... | 3.26 |
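The skip mechanism this javadoc describes is driven from a `RegionObserver`. A hedged sketch of that pattern (the attribute name and observer are illustrative; `m0()` in the snippets above appears to be the batch-size accessor, written as `size()` here):

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.regionserver.OperationStatus;

public class SkippingObserver implements RegionObserver {
    @Override
    public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
            MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
        for (int i = 0; i < miniBatchOp.size(); i++) {
            // Marking an operation SUCCESS up front makes HRegion skip applying it.
            if (miniBatchOp.getOperation(i).getAttribute("skip-me") != null) {
                miniBatchOp.setOperationStatus(i, OperationStatus.SUCCESS);
            }
        }
    }
}
```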
hbase_MiniBatchOperationInProgress_getWalEdit_rdh | /**
* Returns the walEdit for the operation(Mutation) at the specified position.
*/
public WALEdit getWalEdit(int index) {
return this.walEditsFromCoprocessors[getAbsoluteIndex(index)];
} | 3.26 |
hbase_BucketAllocator_allocateBlock_rdh | /**
* Allocate a block with specified size. Return the offset
*
* @param blockSize
* size of block
* @return the offset in the IOEngine
*/
public synchronized long allocateBlock(int blockSize) throws CacheFullException, BucketAllocatorException {
assert blockSize > 0;
BucketSizeInfo bsi = roundUpToBucketSizeIn... | 3.26 |
hbase_BucketAllocator_itemSize_rdh | /**
* This bucket size can only allocate items of this size, even if the requested allocation size
* is smaller. The rest goes towards {@link #fragmentationBytes()}.
*/
public long itemSize() {
return itemSize;
} | 3.26 |
hbase_BucketAllocator_usedCount_rdh | /**
* How many items are currently taking up space in this bucket size's buckets
*/
public long usedCount() {
return usedCount;
} | 3.26 |
hbase_BucketAllocator_roundUpToBucketSizeInfo_rdh | /**
* Round up the given block size to bucket size, and get the corresponding BucketSizeInfo
*/
public BucketSizeInfo roundUpToBucketSizeInfo(int blockSize) {
for (int i = 0; i < f0.length; ++i)
if (blockSize <= f0[i])
return bucketSizeInfos[i];
return null;
} | 3.26 |
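`roundUpToBucketSizeInfo` scans the ascending size array for the first bucket size that fits the request, and `allocateBlock` then hands out a slot of that full size, so the difference becomes fragmentation. A worked example with assumed, illustrative bucket sizes:

```java
public class RoundUpExample {
    public static void main(String[] args) {
        int[] bucketSizes = { 5120, 9216, 17408, 33792 }; // assumed sizes
        int blockSize = 7000;
        int slot = -1;
        for (int candidate : bucketSizes) { // sizes are ascending
            if (blockSize <= candidate) {
                slot = candidate;           // first size that fits wins
                break;
            }
        }
        // Prints "slot=9216 fragmentation=2216": the 7000-byte block occupies a
        // 9216-byte item, and the 2216-byte remainder counts as fragmentation.
        System.out.println("slot=" + slot + " fragmentation=" + (slot - blockSize));
    }
}
```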
hbase_BucketAllocator_freeBytes_rdh | /**
* How many more bytes can be allocated from the currently claimed blocks of this bucket size
*/
public long freeBytes() {
return f1 * itemSize;
} | 3.26 |
hbase_BucketAllocator_wastedBytes_rdh | /**
* If {@link #bucketCapacity} is not perfectly divisible by this {@link #itemSize()}, the
* remainder will be unusable in buckets of this size. A high value here may be optimized by
* trying to choose bucket sizes which can better divide {@link #bucketCapacity}.
*/
public long wastedBytes() {
return wastedByt... | 3.26 |
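A quick arithmetic illustration of the waste described above, with assumed numbers:

```java
public class WastedBytesExample {
    public static void main(String[] args) {
        long bucketCapacity = 100_000; // bytes per bucket (assumed)
        long itemSize = 9_216;         // slot size for this bucket size (assumed)
        long itemsPerBucket = bucketCapacity / itemSize;  // 10 usable slots
        long wastedPerBucket = bucketCapacity % itemSize; // 7_840 unusable bytes
        System.out.println(itemsPerBucket + " items, " + wastedPerBucket + " bytes wasted");
    }
}
```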
hbase_BucketAllocator_freeBlock_rdh | /**
* Free a block with the offset
*
* @param offset
* block's offset
* @return size freed
*/
public synchronized int freeBlock(long offset, int length) {
int bucketNo = (int) (offset / bucketCapacity);
assert (bucketNo >= 0) && (bucketNo < buckets.length);
Bucket targetBucket = buckets[bucketNo];
bucketS... | 3.26 |
hbase_BucketAllocator_completelyFreeBuckets_rdh | /**
* How many buckets are currently claimed by this bucket size but as yet totally unused. These
* buckets are available for reallocation to other bucket sizes if those fill up.
*/
public int completelyFreeBuckets() {
return completelyFreeBuckets;
} | 3.26 |
hbase_BucketAllocator_usedBytes_rdh | /**
* How many bytes are currently taking up space in this bucket size's buckets Note: If your
* items are less than the bucket size of this bucket, the actual used bytes by items will be
* lower than this value. But since a bucket size can only allocate items of a single size, this
* value is the true number of us... | 3.26 |
hbase_BucketAllocator_fullBuckets_rdh | /**
* How many buckets have been completely filled by blocks for this bucket size. These buckets
* can't accept any more blocks unless some existing are freed.
*/
public int fullBuckets() {
return fullBuckets;
} | 3.26 |
hbase_BucketAllocator_getLeastFilledBuckets_rdh | /**
* Returns a set of indices of the buckets that are least filled, excluding the given offsets. We
* also exclude the fully free buckets for the BucketSizes where everything is empty and they have
* only one completely free bucket kept as a reserve.
*
* @param excludedBuckets
* the buckets that need to be excluded due to current... | 3.26 |
hbase_BucketAllocator_totalCount_rdh | /**
* Combined {@link #freeCount()} + {@link #usedCount()}
*/
public long totalCount() {
return totalCount;
} | 3.26 |
hbase_BucketAllocator_totalBytes_rdh | /**
* Combined {@link #totalCount()} * {@link #itemSize()}
*/
public long totalBytes() {
return totalCount * itemSize;
} | 3.26 |
hbase_BucketAllocator_m4_rdh | /**
* Every time you allocate blocks in these buckets where the block size is less than the bucket
* size, fragmentation increases by that difference. You can reduce fragmentation by lowering
* the bucket size so that it is closer to the typical block size. This may have the consequence
* of bumping some blocks to ... | 3.26 |
hbase_BucketAllocator_allocate_rdh | /**
* Allocate a block in this bucket, return the offset representing the position in physical
* space
*
* @return the offset in the IOEngine
*/
public long allocate() {
assert freeCount > 0; // Else should not have been called
assert... | 3.26 |
hbase_AssignmentVerificationReport_getRegionsWithoutValidFavoredNodes_rdh | /**
* Return the regions without favored nodes
*
* @return regions without favored nodes
*/
List<RegionInfo> getRegionsWithoutValidFavoredNodes() {
return regionsWithoutValidFavoredNodes;
} | 3.26 |
hbase_AssignmentVerificationReport_getUnassignedRegions_rdh | /**
* Return the unassigned regions
*
* @return unassigned regions
*/
List<RegionInfo> getUnassignedRegions() {
return unAssignedRegionsList;
} | 3.26 |
hbase_AssignmentVerificationReport_getNonFavoredAssignedRegions_rdh | /**
* Return the regions not assigned to its favored nodes
*
* @return regions not assigned to its favored nodes
*/
List<RegionInfo> getNonFavoredAssignedRegions() {
return nonFavoredAssignedRegionList;
} | 3.26 |
hbase_AssignmentVerificationReport_getNumRegionsOnFavoredNodeByPosition_rdh | /**
* Return the number of regions based on the position (primary/secondary/ tertiary) assigned to
* their favored nodes
*
* @return the number of regions
*/
int getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position position) {
return favoredNodes[position.ordinal()];
} | 3.26 |
hbase_AssignmentVerificationReport_getDispersionInformation_rdh | /**
* Return a list which contains 3 elements: average dispersion score, max dispersion score and min
* dispersion score as first, second and third elements, respectively.
*/
public List<Float> getDispersionInformation() {
List<Float> dispersion = new ArrayList<>();
dispersion.add(avgDispersionScore);
... | 3.26 |
hbase_AssignmentVerificationReport_getTotalFavoredAssignments_rdh | /**
* Return the number of regions assigned to their favored nodes
*
* @return number of regions assigned to their favored nodes
*/
int getTotalFavoredAssignments() {
return totalFavoredAssignments;
} | 3.26 |
hbase_AssignmentVerificationReport_fillUpDispersion_rdh | /**
* Use this to project the dispersion scores
*/
public void fillUpDispersion(TableName tableName, SnapshotOfRegionAssignmentFromMeta snapshot,
FavoredNodesPlan newPlan) {
// Set the table name
this.tableName = tableName;
// Get all the regions for this table
List<RegionInfo> regionInfoList = snap... | 3.26 |
hbase_Chunk_reset_rdh | /**
* Reset the offset to UNINITIALIZED before reusing an old chunk
*/
void reset() {
if (nextFreeOffset.get() != UNINITIALIZED) {
nextFreeOffset.set(UNINITIALIZED);
allocCount.set(0);
}
} | 3.26 |
hbase_Chunk_alloc_rdh | /**
* Try to allocate <code>size</code> bytes from the chunk. If allocation is attempted before the
* init() call, the allocating thread busy-waits, looping until nextFreeOffset is set.
*
* @return the offset of the successful allocation, ... | 3.26 |
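The `alloc` body is truncated above, but the javadoc describes a busy-wait followed by an atomic bump of `nextFreeOffset`. A minimal sketch of that CAS bump-pointer loop, assuming the constants and fields named in the snippets (the real Chunk also accounts for a header and retry bookkeeping):

```java
import java.util.concurrent.atomic.AtomicInteger;

class ChunkSketch {
    static final int UNINITIALIZED = -1;
    private final AtomicInteger nextFreeOffset = new AtomicInteger(UNINITIALIZED);
    private final AtomicInteger allocCount = new AtomicInteger(0);
    private final int capacity;

    ChunkSketch(int capacity) { this.capacity = capacity; }

    void init() { nextFreeOffset.set(0); } // publishes the first usable offset

    /** Returns the offset of the allocation, or -1 if the chunk is full. */
    int alloc(int size) {
        while (true) {
            int oldOffset = nextFreeOffset.get();
            if (oldOffset == UNINITIALIZED) {
                continue; // busy-wait until init() has run
            }
            if (oldOffset + size > capacity) {
                return -1; // not enough space left in this chunk
            }
            // Claim the range [oldOffset, oldOffset + size) atomically.
            if (nextFreeOffset.compareAndSet(oldOffset, oldOffset + size)) {
                allocCount.incrementAndGet();
                return oldOffset;
            }
            // Lost the race to another allocator; retry.
        }
    }
}
```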
hbase_Chunk_init_rdh | /**
* Actually claim the memory for this chunk. This should only be called from the thread that
* constructed the chunk. It is thread-safe against other threads calling alloc(), who will block
* until the allocation is complete.
*/
public void init() {
assert nextFreeOffset.get() == UNINITIALIZED;
try {
... | 3.26 |
hbase_Chunk_getData_rdh | /**
* Returns This chunk's backing data.
*/
ByteBuffer getData() {
return this.data;
} | 3.26 |
hbase_TableIntegrityErrorHandlerImpl_handleHoleInRegionChain_rdh | /**
* {@inheritDoc }
*/
@Override
public void handleHoleInRegionChain(byte[] holeStart, byte[] holeEnd) throws IOException {
}
/**
* {@inheritDoc } | 3.26 |
hbase_TableIntegrityErrorHandlerImpl_handleRegionEndKeyNotEmpty_rdh | /**
* {@inheritDoc }
*/
@Override
public void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException {
} | 3.26 |
hbase_TableIntegrityErrorHandlerImpl_handleRegionStartKeyNotEmpty_rdh | /**
* {@inheritDoc }
*/
@Override
public void handleRegionStartKeyNotEmpty(HbckRegionInfo hi) throws IOException {
} | 3.26 |
hbase_TableIntegrityErrorHandlerImpl_handleDuplicateStartKeys_rdh | /**
* {@inheritDoc }
*/
@Override
public void handleDuplicateStartKeys(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException {
} | 3.26 |
hbase_TableIntegrityErrorHandlerImpl_m0_rdh | /**
* {@inheritDoc }
*/
@Override
public HbckTableInfo m0() {
return ti;
} | 3.26 |
hbase_TableIntegrityErrorHandlerImpl_handleDegenerateRegion_rdh | /**
* {@inheritDoc }
*/
@Override
public void handleDegenerateRegion(HbckRegionInfo hi) throws IOException {
} | 3.26 |
hbase_TableIntegrityErrorHandlerImpl_setTableInfo_rdh | /**
* {@inheritDoc }
*/
@Override
public void setTableInfo(HbckTableInfo ti2) {
this.ti = ti2;
} | 3.26 |
hbase_TableIntegrityErrorHandlerImpl_handleOverlapInRegionChain_rdh | /**
* {@inheritDoc }
*/
@Override
public void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException {
} | 3.26 |
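All of these `TableIntegrityErrorHandlerImpl` callbacks are deliberate no-ops, so a concrete handler overrides only the events it cares about. A hedged sketch of such a subclass (the logging body is illustrative, and the class is declared abstract in case the base leaves other callbacks abstract):

```java
import java.io.IOException;
import org.apache.hadoop.hbase.util.Bytes;

abstract class LoggingIntegrityHandler extends TableIntegrityErrorHandlerImpl {
    @Override
    public void handleHoleInRegionChain(byte[] holeStart, byte[] holeEnd) throws IOException {
        // Report the hole instead of silently ignoring it.
        System.err.println("hole in region chain: [" + Bytes.toStringBinary(holeStart)
            + ", " + Bytes.toStringBinary(holeEnd) + ")");
    }
}
```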
hbase_DictionaryCache_loadFromResource_rdh | // Visible for testing
public static byte[] loadFromResource(final Configuration conf,
final String s, final int maxSize) throws IOException {
if (!s.startsWith(RESOURCE_SCHEME)) {
throw new IOException("Path does not start with " + RESOURCE_SCHEME);
}
final String path = s.substring(RESOU... | 3.26 |
hbase_DictionaryCache_getDictionary_rdh | /**
* Load a dictionary or return a previously cached load.
*
* @param conf
* configuration
* @param path
* the hadoop Path where the dictionary is located, as a String
* @return the dictionary bytes if successful, null otherwise
*/
public static byte[] getDictionary(final Configuration conf, final String p... | 3.26 |
hbase_SimpleRpcScheduler_onConfigurationChange_rdh | /**
* Resize call queues.
*
* @param conf
* new configuration
*/
@Override
public void onConfigurationChange(Configuration conf) {
callExecutor.resizeQueues(conf);
if (priorityExecutor != null) {
priorityExecutor.resizeQueues(conf);
}
if (replicationExecutor != null) {
replicatio... | 3.26 |
hbase_HeterogeneousRegionCountCostFunction_loadRules_rdh | /**
* used to load the rule files.
*/
@RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*(/src/test/.*|HeterogeneousRegionCountCostFunction).java")
void loadRules() {
final List<String> lines = readFile(this.rulesPath);
if (null == lines) {
LOG.warn(("cannot load ... | 3.26 |
hbase_HeterogeneousRegionCountCostFunction_findLimitForRS_rdh | /**
* Find the limit for a ServerName. If not found then return the default value
*
* @param serverName
* the server we are looking for
* @return the limit
*/
int findLimitForRS(final ServerName serverName) {
boolean matched = false;
int limit = -1;
for (final Map.Entry<Pattern, Integer> entry : this.limitPerRu... | 3.26 |
hbase_HeterogeneousRegionCountCostFunction_readFileFromLocalFS_rdh | /**
* used to read the rule files from local FS
*/
private List<String> readFileFromLocalFS(final String filename) throws IOException {
return Files.readAllLines(Paths.get(filename), StandardCharsets.UTF_8);
} | 3.26 |
hbase_HeterogeneousRegionCountCostFunction_readFile_rdh | /**
* used to read the rule files from either HDFS or local FS
*/
private List<String> readFile(final String filename) {
if (null == filename) {
return null;
}
try {
if (filename.startsWith("file:")) {
return readFileFromLocalFS(filename);
}
return readFileFromHDFS(filename);
... | 3.26 |
hbase_HeterogeneousRegionCountCostFunction_readFileFromHDFS_rdh | /**
* used to read the rule files from HDFS
*/
private List<String> readFileFromHDFS(final String filename) throws IOException {
final Path path = new Path(filename);
final FileSystem fs = FileSystem.get(this.conf);
try (BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(path), Stand... | 3.26 |
hbase_HeterogeneousRegionCountCostFunction_rebuildCache_rdh | /**
* Rebuild cache matching ServerNames and their capacity.
*/
private void rebuildCache() {
LOG.debug("Rebuilding cache of capacity for each RS");
this.limitPerRS.clear();
this.totalCapacity = 0;
if (null == this.cluster) {
return;
}
for (int i = 0; i < this.cluster.numServers; i++) ... | 3.26 |
hbase_HeterogeneousRegionCountCostFunction_prepare_rdh | /**
* Called once per LB invocation to let the cost function initialize its state and perform any
* costly calculation.
*/
@Override
void prepare(final BalancerClusterState cluster) {
this.cluster = cluster;
this.loadRules();
} | 3.26 |
hbase_SizeCachedKeyValue_getSerializedSize_rdh | /**
* Override that simply returns the cached length, saving the cost of method dispatch. Without this
* override, the call would first go through {@link ExtendedCell#getSerializedSize()}, then forward to
* {@link SizeCachedKeyValue#getSerializedSize(boolean)}. (See HBASE-21657)
*/
@Override
public int getSerializedSize() {
return this.length;... | 3.26 |
hbase_TokenProvider_getServices_rdh | // AuthenticationService implementation
@Override
public Iterable<Service> getServices() {
return Collections.singleton(AuthenticationProtos.AuthenticationService.newReflectiveService(this));
} | 3.26 |
hbase_TokenProvider_isAllowedDelegationTokenOp_rdh | /**
*
* @param ugi
* A user group information.
* @return true if delegation token operation is allowed
*/
private boolean isAllowedDelegationTokenOp(UserGroupInformation ugi) throws IOException {
AuthenticationMethod authMethod = ugi.getAuthenticationMethod();
if (authMethod == AuthenticationMethod.PROXY... | 3.26 |
hbase_FilterWrapper_toByteArray_rdh | /**
* Returns The filter serialized using pb
*/
@Override
public byte[] toByteArray() throws IOException {
FilterProtos.FilterWrapper.Builder builder = FilterProtos.FilterWrapper.newBuilder();
builder.setFilter(ProtobufUtil.toFilter(this.filter));
return builder.build().toByteArray();
} | 3.26 |
hbase_FilterWrapper_parseFrom_rdh | /**
*
* @param pbBytes
* A pb serialized {@link FilterWrapper} instance
* @return An instance of {@link FilterWrapper} made from <code>bytes</code>
* @throws org.apache.hadoop.hbase.exceptions.DeserializationException
* @see #toByteArray
*/
public static FilterWrapper parseF... | 3.26 |
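`toByteArray` and `parseFrom` form the usual protobuf round trip. A hedged usage sketch (the filter choice is illustrative, and `FilterWrapper` is an internal class, so this is for demonstration only):

```java
import org.apache.hadoop.hbase.filter.FilterWrapper;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterRoundTrip {
    public static void main(String[] args) throws Exception {
        FilterWrapper wrapper = new FilterWrapper(new PrefixFilter(Bytes.toBytes("row-")));
        byte[] pb = wrapper.toByteArray();                    // serialize via protobuf
        FilterWrapper restored = FilterWrapper.parseFrom(pb); // and deserialize back
        System.out.println("round trip ok: " + (restored != null));
    }
}
```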
hbase_AccessControlClient_getUserPermissions_rdh | /**
* List all the userPermissions matching the given table pattern, column family and column
* qualifier.
*
* @param connection
* Connection
* @param tableRegex
* The regular expression string to match against. It shouldn't be null,
* empty or a namespace regular expression.
* @param columnFamily
* C... | 3.26 |
hbase_AccessControlClient_grant_rdh | /**
* Grant global permissions for the specified user. If permissions for the specified user exist,
* later grants will override previously granted permissions.
*/
public static void grant(Connection connection, final String userName, final Permission... actions) throws Throwable {
grant(connection, userN... | 3.26 |
hbase_AccessControlClient_revoke_rdh | /**
* Revoke global permissions for the specified user.
*
* @param connection
* The Connection instance to use
*/
public static void revoke(Connection connection, final String userName, final Permission... actions) throws Throwable {
connection.getAdmin().revoke(new UserPermission(userName, Permission.newBui... | 3.26 |
hbase_AccessControlClient_isAuthorizationEnabled_rdh | /**
* Return true if authorization is supported and enabled
*
* @param connection
* The connection to use
* @return true if authorization is supported and enabled, false otherwise
*/
public static boolean isAuthorizationEnabled(Connection connection) throws IOException {
return connection.getAdmin().getSecurityC... | 3.26 |
hbase_AccessControlClient_isCellAuthorizationEnabled_rdh | /**
* Return true if cell authorization is supported and enabled
*
* @param connection
* The connection to use
* @return true if cell authorization is supported and enabled, false otherwise
*/
public static boolean isCellAuthorizationEnabled(Connection connection) throws IOException {
return connection.getA... | 3.26 |
hbase_AccessControlClient_hasPermission_rdh | /**
* Validates whether specified user has permission to perform actions on the mentioned table,
* column family or column qualifier.
*
* @param connection
* Connection
* @param tableName
* Table name, it shouldn't be null or empty.
* @param columnFamily
* The column family. Optional argument, can be emp... | 3.26 |
hbase_AccessControlClient_m0_rdh | /**
* Grants permission on the specified table for the specified user
*
* @param connection
* The Connection instance to use
* @param tableName
* the table name
* @param userName
* the user name
* @param family
* the column family
* @param qual
* the column qualifier
* @param mergeExistingPermiss... | 3.26 |
hbase_ByteBufferUtils_drainInputStreamToBuffer_rdh | /**
* Copy from the InputStream to a new heap ByteBuffer until the InputStream is exhausted.
*/
public static ByteBuffer drainInputStreamToBuffer(InputStream is) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream(4096);
IOUtils.copyBytes(is, baos, 4096, true);
ByteBuffer buffer = ByteBuffer.w... | 3.26 |
hbase_ByteBufferUtils_searchDelimiterIndex_rdh | /**
* Find index of passed delimiter.
*
* @return Index of delimiter having started from start of <code>b</code> moving rightward.
*/
public static int searchDelimiterIndex(ByteBuffer b, int offset, final int length, final int delimiter) {
for (int i = offset, n = offset + length; i < n; i++) {
if (b.get(i) == de... | 3.26 |
hbase_ByteBufferUtils_putInt_rdh | /**
* Put an int value out to the given ByteBuffer's current position in big-endian format. This also
* advances the position in buffer by int size.
*
* @param buffer
* the ByteBuffer to write to
* @param val
* int to write out
*/
public static void putInt(ByteBuffer buffer, int val) {
ConverterHolder.BEST_... | 3.26 |
hbase_ByteBufferUtils_readVLong_rdh | /**
* Similar to {@link WritableUtils#readVLong(java.io.DataInput)} but reads from a
* {@link ByteBuff}.
*/
public static long readVLong(ByteBuff in) {
return readVLong(in::get);
} | 3.26 |
hbase_ByteBufferUtils_toInt_rdh | /**
* Reads an int value at the given buffer's offset.
*
* @param buffer
* input byte buffer to read
* @param offset
* input offset where int is
* @return int value at offset
*/
public static int toInt(ByteBuffer buffer, int offset) {
return ConverterHolder.BEST_CONVERTER.toInt(buffer, offset);
} | 3.26 |
hbase_ByteBufferUtils_putShort_rdh | /**
* Put a short value out to the given ByteBuffer's current position in big-endian format. This
* also advances the position in buffer by short size.
*
* @param buffer
* the ByteBuffer to write to
* @param val
* short to write out
*/
public static void putShort(ByteBuffer buffer, short val) {
ConverterHol... | 3.26 |
hbase_ByteBufferUtils_copyBufferToStream_rdh | /**
* Copy data from a buffer to an output stream. Does not update the position in the buffer.
*
* @param out
* the output stream to write bytes to
* @param in
* the buffer to read bytes from
* @param offset
* the offset in the buffer (from the buffer's array offset) to start copying bytes
* from
* @p... | 3.26 |
hbase_ByteBufferUtils_findCommonPrefix_rdh | /**
* Find length of common prefix in two arrays.
*
* @param left
* ByteBuffer to be compared.
* @param leftOffset
* Offset in left ByteBuffer.
* @param leftLength
* Length of left ByteBuffer.
* @param right
* Array to be compared
* @param rightOffset
* Offset in right Array.
* @param rightLength... | 3.26 |
hbase_ByteBufferUtils_copyFromArrayToBuffer_rdh | /**
* Copies bytes from given array's offset to length part into the given buffer. Puts the bytes to
* buffer's given position. This doesn't affect the position of buffer.
*
* @param out
* output bytebuffer to copy to
* @param outOffset
* output buffer offset
* @param in
* input array to copy from
* @pa... | 3.26 |
hbase_ByteBufferUtils_copyOfRange_rdh | /**
* Similar to {@link Arrays#copyOfRange(byte[], int, int)}
*
* @param original
* the buffer from which the copy has to happen
* @param from
* the starting index
* @param to
* the ending index
* @return a byte[] created out of the copy
*/
public static byte[] copyOfRange(ByteBuffer original, int from,... | 3.26 |
hbase_ByteBufferUtils_m3_rdh | /**
* Reads a double value at the given buffer's offset.
*
* @param buffer
* input byte buffer to read
* @param offset
* offset where double is
* @return double value at offset
*/
public static double m3(ByteBuffer buffer, int offset) {
return Double.longBitsToDouble(toLong(buffer, offset));
} | 3.26 |
hbase_ByteBufferUtils_copyFromBufferToArray_rdh | /**
* Copies specified number of bytes from given offset of 'in' ByteBuffer to the array. This
* doesn't affect the position of buffer.
*
* @param out
* output array to copy input bytebuffer to
* @param in
* input bytebuffer to copy from
* @param sourceOffset
* offset of source bytebuffer
* @param desti... | 3.26 |
hbase_ByteBufferUtils_intFitsIn_rdh | /**
* Check how many bytes are required to store a value.
*
* @param value
* Value which size will be tested.
* @return How many bytes are required to store value.
*/
public static int intFitsIn(final int value) {
if (value < 0) {
return 4;
}
if (value < (1 << (2 * 8))) {
if (value < (1 << (1 * 8))) {
return 1;
... | 3.26 |
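The branching is cut off above; a hedged sketch of the complete decision it implies (negative values carry the sign bit, so they always need four bytes; otherwise pick the smallest byte count whose range holds the value):

```java
final class IntFitsSketch {
    // Hypothetical completion of the truncated method, same branching style.
    static int intFitsIn(final int value) {
        if (value < 0) {
            return 4; // sign bit set: all four bytes needed
        }
        if (value < (1 << (2 * 8))) {
            return value < (1 << (1 * 8)) ? 1 : 2; // fits in one or two bytes
        }
        return value < (1 << (3 * 8)) ? 3 : 4;     // three bytes, else four
    }
}
```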
hbase_ByteBufferUtils_m2_rdh | /**
* Copy the data to the output stream and update position in buffer.
*
* @param out
* the stream to write bytes to
* @param in
* the buffer to read bytes from
* @param length
* the number of bytes to copy
*/
public static void m2(OutputStream out, ByteBuffer in, int length) throws IOException {
copyBu... | 3.26 |
hbase_ByteBufferUtils_putCompressedInt_rdh | /**
* Put in buffer integer using 7 bit encoding. For each written byte: 7 bits are used to store the
* value and 1 bit indicates whether another byte follows.
*
* @param value
* Int to be compressed.
* @param out
* Where to put compressed data
* @return Number of bytes written.
* @throws IOException
* o... | 3.26 |
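A minimal sketch of the 7-bit encoding this javadoc describes: each output byte carries 7 value bits, and the high bit flags that another byte follows (emitting low-order groups first is an assumption; the actual byte order of the HBase implementation may differ):

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

final class VarIntSketch {
    /** Writes value with 7 bits per byte plus a continuation bit; returns bytes written. */
    static int putCompressedInt(OutputStream out, int value) throws IOException {
        int written = 0;
        while ((value & ~0x7F) != 0) {
            out.write((value & 0x7F) | 0x80); // low 7 bits, continuation flag set
            value >>>= 7;
            written++;
        }
        out.write(value); // final byte, continuation flag clear
        return written + 1;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        int n = putCompressedInt(baos, 300); // 300 -> 0xAC 0x02
        System.out.println(n + " bytes written");
    }
}
```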