| name | code_snippet | score |
|---|---|---|
hbase_ByteBufferUtils_skip_rdh | /**
* Increment position in buffer.
*
* @param buffer
* In this buffer.
* @param length
* By that many bytes.
*/
public static void skip(ByteBuffer buffer, int length) {
buffer.position(buffer.position() + length);
} | 3.26 |
hbase_ByteBufferUtils_putLong_rdh | /**
* Put a long value out to the given ByteBuffer's current position in big-endian format. This also
* advances the position in buffer by long size.
*
* @param buffer
* the ByteBuffer to write to
* @param val
* long to write out
*/
public static void putLong(ByteBuffer buffer, long val) {
ConverterHolder... | 3.26 |
hbase_ByteBufferUtils_arePartsEqual_rdh | /**
* Check whether two parts in the same buffer are equal.
*
* @param buffer
 * 		The buffer that contains both parts.
* @param offsetLeft
* Beginning of first part.
* @param lengthLeft
* Length of the first part.
* @param offsetRight
* Beginning of the second part.
* @param lengthRight
* Length of the s... | 3.26 |
hbase_ByteBufferUtils_toStringBinary_rdh | // For testing purposes
public static String toStringBinary(final ByteBuffer b, int off, int len) {
StringBuilder result = new StringBuilder();
// Just in case we are passed a 'len' that is > buffer length...
if (off >= b.capacity()) {
return result.toString();
}
    if ((off + len) > b.capacity()) {
len = b.capacity() - o... | 3.26 |
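
The snippet above clamps `off`/`len` against the buffer's capacity before rendering bytes. A minimal standalone sketch of the same idea, with a simplified `\xHH` escape for non-printable bytes (the exact escape rules of HBase's real helper may differ):

```java
import java.nio.ByteBuffer;

public class ToStringBinarySketch {
  // Render bytes as printable ASCII, escaping everything else as \xHH.
  static String toStringBinary(ByteBuffer b, int off, int len) {
    StringBuilder result = new StringBuilder();
    if (off >= b.capacity()) {
      return result.toString();   // nothing to render
    }
    if (off + len > b.capacity()) {
      len = b.capacity() - off;   // clamp len to the buffer
    }
    for (int i = off; i < off + len; i++) {
      int ch = b.get(i) & 0xFF;
      if (ch >= ' ' && ch <= '~' && ch != '\\') {
        result.append((char) ch);
      } else {
        result.append(String.format("\\x%02X", ch));
      }
    }
    return result.toString();
  }

  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.wrap(new byte[] { 'k', 'e', 'y', 0, 1 });
    System.out.println(toStringBinary(buf, 0, 99)); // len clamped: key\x00\x01
  }
}
```
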
hbase_ByteBufferUtils_longFitsIn_rdh | /**
* Check how many bytes are required to store value.
*
* @param value
 * 		Value whose size will be tested.
* @return How many bytes are required to store value.
*/
public static int longFitsIn(final long value) {
if (value < 0) {
return 8;
}
    if (value < (1L << (4 * 8))) {
// no more than 4 bytes
if (value < (... | 3.26 |
hbase_ByteBufferUtils_readAsInt_rdh | /**
* Converts a ByteBuffer to an int value
*
* @param buf
* The ByteBuffer
* @param offset
* Offset to int value
* @param length
* Number of bytes used to store the int value.
 * @return the int value
 * @throws IllegalArgumentException
 * 		if there's not enough bytes left in the buffer after the given offset
*/
public static int readAsInt(By... | 3.26 |
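
Based on the javadoc, `readAsInt` assembles an int from `length` big-endian bytes at an absolute offset. A self-contained sketch of that accumulation, with the bounds check the `@throws` clause describes (HBase's own version reads through its `toByte` helper):

```java
import java.nio.ByteBuffer;

public class ReadAsIntSketch {
  // Big-endian accumulation of 'length' bytes into an int; absolute offsets,
  // position untouched.
  static int readAsInt(ByteBuffer buf, int offset, int length) {
    if (offset + length > buf.limit()) {
      throw new IllegalArgumentException("Not enough bytes left after offset " + offset);
    }
    int n = 0;
    for (int i = offset; i < offset + length; i++) {
      n <<= 8;
      n ^= buf.get(i) & 0xFF;
    }
    return n;
  }

  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.wrap(new byte[] { 0x01, 0x02, 0x03 });
    System.out.println(readAsInt(buf, 0, 3)); // 0x010203 = 66051
  }
}
```
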
hbase_ByteBufferUtils_compareTo_rdh | // The below two methods show up in lots of places. Versions of them in commons util and in
// Cassandra. In guava too? They are copied from ByteBufferUtils. They are here as static
// privates. Seems to make code smaller and make Hotspot happier (comes of compares and study
// of compiled code via jitwatch).
public st... | 3.26 |
hbase_ByteBufferUtils_writeVLong_rdh | /**
* Similar to {@link WritableUtils#writeVLong(java.io.DataOutput, long)}, but writes to a
* {@link ByteBuffer}.
*/
public static void writeVLong(ByteBuffer out, long i) {
    if ((i >= (-112)) && (i <= 127)) {
out.put(((byte) (i)));
return;
}
int len = -112;
if (i < 0) {
i ^= -1L;// take one's complement
len = -120... | 3.26 |
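
The snippet follows the variable-length long format of Hadoop's `WritableUtils`: one byte for values in [-112, 127], otherwise a header byte encoding sign and payload length, followed by the magnitude big-endian. A standalone encoder/decoder pair for that format, assuming the truncated remainder matches it:

```java
import java.nio.ByteBuffer;

public class VLongSketch {
  static void writeVLong(ByteBuffer out, long i) {
    if (i >= -112 && i <= 127) {
      out.put((byte) i);          // common case: a single byte
      return;
    }
    int len = -112;
    if (i < 0) {
      i ^= -1L;                   // take one's complement
      len = -120;
    }
    long tmp = i;
    while (tmp != 0) {            // count payload bytes
      tmp >>= 8;
      len--;
    }
    out.put((byte) len);          // header: sign + byte count
    len = (len < -120) ? -(len + 120) : -(len + 112);
    for (int idx = len; idx != 0; idx--) {
      int shiftbits = (idx - 1) * 8;
      out.put((byte) ((i & (0xFFL << shiftbits)) >> shiftbits));
    }
  }

  static long readVLong(ByteBuffer in) {
    byte firstByte = in.get();
    if (firstByte >= -112) {
      return firstByte;
    }
    boolean negative = firstByte < -120;
    int len = negative ? -(firstByte + 120) : -(firstByte + 112);
    long i = 0;
    for (int idx = 0; idx < len; idx++) {
      i = (i << 8) | (in.get() & 0xFF);
    }
    return negative ? ~i : i;     // undo the one's complement
  }

  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.allocate(16);
    writeVLong(buf, 300);
    writeVLong(buf, -4096);
    buf.flip();
    System.out.println(readVLong(buf)); // 300
    System.out.println(readVLong(buf)); // -4096
  }
}
```
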
hbase_ByteBufferUtils_toBytes_rdh | /**
* Copy the given number of bytes from specified offset into a new byte[]
*
* @param buffer
* input bytebuffer to read
* @param offset
* input offset where Bytes are
* @param length
* the number of bytes to read
* @return a new byte[] containing the bytes in the specified range
*/
public static byte[... | 3.26 |
hbase_ByteBufferUtils_toLong_rdh | /**
* Reads a long value at the given buffer's offset.
*
* @param buffer
* input byte buffer to read
* @param offset
* input offset where Long is
* @return long value at offset
*/
public static long toLong(ByteBuffer buffer, int offset) {
    return ConverterHolder.BEST_CONVERTER.toLong(buffer, offset);
  }
hbase_ByteBufferUtils_copyFromBufferToBuffer_rdh | /**
* Copy from one buffer to another from given offset. This will be absolute positional copying and
* won't affect the position of any of the buffers.
*
* @param in
* input bytebuffer
* @param out
* destination bytebuffer
* @param sourceOffset
* offset of source buffer
* @param destinationOffset
* ... | 3.26 |
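
A sketch of absolute positional copying using only `java.nio`: duplicates carry independent position/limit marks, so neither original buffer moves. The real method adds fast paths this sketch omits:

```java
import java.nio.ByteBuffer;

public class AbsoluteCopySketch {
  // Copy 'length' bytes from in[sourceOffset..] to out[destinationOffset..]
  // without moving either buffer's position.
  static void copyFromBufferToBuffer(ByteBuffer in, ByteBuffer out,
      int sourceOffset, int destinationOffset, int length) {
    ByteBuffer src = in.duplicate();
    src.position(sourceOffset).limit(sourceOffset + length);
    ByteBuffer dst = out.duplicate();
    dst.position(destinationOffset);
    dst.put(src);
  }

  public static void main(String[] args) {
    ByteBuffer a = ByteBuffer.wrap("hello world".getBytes());
    ByteBuffer b = ByteBuffer.allocate(16);
    copyFromBufferToBuffer(a, b, 6, 0, 5);
    System.out.println(a.position() + " " + b.position()); // 0 0 -- untouched
    System.out.println(new String(b.array(), 0, 5));       // world
  }
}
```
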
hbase_ByteBufferUtils_readLong_rdh | /**
* Read long which was written to fitInBytes bytes and increment position.
*
* @param fitInBytes
* In how many bytes given long is stored.
* @return The value of parsed long.
*/
public static long readLong(ByteBuffer in, final int fitInBytes) {
long v58 = 0;
for (int i = 0; i < fitInBytes; ++i) {
v58 |= (i... | 3.26 |
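
The reader accumulates `fitInBytes` bytes, least significant first (each byte shifted left by `8 * i`). A round-trip sketch pairing that reader with the matching writer, assuming this little-endian layout from the visible loop:

```java
import java.nio.ByteBuffer;

public class FitInBytesSketch {
  // Writer side: emit the low 'fitInBytes' bytes, least significant first.
  static void writeLong(ByteBuffer out, long value, int fitInBytes) {
    for (int i = 0; i < fitInBytes; ++i) {
      out.put((byte) (value & 0xFF));
      value >>>= 8;
    }
  }

  // Reader side: rebuild the long and advance the position, mirroring the
  // snippet's accumulation loop.
  static long readLong(ByteBuffer in, int fitInBytes) {
    long result = 0;
    for (int i = 0; i < fitInBytes; ++i) {
      result |= (in.get() & 0xFFL) << (8 * i);
    }
    return result;
  }

  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.allocate(8);
    writeLong(buf, 0xABCDEF, 3);   // value known to fit in 3 bytes
    buf.flip();
    System.out.printf("%x%n", readLong(buf, 3)); // abcdef
  }
}
```
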
hbase_ByteBufferUtils_toShort_rdh | /**
* Reads a short value at the given buffer's offset.
*
* @param buffer
* input byte buffer to read
* @param offset
* input offset where short is
* @return short value at offset
*/
public static short toShort(ByteBuffer buffer, int offset) {
return ConverterHolder.BEST_CONVERTER.m0(buffer, offset);
} | 3.26 |
hbase_ByteBufferUtils_hashCode_rdh | /**
 * @param buf
 * 		ByteBuffer to hash
 * @param offset
 * 		offset to start from
 * @param length
 * 		length to hash
*/
public static int hashCode(ByteBuffer buf, int offset, int length) {
int hash = 1;
for (int i = offset; i < (offset + length); i++) {
hash = (31 * hash) + ((int) (toByte(buf, i)));
}
return hash;
} | 3.26 |
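
This is the same 31-based polynomial hash as `java.util.Arrays.hashCode(byte[])`, applied to an absolute window of a ByteBuffer, assuming `toByte` is an absolute signed read. A sketch demonstrating the equivalence:

```java
import java.nio.ByteBuffer;
import java.util.Arrays;

public class BufferHashSketch {
  // Same polynomial as Arrays.hashCode(byte[]), over [offset, offset+length).
  static int hashCode(ByteBuffer buf, int offset, int length) {
    int hash = 1;
    for (int i = offset; i < offset + length; i++) {
      hash = (31 * hash) + buf.get(i);   // signed byte widening, as in Arrays
    }
    return hash;
  }

  public static void main(String[] args) {
    byte[] data = { 1, -2, 3, -4 };
    // Hashing the full window agrees with Arrays.hashCode on the same bytes.
    System.out.println(hashCode(ByteBuffer.wrap(data), 0, data.length));
    System.out.println(Arrays.hashCode(data));
  }
}
```
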
hbase_ByteBufferUtils_readCompressedInt_rdh | /**
* Read integer from buffer coded in 7 bits and increment position.
*
* @return Read integer.
*/
public static int readCompressedInt(ByteBuffer buffer) {
byte b = buffer.get();
if ((b & NEXT_BIT_MASK) != 0) {
return (b & VALUE_MASK) + (readCompressedInt(buffer) << NEXT_BIT_SHIFT);
}
return b & VALUE_MASK;
} | 3.26 |
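
`readCompressedInt` decodes a little-endian base-128 varint: 7 value bits per byte, high bit set while more bytes follow. A standalone sketch of both directions; the mask constants are named after the ones the snippet references, but their values here are assumptions:

```java
import java.nio.ByteBuffer;

public class CompressedIntSketch {
  static final int VALUE_MASK = 0x7F;     // 7 value bits per byte
  static final int NEXT_BIT_MASK = 0x80;  // continuation flag
  static final int NEXT_BIT_SHIFT = 7;

  // Emit 7 bits at a time, setting the high bit while more bits remain.
  static void writeCompressedInt(ByteBuffer out, int value) {
    while ((value & ~VALUE_MASK) != 0) {
      out.put((byte) ((value & VALUE_MASK) | NEXT_BIT_MASK));
      value >>>= NEXT_BIT_SHIFT;
    }
    out.put((byte) value);
  }

  // Iterative equivalent of the snippet's recursive reader.
  static int readCompressedInt(ByteBuffer in) {
    int result = 0;
    int shift = 0;
    byte b;
    do {
      b = in.get();
      result |= (b & VALUE_MASK) << shift;
      shift += NEXT_BIT_SHIFT;
    } while ((b & NEXT_BIT_MASK) != 0);
    return result;
  }

  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.allocate(5);
    writeCompressedInt(buf, 300);  // two bytes: 0xAC 0x02
    buf.flip();
    System.out.println(readCompressedInt(buf)); // 300
  }
}
```
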
hbase_ByteBufferUtils_searchDelimiterIndexInReverse_rdh | /**
* Find index of passed delimiter walking from end of buffer backwards.
*
* @return Index of delimiter
*/
  public static int searchDelimiterIndexInReverse(ByteBuffer b, int offset, int length,
    int delimiter) {
    for (int i = (offset + length) - 1; i >= offset; i--) {
if (b.get(i) == delimiter) {
return i;
}
}
retur... | 3.26 |
hbase_BufferedMutator_getWriteBufferSize_rdh | /**
* Returns the maximum size in bytes of the write buffer for this HTable.
* <p>
* The default value comes from the configuration parameter {@code hbase.client.write.buffer}.
*
* @return The size of the write buffer in bytes.
 */
  default long getWriteBufferSize() {
    throw new UnsupportedOperationException("The Buf... | 3.26 |
hbase_BufferedMutator_disableWriteBufferPeriodicFlush_rdh | /**
* Disable periodic flushing of the write buffer.
*/
default void disableWriteBufferPeriodicFlush() {
setWriteBufferPeriodicFlush(0, MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS);
} | 3.26 |
hbase_BufferedMutator_setWriteBufferPeriodicFlush_rdh | /**
* Sets the maximum time before the buffer is automatically flushed.
*
* @param timeoutMs
 * 		The maximum number of milliseconds that records may be buffered before
* they are flushed. Set to 0 to disable.
* @param timerTickMs
* The number of milliseconds between each check if the timeout has been
* ... | 3.26 |
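
A usage sketch combining the three `BufferedMutator` rows above; the connection and table name are placeholders for any open HBase connection:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;

public class PeriodicFlushExample {
  static void configure(Connection conn) throws IOException {
    try (BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("demo"))) {
      // Flush buffered mutations at most 5s after they were added, checking
      // the timeout once per second.
      mutator.setWriteBufferPeriodicFlush(5_000L, 1_000L);

      // ... issue mutator.mutate(...) calls here ...

      // Equivalent to setWriteBufferPeriodicFlush(0, <min tick>): a timeoutMs
      // of 0 disables the periodic flush again, as the snippets above show.
      mutator.disableWriteBufferPeriodicFlush();
    }
  }
}
```
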
hbase_ThriftConnection_getTableBuilder_rdh | /**
 * Get a TableBuilder to build a ThriftTable; ThriftTable is NOT thread safe
*
* @return a TableBuilder
* @throws IOException
* IOException
*/
@Override
public TableBuilder getTableBuilder(TableName tableName, ExecutorService pool) {
return new TableBuilder() {
@Override
public TableBuilder ... | 3.26 |
hbase_ThriftConnection_getAdmin_rdh | /**
 * Get a ThriftAdmin; ThriftAdmin is NOT thread safe
*
* @return a ThriftAdmin
* @throws IOException
* IOException
*/
@Override
public Admin getAdmin() throws IOException {
Pair<THBaseService.Client, TTransport> client = clientBuilder.getClient();
return new ThriftAdmin(client.getFirst(), client.get... | 3.26 |
hbase_ForeignExceptionDispatcher_addListener_rdh | /**
* Listen for failures to a given process. This method should only be used during initialization
 * and listeners should not be added after exceptions are accepted.
*
* @param errorable
* listener for the errors. may be null.
*/
public synchronized void addListener(ForeignExceptionListener errorable) {
this.listeners.add(... | 3.26 |
hbase_ForeignExceptionDispatcher_dispatch_rdh | /**
* Sends an exception to all listeners.
*
* @param e
* {@link ForeignException} containing the cause. Can be null.
*/
private void dispatch(ForeignException e) {
// update all the listeners with the passed error
for (ForeignExceptionListener l : listeners) {
l.receive(e);
}
} | 3.26 |
hbase_ClientUtils_utf8_rdh | /**
 * Helper to translate a byte buffer to a UTF8 string.
*
* @param bb
* byte buffer
* @return UTF8 decoded string value
*/
public static String utf8(final ByteBuffer bb) {
// performance is not very critical here so we always copy the BB to a byte array
byte[] buf = new byte[bb.remaining()];
// dupl... | 3.26 |
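
A sketch of the helper, assuming the truncated `// dupl...` comment means the buffer is read through a duplicate so the caller's position is preserved:

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class Utf8Sketch {
  // Copy the remaining bytes into an array, then decode as UTF-8. The
  // duplicate is read instead of the original so the caller's position stays.
  static String utf8(ByteBuffer bb) {
    byte[] buf = new byte[bb.remaining()];
    bb.duplicate().get(buf);
    return new String(buf, StandardCharsets.UTF_8);
  }

  public static void main(String[] args) {
    ByteBuffer bb = ByteBuffer.wrap("héllo".getBytes(StandardCharsets.UTF_8));
    System.out.println(utf8(bb));        // héllo
    System.out.println(bb.remaining());  // unchanged
  }
}
```
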
hbase_ClientUtils_getLoginContext_rdh | /**
 * To authenticate the demo client, kinit should be invoked ahead of time. Here we try to get the
 * Kerberos credential from the ticket cache.
*
* @return LoginContext Object
* @throws LoginException
* Exception thrown if unable to get LoginContext
*/
public static LoginContext ge... | 3.26 |
hbase_ClientUtils_printRow_rdh | /**
 * Copy values into a TreeMap to get them in sorted order, then print it.
*
* @param rowResult
* Holds row name and then a map of columns to cells
*/
public static void printRow(final TRowResult rowResult) {
TreeMap<String, TCell> sorted = new TreeMap<>();
for (Map.Entry<ByteBuffer, TCell> column : rowRe... | 3.26 |
hbase_RegionHDFSBlockLocationFinder_mapHostNameToServerName_rdh | /**
 * Map hostname to ServerName. The output ServerName list will have the same order as the input hosts.
*
* @param hosts
* the list of hosts
* @return ServerName list
*/
@RestrictedApi(explanation = "Should only be called in tests", link = "", allowedOnPath = ".*/src/test/.*|.*/RegionHDFSBlockLocationFinder.java"... | 3.26 |
hbase_RegionHDFSBlockLocationFinder_scheduleFullRefresh_rdh | /**
* Refresh all the region locations.
*
 * @return true if user-created regions got refreshed.
*/
private boolean scheduleFullRefresh() {
ClusterInfoProvider service = this.provider;
// Protect from anything being null while starting up.
if (service == null) {
return false;
}
// TODO: S... | 3.26 |
hbase_RegionHDFSBlockLocationFinder_refreshLocalityChangedRegions_rdh | /**
* If locality for a region has changed, that pretty certainly means our cache is out of date.
* Compare oldStatus and newStatus, refreshing any regions which have moved or changed locality.
*/
private void refreshLocalityChangedRegions(ClusterMetrics oldStatus, ClusterMetrics newStatus) {
    if ((oldStatus ... | 3.26 |
hbase_RegionHDFSBlockLocationFinder_getDescriptor_rdh | /**
 * Return the TableDescriptor for a given tableName.
*
* @param tableName
* the table name
*/
private TableDescriptor getDescriptor(TableName tableName) throws IOException {
ClusterInfoProvider service = this.provider;
if (service == null) {
return null;
}
... | 3.26 |
hbase_RegionHDFSBlockLocationFinder_internalGetTopBlockLocation_rdh | /**
* Returns an ordered list of hosts that are hosting the blocks for this region. The weight of
* each host is the sum of the block lengths of all files on that host, so the first host in the
* list is the server which holds the most bytes of the given region's HFiles.
*
* @param region
* region
* @return or... | 3.26 |
hbase_RegionHDFSBlockLocationFinder_createCache_rdh | /**
* Create a cache for region to list of servers
*
* @return A new Cache.
*/
private LoadingCache<RegionInfo, HDFSBlocksDistribution> createCache() {
return CacheBuilder.newBuilder().expireAfterWrite(CACHE_TIME, TimeUnit.MILLISECONDS).build(loader);
} | 3.26 |
hbase_StoreFileTrackerFactory_create_rdh | /**
 * Used on the master side when splitting/merging regions, as we do not have a Store, and thus no
 * StoreContext, on the master side.
*/
public static StoreFileTracker create(Configuration conf, TableDescriptor td, ColumnFamilyDescriptor cfd, HRegionFileSystem regionFs) {
StoreContext ctx = StoreContext.getBuilder().withC... | 3.26 |
hbase_StoreFileTrackerFactory_createForMigration_rdh | /**
* Create store file tracker to be used as source or destination for
* {@link MigrationStoreFileTracker}.
*/
static StoreFileTrackerBase createForMigration(Configuration conf, String configName, boolean isPrimaryReplica, StoreContext ctx) {
Class<? extends StoreFileTrackerBase> tracker = getStoreFileTrackerCl... | 3.26 |
hbase_BaseLoadBalancer_updateBalancerStatus_rdh | /**
* Updates the balancer status tag reported to JMX
*/
@Override
public void updateBalancerStatus(boolean status) {
metricsBalancer.balancerStatus(status);
} | 3.26 |
hbase_BaseLoadBalancer_randomAssignment_rdh | /**
* Used to assign a single region to a random server.
*/
private ServerName randomAssignment(BalancerClusterState cluster, RegionInfo regionInfo, List<ServerName> servers) {
    int numServers = servers.size(); // servers is not null, numServers > 1
ServerName sn = null;
final int maxIterations = numServers * 4;
in... | 3.26 |
hbase_BaseLoadBalancer_roundRobinAssignment_rdh | /**
* Round-robin a list of regions to a list of servers
*/
private void roundRobinAssignment(BalancerClusterState cluster, List<RegionInfo> regions, List<ServerName> servers, Map<ServerName, List<RegionInfo>> assignments) {
Random rand = ThreadLocalRandom.current();
List<RegionInfo> unassignedRegions = new ArrayLis... | 3.26 |
hbase_BaseLoadBalancer_getRegionAssignmentsByServer_rdh | // return a modifiable map, as we may add more entries into the returned map.
private Map<ServerName, List<RegionInfo>> getRegionAssignmentsByServer(Collection<RegionInfo> regions) {
return provider != null ? new HashMap<>(provider.getSnapShotOfAssignment(regions)) : new HashMap<>();
} | 3.26 |
hbase_SimpleRpcServerResponder_processResponse_rdh | /**
* Process the response for this call. You need to have the lock on
* {@link org.apache.hadoop.hbase.ipc.SimpleServerRpcConnection#responseWriteLock}
*
* @return true if we proceed the call fully, false otherwise.
*/
private boolean processResponse(SimpleServerRpcConnection conn, RpcResponse resp) throws IOExc... | 3.26 |
hbase_SimpleRpcServerResponder_doRespond_rdh | //
// Enqueue a response from the application.
//
  void doRespond(SimpleServerRpcConnection conn, RpcResponse resp) throws IOException {
boolean added = false;
// If there is already a write in progress, we don't wait. This allows to free the handlers
// immediately for other tasks.
if (conn.responseQueue.isEmpty() &&... | 3.26 |
hbase_SimpleRpcServerResponder_registerWrites_rdh | /**
* Take the list of the connections that want to write, and register them in the selector.
*/
  private void registerWrites() {
    Iterator<SimpleServerRpcConnection> it = writingCons.iterator();
while (it.hasNext()) {
SimpleServerRpcConnection c = it.next();
it.remove();
SelectionKey sk = c... | 3.26 |
hbase_SimpleRpcServerResponder_purge_rdh | /**
* If there were some calls that have not been sent out for a long time, we close the connection.
*
* @return the time of the purge.
*/
private long purge(long lastPurgeTime) {
long now = EnvironmentEdgeManager.currentTime();
if (now < (lastPurgeTime + this.simpleRpcServer... | 3.26 |
hbase_SimpleRpcServerResponder_registerForWrite_rdh | /**
 * Add a connection to the list of connections that want to write.
*/
public void registerForWrite(SimpleServerRpcConnection c) {
if (writingCons.add(c)) {
writeSelector.wakeup();
}
} | 3.26 |
hbase_RoundRobinTableInputFormat_configure_rdh | /**
* Adds a configuration to the Context disabling remote rpc'ing to figure Region size when
* calculating InputSplits. See up in super-class TIF where we rpc to every server to find the
* size of all involved Regions. Here we disable this super-class action. This means InputSplits
* will have a length of zero. If... | 3.26 |
hbase_RoundRobinTableInputFormat_unconfigure_rdh | /**
*
* @see #configure()
*/
void unconfigure() {
if (this.hbaseRegionsizecalculatorEnableOriginalValue == null) {
getConf().unset(HBASE_REGIONSIZECALCULATOR_ENABLE);
} else {
getConf().setBoolean(HBASE_REGIONSIZECALCULATOR_ENABLE, this.hbaseRegionsizecalculatorEnableOriginalValue);
}
} | 3.26 |
hbase_RoundRobinTableInputFormat_getSuperSplits_rdh | /**
 * Call the super-class's getSplits. Have it out here as its own method so it can be overridden.
*/
List<InputSplit> getSuperSplits(JobContext context) throws IOException {
return super.getSplits(context);
} | 3.26 |
hbase_RoundRobinTableInputFormat_roundRobin_rdh | /**
* Spread the splits list so as to avoid clumping on RegionServers. Order splits so every server
* gets one split before a server gets a second, and so on; i.e. round-robin the splits amongst
* the servers in the cluster.
*/
  List<InputSplit> roundRobin(List<InputSplit> inputs) throws ... | 3.26 |
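
A simplified illustration of the round-robin idea with strings standing in for `InputSplit`s: bucket splits by server, then take one per server per pass, so every server gets a first split before any server gets a second:

```java
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class RoundRobinSketch {
  static List<String> roundRobin(Map<String, List<String>> splitsByServer) {
    List<String> ordered = new ArrayList<>();
    boolean progress = true;
    while (progress) {
      progress = false;
      // One pass: pull at most one split per server.
      for (List<String> splits : splitsByServer.values()) {
        if (!splits.isEmpty()) {
          ordered.add(splits.remove(0));
          progress = true;
        }
      }
    }
    return ordered;
  }

  public static void main(String[] args) {
    Map<String, List<String>> byServer = new LinkedHashMap<>();
    byServer.put("rs1", new ArrayList<>(List.of("a1", "a2", "a3")));
    byServer.put("rs2", new ArrayList<>(List.of("b1")));
    byServer.put("rs3", new ArrayList<>(List.of("c1", "c2")));
    System.out.println(roundRobin(byServer)); // [a1, b1, c1, a2, c2, a3]
  }
}
```
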
hbase_RoundRobinTableInputFormat_main_rdh | /**
* Pass table name as argument. Set the zk ensemble to use with the System property
* 'hbase.zookeeper.quorum'
*/
public static void main(String[] args) throws IOException {
    TableInputFormat tif = new RoundRobinTableInputFormat();
    final Configuration configuration = HBaseConfiguration.create();
configura... | 3.26 |
hbase_Append_setReturnResults_rdh | /**
* True (default) if the append operation should return the results. A client that is not
 * interested in the result can save network bandwidth by setting this to false.
*/
@Override
public Append setReturnResults(boolean returnResults) {
super.setReturnResults(returnResults);
return this;
} | 3.26 |
hbase_Append_getTimeRange_rdh | /**
* Gets the TimeRange used for this append.
*/
public TimeRange getTimeRange() {
return this.tr;
} | 3.26 |
hbase_Append_isReturnResults_rdh | /**
* Returns current setting for returnResults
*/
// This method makes public the superclasses's protected method.
@Override
public boolean isReturnResults() {
return super.isReturnResults();
} | 3.26 |
hbase_Append_addColumn_rdh | /**
* Add the specified column and value to this Append operation.
*
* @param family
* family name
* @param qualifier
* column qualifier
* @param value
* value to append to specified column
*/
public Append addColumn(byte[] family, byte[] qualifier, byte[] value) {
KeyValue kv = new KeyValue(this.... | 3.26 |
hbase_Append_add_rdh | /**
* Add column and value to this Append operation.
*
* @return This instance
 */
  @Override
public Append add(final Cell cell) {
try {
super.add(cell);
} catch (IOException e) {
      // we eat the wrong-row exception for backwards compatibility (BC)..
LOG.error(e.toString(), e);
}
return this;
} | 3.26 |
hbase_Append_setTimeRange_rdh | /**
* Sets the TimeRange to be used on the Get for this append.
* <p>
 * This is useful for when you have counters that only last for specific periods of time (i.e.
* counters that are partitioned by time). By setting the range of valid times for this append,
* you can potentially gain some performance with a more o... | 3.26 |
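
A usage sketch of a time-ranged Append, chaining the setters shown in the rows above; the row, family, and qualifier names are hypothetical:

```java
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendTimeRangeExample {
  public static void main(String[] args) {
    // Restrict the read side of the append so older, time-partitioned
    // counter cells are skipped.
    long windowStart = System.currentTimeMillis() - 60_000L; // last minute only
    Append append = new Append(Bytes.toBytes("row-1"))
      .setTimeRange(windowStart, Long.MAX_VALUE)
      .setReturnResults(false);   // also saves bandwidth, per the earlier row
    append.addColumn(Bytes.toBytes("f"), Bytes.toBytes("events"), Bytes.toBytes(",e42"));
    // table.append(append) would then apply it against an open Table.
  }
}
```
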
hbase_SnapshotQuotaObserverChore_getSnapshotsFromTables_rdh | /**
* Computes a mapping of originating {@code TableName} to snapshots, when the {@code TableName}
* exists in the provided {@code Set}.
*/
Multimap<TableName, String> getSnapshotsFromTables(Admin admin, Set<TableName> tablesToFetchSnapshotsFrom) throws IOException {
    Multimap<TableName, String> snapshotsToCo... | 3.26 |
hbase_SnapshotQuotaObserverChore_pruneTableSnapshots_rdh | /**
 * Removes the snapshot entries that are present in the Quota table but not in snapshotsToComputeSize.
*
* @param snapshotsToComputeSize
* list of snapshots to be persisted
*/
void pruneTableSnapshots(Multimap<TableName, String> snapshotsToComputeSize) throws IOException {
Mult... | 3.26 |
hbase_SnapshotQuotaObserverChore_getPeriod_rdh | /**
* Extracts the period for the chore from the configuration.
*
* @param conf
* The configuration object.
* @return The configured chore period or the default value.
*/
static int getPeriod(Configuration conf) {
return conf.getInt(SNAPSHOT_QUOTA_CHORE_PERIOD_KEY, SNAPSHOT_QUOTA_CHORE_PERIOD_DEFAULT);
} | 3.26 |
hbase_SnapshotQuotaObserverChore_getSnapshotsToComputeSize_rdh | /**
 * Fetches each table with a quota (table or namespace quota), and then fetches the name of each
* snapshot which was created from that table.
*
* @return A mapping of table to snapshots created from that table
*/
Multimap<TableName, String> getSnapshotsToComputeSize() throws IOException {
Set<TableName> tabl... | 3.26 |
hbase_SnapshotQuotaObserverChore_persistSnapshotSizesForNamespaces_rdh | /**
* Writes the size used by snapshots for each namespace to the quota table.
*/
void persistSnapshotSizesForNamespaces(Map<String, Long> snapshotSizesByNamespace) throws IOException {
try (Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
quotaTable.put(snapshotSizesByNamespace.entrySe... | 3.26 |
hbase_SnapshotQuotaObserverChore_pruneNamespaceSnapshots_rdh | /**
 * Removes the snapshot entries that are present in the Quota table but not in snapshotsToComputeSize.
*
* @param snapshotsToComputeSize
* list of snapshots to be persisted
*/
  void pruneNamespaceSnapshots(Multimap<TableName, String> snapshotsToComputeSize) throws IOException {
    Set<String> existingSnapshotEntries ... | 3.26 |
hbase_SnapshotQuotaObserverChore_getInitialDelay_rdh | /**
* Extracts the initial delay for the chore from the configuration.
*
* @param conf
* The configuration object.
* @return The configured chore initial delay or the default value.
*/
static long getInitialDelay(Configuration conf) {
return conf.getLong(SNAPSHOT_QUOTA_CHORE_DELAY_KEY, SNAPSHOT_QUOTA_CHORE_DELA... | 3.26 |
hbase_SnapshotQuotaObserverChore_getTimeUnit_rdh | /**
* Extracts the time unit for the chore period and initial delay from the configuration. The
* configuration value for {@link #SNAPSHOT_QUOTA_CHORE_TIMEUNIT_KEY} must correspond to a
* {@link TimeUnit} value.
*
* @param conf
* The configuration object.
* @return The configured time unit for the chore period... | 3.26 |
hbase_SnapshotQuotaObserverChore_getNotifierForTable_rdh | /**
* Returns the correct instance of {@link FileArchiverNotifier} for the given table name.
*
* @param tn
* The table name
* @return A {@link FileArchiverNotifier} instance
*/
  FileArchiverNotifier getNotifierForTable(TableName tn) {
    return FileArchiverNotifierFactoryImpl.getInstance().get(conn, conf, fs, tn);
... | 3.26 |
hbase_SnapshotQuotaObserverChore_computeSnapshotSizes_rdh | /**
* Computes the size of each snapshot provided given the current files referenced by the table.
*
* @param snapshotsToComputeSize
* The snapshots to compute the size of
* @return A mapping of table to snapshot created from that table and the snapshot's size.
*/
Map<String, Lon... | 3.26 |
hbase_ReplicationQueueInfo_extractDeadServersFromZNodeString_rdh | /**
 * Parse dead server names from the queue id. A servername can contain "-", such as
 * "ip-10-46-221-101.ec2.internal", so we need to skip some "-" delimiters during parsing for the
 * following cases: 2-ip-10-46-221-101.ec2.internal,52170,1364333181125-<server name>-...
*/
private static void extractDeadServersFromZNodeString(Stri... | 3.26 |
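
A regex-based approximation of the parsing problem the javadoc describes, not the real parser: anchor on the `,port,startcode` tail so the hyphens inside hostnames are not treated as separators:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class DeadServerParseSketch {
  // A server name is "host,port,startcode"; the host itself may contain "-".
  // First char class excludes '-' so the separator dash is never swallowed.
  private static final Pattern SERVER =
    Pattern.compile("([\\w.][\\w.-]*?,\\d+,\\d+)");

  static List<String> extractDeadServers(String queueId) {
    // Drop the leading queue/peer prefix ("2-" in the javadoc's example).
    String rest = queueId.substring(queueId.indexOf('-') + 1);
    List<String> result = new ArrayList<>();
    Matcher m = SERVER.matcher(rest);
    while (m.find()) {
      result.add(m.group(1));
    }
    return result;
  }

  public static void main(String[] args) {
    String queueId = "2-ip-10-46-221-101.ec2.internal,52170,1364333181125"
      + "-other-host.example.com,16020,1364333181200";
    // [ip-10-46-221-101.ec2.internal,52170,1364333181125,
    //  other-host.example.com,16020,1364333181200]
    System.out.println(extractDeadServers(queueId));
  }
}
```
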
hbase_SampleUploader_configureJob_rdh | /**
* Job configuration.
*/
public static Job configureJob(Configuration conf, String[] args) throws IOException {
Path inputPath = new Path(args[0]);
String tableName = args[1];
Job v9 = new Job(conf, (NAME + "_") + tableName);
v9.setJarByClass(SampleUploader.Uploader.class);
FileInputFormat.setI... | 3.26 |
hbase_SampleUploader_run_rdh | /**
* Main entry point.
*
* @param otherArgs
* The command line parameters after ToolRunner handles standard.
* @throws Exception
* When running the job fails.
*/
@Override
public int run(S... | 3.26 |
hbase_InfoServer_addUnprivilegedServlet_rdh | /**
* Adds a servlet in the server that any user can access.
*
* @see HttpServer#addUnprivilegedServlet(String, ServletHolder)
*/
  public void addUnprivilegedServlet(String name, String pathSpec, ServletHolder holder) {
    if (name != null) {
holder.setName(name);
}
this.httpServer.addUnprivilegedServl... | 3.26 |
hbase_InfoServer_canUserModifyUI_rdh | /**
* Returns true if and only if UI authentication (spnego) is enabled, UI authorization is enabled,
* and the requesting user is defined as an administrator. If the UI is set to readonly, this
* method always returns false.
*/
public static boolean canUserModifyUI(HttpServletRequest req, ServletContext ctx, Confi... | 3.26 |
hbase_InfoServer_addPrivilegedServlet_rdh | /**
 * Adds a servlet in the server that only administrators can access.
*
* @see HttpServer#addPrivilegedServlet(String, String, Class)
*/
public void addPrivilegedServlet(String name, String pathSpec, Class<? extends HttpServlet> clazz) {
this.httpServer.addPrivilegedServlet(name, pathSpec, clazz);
} | 3.26 |
hbase_InfoServer_buildAdminAcl_rdh | /**
* Builds an ACL that will restrict the users who can issue commands to endpoints on the UI which
* are meant only for administrators.
 */
  AccessControlList buildAdminAcl(Configuration conf) {
final String userGroups = conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, null);
final String admi... | 3.26 |
hbase_InfoServer_getPort_rdh | /**
*
* @return the port of the info server
* @deprecated Since 0.99.0
*/
  @Deprecated
  public int getPort() {
    return this.httpServer.getPort();
  }
hbase_MutableSegment_add_rdh | /**
* Adds the given cell into the segment
*
* @param cell
* the cell to add
* @param mslabUsed
* whether using MSLAB
*/
public void add(Cell cell, boolean mslabUsed, MemStoreSizing memStoreSizing, boolean sizeAddedPreOperation) {
internalAdd(cell, mslabUsed, memStoreSizing, sizeAddedPreOperation);
} | 3.26 |
hbase_RSGroupInfo_setConfiguration_rdh | /**
 * Setter for storing a configuration setting in the {@link #configuration} map.
*
* @param key
* Config key.
* @param value
* String value.
*/
public void setConfiguration(String key, String value) {
configuration.put(key, Objects.requireNonNull(value));
} | 3.26 |
hbase_RSGroupInfo_addAllServers_rdh | /**
* Adds the given servers to the group.
*/
public void addAllServers(Collection<Address> hostPort) {
servers.addAll(hostPort);
} | 3.26 |
hbase_RSGroupInfo_getServers_rdh | /**
* Get list of servers.
*/
public Set<Address> getServers() {
return servers;
} | 3.26 |
hbase_RSGroupInfo_removeConfiguration_rdh | /**
* Remove a config setting represented by the key from the {@link #configuration} map
*/
public void removeConfiguration(final String key) {
configuration.remove(key);
} | 3.26 |
hbase_RSGroupInfo_getName_rdh | /**
* Get group name.
*/
public String getName() {
return name;
} | 3.26 |
hbase_RSGroupInfo_getConfiguration_rdh | /**
* Getter for fetching an unmodifiable {@link #configuration} map.
*/
public Map<String, String> getConfiguration() {
// shallow pointer copy
return Collections.unmodifiableMap(configuration);
} | 3.26 |
hbase_RSGroupInfo_removeServer_rdh | /**
* Remove given server from the group.
*/
public boolean removeServer(Address hostPort) {
return servers.remove(hostPort);
} | 3.26 |
hbase_RSGroupInfo_containsServer_rdh | /**
* Returns true if a server with hostPort is found
*/
public boolean containsServer(Address hostPort) {
return servers.contains(hostPort);
} | 3.26 |
hbase_RSGroupInfo_addServer_rdh | /**
* Adds the given server to the group.
*/
public void addServer(Address hostPort) {
servers.add(hostPort);
} | 3.26 |
hbase_SplitLogWorker_start_rdh | /**
* start the SplitLogWorker thread
*/
public void start() {
worker = new Thread(null, this, "SplitLogWorker-" + server.getServerName().toShortString());
worker.start();
} | 3.26 |
hbase_SplitLogWorker_splitLog_rdh | /**
 * Returns the status: either DONE, RESIGNED, or ERR.
*/
static Status splitLog(String filename, CancelableProgressable p, Configuration conf, RegionServerServices server, LastSequenceId sequenceIdChecker, WALFactory factory) {
Path walDir;
FileSystem fs;
    try {
      walDir = CommonFSUtils.getWALRootDir(conf);... | 3.26 |
hbase_SplitLogWorker_m0_rdh | /**
* Returns the number of tasks processed by coordination. This method is used by tests only
*/
public int m0() {
return coordination.getTaskReadySeq();
} | 3.26 |
hbase_SplitLogWorker_processSyncReplicationWAL_rdh | // returns whether we need to continue the split work
private static boolean processSyncReplicationWAL(String name, Configuration conf, RegionServerServices server,
FileSystem fs, Path walDir) throws IOException {
Path walFile = new Path(walDir, name);
String filename = walFile.getName();
Optional<String> ... | 3.26 |
hbase_SplitLogWorker_stopTask_rdh | /**
 * If the worker is doing a task, i.e. splitting a log file, then stop the task. It doesn't exit the
* worker thread.
*/
  public void stopTask() {
LOG.info("Sending interrupt to stop the worker thread");
worker.interrupt();// TODO interrupt often gets swallowed, do what else?
} | 3.26 |
hbase_SplitLogWorker_stop_rdh | /**
* stop the SplitLogWorker thread
*/
public void stop() {
coordination.stopProcessingTasks();
stopTask();
} | 3.26 |
hbase_RegionReplicationBufferManager_decrease_rdh | /**
* Called after you ship the edits out.
*/
public void decrease(long size) {
pendingSize.addAndGet(-size);
} | 3.26 |
hbase_RegionReplicationBufferManager_increase_rdh | /**
* Return whether we should just drop all the edits, if we have reached the hard limit of max
* pending size.
*
* @return {@code true} means OK, {@code false} means drop all the edits.
*/
public boolean increase(long size) {
long sz = pendingSize.addAndGet(size);
if ... | 3.26 |
hbase_ColumnCount_setCount_rdh | /**
* Set the current count to a new count
*
* @param count
* new count to set
*/
public void setCount(int count) {
this.count = count;
} | 3.26 |
hbase_ColumnCount_getLength_rdh | /**
* Returns the length
*/
public int getLength() {
return this.length;
} | 3.26 |
hbase_ColumnCount_increment_rdh | /**
* Increment the current version count
*
* @return current count
*/
public int increment() {
return ++count;
} | 3.26 |
hbase_ColumnCount_getOffset_rdh | /**
* Returns the offset
*/
  public int getOffset() {
    return this.offset;
} | 3.26 |
hbase_ColumnCount_getBuffer_rdh | /**
* Returns the buffer
*/
public byte[] getBuffer() {
return this.bytes;
} | 3.26 |
hbase_CompactionPipeline_replaceAtIndex_rdh | // replacing one segment in the pipeline with a new one exactly at the same index
// need to be called only within synchronized block
@SuppressWarnings(value = "VO_VOLATILE_INCREMENT", justification = "replaceAtIndex is invoked under a synchronize block so safe")
private void replaceAtIndex(int idx, ImmutableSegment ne... | 3.26 |
hbase_CompactionPipeline_flattenOneSegment_rdh | /**
 * If the caller holds the current version, go over the pipeline and try to flatten each
 * segment. Flattening replaces the ConcurrentSkipListMap-based CellSet with a CellArrayMap-based
 * one. Flattening a segment that is not initially based on a ConcurrentSkipListMap has no effect.
* Return after one segment... | 3.26 |
hbase_CompactionPipeline_swapSuffix_rdh | /**
* Must be called under the {@link CompactionPipeline#pipeline} Lock.
 */
  private void swapSuffix(List<? extends Segment> suffix, ImmutableSegment segment, boolean closeSegmentsInSuffix) {
matchAndRemoveSuffixFromPipeline(suffix);
if (segment != null) {
pipeline.addLast(segment);
}
// During... | 3.26 |
hbase_CompactionPipeline_validateSuffixList_rdh | // debug method
private boolean validateSuffixList(LinkedList<ImmutableSegment> suffix) {
    if (suffix.isEmpty()) {
      // empty suffix is always valid
      return true;
    }
Iterator<ImmutableSegment> pipelineBackwardIterator = pipeline.descendingIterator();
Iterator<ImmutableSegment> suffixBackw... | 3.26 |
hbase_CompactionPipeline_matchAndRemoveSuffixFromPipeline_rdh | /**
 * Checks that the {@link Segment}s in the suffix input parameter are the same as the
 * {@link Segment}s in {@link CompactionPipeline#pipeline}, one by one from the last element to the
* suffix. If matched, remove suffix from {@link CompactionPipeline#pipeline}. <br/>
* Must be called under the {@link C... | 3.26 |