| name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68) |
|---|---|---|
flink_SinkTestSuiteBase_testScaleUp | /**
* Test connector sink restart from a completed savepoint with a higher parallelism.
*
* <p>This test will create a sink in the external system, generate a collection of test data
 * and write half of them to this sink by the Flink Job with parallelism 2 at first. Then
* stop the job, restart the same job... | 3.68 |
hmily_HashedWheelTimer_expireTimeouts | /**
* Expire all {@link HashedWheelTimeout}s for the given {@code deadline}.
*/
public void expireTimeouts(final long deadline) {
HashedWheelTimeout timeout = head;
// process all timeouts
while (timeout != null) {
boolean remove = false;
if (timeout.remainingRounds <= 0) {
if ... | 3.68 |
flink_PythonDependencyInfo_create | /**
* Creates PythonDependencyInfo from GlobalJobParameters and DistributedCache.
*
* @param config The config.
* @param distributedCache The DistributedCache object of current task.
* @return The PythonDependencyInfo object that contains whole information of python dependency.
*/
public static PythonDependencyIn... | 3.68 |
framework_ContainerOrderedWrapper_addItem | /**
* Registers a new Item by its ID to the underlying container and to the
* ordering.
*
* @param itemId
* the ID of the Item to be created.
* @return the added Item or <code>null</code> if the operation failed
* @throws UnsupportedOperationException
* if the addItem is not supported.
*... | 3.68 |
flink_DataSource_getInputFormat | /**
* Gets the input format that is executed by this data source.
*
* @return The input format that is executed by this data source.
*/
@Internal
public InputFormat<OUT, ?> getInputFormat() {
return this.inputFormat;
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithConcatenationUsingCase | /**
* Tests concatenation in a select with {@linkplain CaseStatement}.
*/
@Test
public void testSelectWithConcatenationUsingCase() {
WhenCondition whenCondition = new WhenCondition(eq(new FieldReference("taxVariationIndicator"), new FieldLiteral('Y')), new FieldReference("exposureCustomerNumber"));
SelectStatemen... | 3.68 |
morf_GraphBasedUpgradeTraversalService_waitForReadyToExecuteNode | /**
 * The caller of this method will block until at least one new node is
 * available for execution or all the nodes of the upgrade have been
 * executed. When the upgrade reaches that point the block will be released and
* the method will be completed. Note that the fact that at the time of the
* b... | 3.68 |
hbase_BufferedMutator_setRpcTimeout | /**
* Set rpc timeout for this mutator instance
* @deprecated Since 3.0.0, will be removed in 4.0.0. Please set this through the
* {@link BufferedMutatorParams}.
*/
@Deprecated
default void setRpcTimeout(int timeout) {
throw new UnsupportedOperationException(
"The BufferedMutator::setRpcTimeout ha... | 3.68 |
framework_VCaption_mightChange | /**
* Checks whether anything in a given state change might cause the caption
* to change.
*
* @param event
* the state change event to check
* @return <code>true</code> if the caption might have changed; otherwise
* <code>false</code>
*/
public static boolean mightChange(StateChangeEvent eve... | 3.68 |
hadoop_ManifestCommitter_commitTask | /**
* Commit the task.
* This is where the task attempt tree list takes place.
* @param context task context.
* @throws IOException IO Failure.
*/
@Override
public void commitTask(final TaskAttemptContext context)
throws IOException {
ManifestCommitterConfig committerConfig = enterCommitter(true,
conte... | 3.68 |
framework_BinderValidationStatus_getBeanValidationErrors | /**
* Gets the failed bean level validation results.
*
* @return a list of failed bean level validation results
*/
public List<ValidationResult> getBeanValidationErrors() {
return binderStatuses.stream().filter(ValidationResult::isError)
.collect(Collectors.toList());
} | 3.68 |
hbase_TableInfoModel_toString | /*
* (non-Javadoc)
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
for (TableRegionModel aRegion : regions) {
sb.append(aRegion.toString());
sb.append('\n');
}
return sb.toString();
} | 3.68 |
framework_VTabsheet_removeAssistiveDescription | /**
* Removes the {@code aria-describedby} attribute from this tab element.
* This should be called when this tab loses focus.
*/
public void removeAssistiveDescription() {
Roles.getTablistRole().removeAriaDescribedbyProperty(getElement());
} | 3.68 |
framework_AbsoluteLayout_setBottomValue | /**
* Sets the 'bottom' attribute value (distance from the bottom of the
* component to the bottom edge of the layout). Currently active units
* are maintained.
*
* @param bottomValue
* The value of the 'bottom' attribute
* @see #setBottomUnits(Unit)
*/
public void setBottomValue(Float bottomValue) {... | 3.68 |
hadoop_OBSCommonUtils_verifyBucketExists | /**
* Verify that the bucket exists. This does not check permissions, not even
* read access.
*
* @param owner the owner OBSFileSystem instance
* @throws FileNotFoundException the bucket is absent
* @throws IOException any other problem talking to OBS
*/
static void verifyBucketExists(final OBSFileSyst... | 3.68 |
hadoop_RpcNoSuchMethodException_getRpcErrorCodeProto | /**
* get the detailed rpc status corresponding to this exception
*/
public RpcErrorCodeProto getRpcErrorCodeProto() {
return RpcErrorCodeProto.ERROR_NO_SUCH_METHOD;
} | 3.68 |
hadoop_WeightedPolicyInfo_setAMRMPolicyWeights | /**
 * Setter method for AMRMProxy weights.
*
* @param policyWeights the amrmproxy weights.
*/
public void setAMRMPolicyWeights(Map<SubClusterIdInfo, Float> policyWeights) {
this.amrmPolicyWeights = policyWeights;
} | 3.68 |
querydsl_AliasFactory_createProxy | /**
* Create a proxy instance for the given class and path
*
* @param <A>
* @param cl type of the proxy
* @param path underlying expression
* @return proxy instance
*/
@SuppressWarnings("unchecked")
protected <A> A createProxy(Class<A> cl, Expression<?> path) {
Enhancer enhancer = new Enhancer();
enhance... | 3.68 |
hbase_ResponseConverter_getResult | // Start utilities for Client
public static SingleResponse getResult(final ClientProtos.MutateRequest request,
final ClientProtos.MutateResponse response, final CellScanner cells) throws IOException {
SingleResponse singleResponse = new SingleResponse();
SingleResponse.Entry entry = new SingleResponse.Entry();
... | 3.68 |
framework_Label_fireValueChange | /**
* Emits the options change event.
*/
protected void fireValueChange() {
// Set the error message
fireEvent(new Label.ValueChangeEvent(this));
} | 3.68 |
hbase_ScannerContext_getSkippingRow | /**
* In this mode, only block size progress is tracked, and limits are ignored. We set this mode
 * when skipping to the next row, in which case all cells returned are thrown away so should not count
* towards progress.
* @return true if we are in skipping row mode.
*/
public boolean getSkippingRow() {
return skipping... | 3.68 |
hbase_TaskGroup_addTask | /**
 * Add a new task to the group; before doing so, it may complete the last task in the group
* @param description the description of the new task
* @param withCompleteLast whether to complete the last task in the group
* @return the added new task
*/
public synchronized MonitoredTask addTask(String descriptio... | 3.68 |
flink_FileInputFormat_decorateInputStream | /**
* This method allows to wrap/decorate the raw {@link FSDataInputStream} for a certain file
* split, e.g., for decoding. When overriding this method, also consider adapting {@link
* FileInputFormat#testForUnsplittable} if your stream decoration renders the input file
* unsplittable. Also consider calling existin... | 3.68 |
morf_UpgradeScriptAdditionsProvider_getUpgradeScriptAdditions | /**
* Returns all script additions with the filtering criteria applied.
* The filtering logic should be provided by calling {@link #setAllowedPredicate(Predicate)} first.
* @return set of upgrade script additions.
*/
default Set<UpgradeScriptAddition> getUpgradeScriptAdditions() {
return Collections.emptySet();... | 3.68 |
hadoop_BytesWritable_get | /**
* Get the data from the BytesWritable.
* @deprecated Use {@link #getBytes()} instead.
* @return data from the BytesWritable.
*/
@Deprecated
public byte[] get() {
return getBytes();
} | 3.68 |
hbase_ReplicationSourceManager_deleteQueue | /**
* Delete a complete queue of wals associated with a replication source
* @param queueId the id of replication queue to delete
*/
private void deleteQueue(ReplicationQueueId queueId) {
abortWhenFail(() -> this.queueStorage.removeQueue(queueId));
} | 3.68 |
hudi_GcsEventsSource_processMessages | /**
* Convert Pubsub messages into a batch of GCS file MetadataMsg objects, skipping those that
* don't need to be processed.
*
* @param receivedMessages Pubsub messages
* @return A batch of GCS file metadata messages
*/
private MessageBatch processMessages(List<ReceivedMessage> receivedMessages) {
List<String>... | 3.68 |
flink_ExternalResourceUtils_getExternalResourceAmountMap | /** Get the map of resource name and amount of all of enabled external resources. */
@VisibleForTesting
static Map<String, Long> getExternalResourceAmountMap(Configuration config) {
final Set<String> resourceSet = getExternalResourceSet(config);
if (resourceSet.isEmpty()) {
return Collections.emptyMap(... | 3.68 |
graphhopper_VectorTileEncoder_clipGeometry | /**
* Clip geometry according to buffer given at construct time. This method
* can be overridden to change clipping behavior. See also
* {@link #clipCovers(Geometry)}.
*
* @param geometry a {@link Geometry} to check for intersection with the current clip geometry
* @return a boolean true when current clip geometr... | 3.68 |
flink_DagConnection_setInterestingProperties | /**
* Sets the interesting properties for this pact connection.
*
* @param props The interesting properties.
*/
public void setInterestingProperties(InterestingProperties props) {
if (this.interestingProps == null) {
this.interestingProps = props;
} else {
throw new IllegalStateException("In... | 3.68 |
framework_ViewBeforeLeaveEvent_isNavigateRun | /**
* Checks if the navigate command has been executed.
*
* @return <code>true</code> if {@link #navigate()} has been called,
* <code>false</code> otherwise
*/
protected boolean isNavigateRun() {
return navigateRun;
} | 3.68 |
open-banking-gateway_FintechConsentSpecSecureStorage_fromInboxForAuth | /**
* Get data from FinTechs' inbox associated with the FinTech user.
* @param authSession Authorization session associated with this user
* @param password FinTech user password
* @return FinTechs' users' keys to access consent, spec. etc.
*/
@SneakyThrows
public FinTechUserInboxData fromInboxForAuth(AuthSession ... | 3.68 |
shardingsphere-elasticjob_JobFacade_isNeedSharding | /**
* Judge job whether to need resharding.
*
* @return need resharding or not
*/
public boolean isNeedSharding() {
return shardingService.isNeedSharding();
} | 3.68 |
morf_NamedParameterPreparedStatement_setInt | /**
* Sets the value of a named integer parameter.
*
* @param parameter the parameter metadata.
* @param value the parameter value.
* @return this, for method chaining
* @exception SQLException if an error occurs when setting the parameter
*/
public NamedParameterPreparedStatement setInt(SqlParameter parameter, ... | 3.68 |
flink_FlinkAggregateJoinTransposeRule_registry | /**
* Creates a {@link org.apache.calcite.sql.SqlSplittableAggFunction.Registry} that is a view of
* a list.
*/
private static <E> SqlSplittableAggFunction.Registry<E> registry(final List<E> list) {
return new SqlSplittableAggFunction.Registry<E>() {
public int register(E e) {
int i = list.in... | 3.68 |
hbase_HFileBlock_getByteStream | /** Returns a byte stream reading the data(excluding header and checksum) of this block */
DataInputStream getByteStream() {
ByteBuff dup = this.bufWithoutChecksum.duplicate();
dup.position(this.headerSize());
return new DataInputStream(new ByteBuffInputStream(dup));
} | 3.68 |
morf_AbstractSqlDialectTest_testAddIntegerColumn | /**
* Test adding an integer column.
*/
@Test
public void testAddIntegerColumn() {
testAlterTableColumn(AlterationType.ADD, column("intField_new", DataType.INTEGER).nullable(), expectedAlterTableAddIntegerColumnStatement());
} | 3.68 |
hbase_SerialReplicationChecker_isLastRangeAndOpening | // We may write a open region marker to WAL before we write the open sequence number to meta, so
// if a region is in OPENING state and we are in the last range, it is not safe to say we can push
// even if the previous range is finished.
private boolean isLastRangeAndOpening(ReplicationBarrierResult barrierResult, int... | 3.68 |
hadoop_DiskBalancerDataNode_equals | /**
* Indicates whether some other object is "equal to" this one.
*/
@Override
public boolean equals(Object obj) {
if ((obj == null) || (obj.getClass() != getClass())) {
return false;
}
DiskBalancerDataNode that = (DiskBalancerDataNode) obj;
return dataNodeUUID.equals(that.getDataNodeUUID());
} | 3.68 |
hudi_HoodieExampleDataGenerator_generateInsertsStreamOnPartition | /**
* Generates new inserts, across a single partition path. It also updates the list of existing keys.
*/
public Stream<HoodieRecord<T>> generateInsertsStreamOnPartition(String commitTime, Integer n, String partitionPath) {
int currSize = getNumExistingKeys();
return IntStream.range(0, n).boxed().map(i -> {
... | 3.68 |
hadoop_SubClusterIdInfo_toId | /**
* Get the sub-cluster identifier as {@link SubClusterId}.
* @return the sub-cluster id.
*/
public SubClusterId toId() {
return SubClusterId.newInstance(id);
} | 3.68 |
flink_HsSelectiveSpillingStrategy_onMemoryUsageChanged | // When the amount of memory used exceeds the threshold, decide action based on global
// information. Otherwise, no need to take action.
@Override
public Optional<Decision> onMemoryUsageChanged(
int numTotalRequestedBuffers, int currentPoolSize) {
return numTotalRequestedBuffers < currentPoolSize * spillTh... | 3.68 |
flink_AbstractUdfOperator_getUserCodeWrapper | /**
* Gets the function that is held by this operator. The function is the actual implementation of
* the user code.
*
* <p>This throws an exception if the pact does not contain an object but a class for the user
* code.
*
* @return The object with the user function for this operator.
* @see org.apache.flink.ap... | 3.68 |
morf_SqlServerDialect_alterTableAddColumnStatements | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#alterTableAddColumnStatements(org.alfasoftware.morf.metadata.Table, org.alfasoftware.morf.metadata.Column)
*/
@Override
public Collection<String> alterTableAddColumnStatements(Table table, Column column) {
List<String> statements = new ArrayList<>();
// TODO looks... | 3.68 |
flink_StructuredOptionsSplitter_splitEscaped | /**
* Splits the given string on the given delimiter. It supports quoting parts of the string with
* either single (') or double quotes ("). Quotes can be escaped by doubling the quotes.
*
* <p>Examples:
*
* <ul>
* <li>'A;B';C => [A;B], [C]
* <li>"AB'D";B;C => [AB'D], [B], [C]
* <li>"AB'""D;B";C => [AB'\... | 3.68 |
hbase_ChecksumType_nameToType | /**
* Map a checksum name to a specific type. Do our own names.
* @return Type associated with passed code.
*/
public static ChecksumType nameToType(final String name) {
for (ChecksumType t : ChecksumType.values()) {
if (t.getName().equals(name)) {
return t;
}
}
throw new RuntimeException("Unknow... | 3.68 |
hbase_TableRegionModel_setStartKey | /**
* @param startKey the start key
*/
public void setStartKey(byte[] startKey) {
this.startKey = startKey;
} | 3.68 |
hudi_AvroSchemaEvolutionUtils_reconcileSchemaRequirements | /**
* Reconciles nullability and datatype requirements b/w {@code source} and {@code target} schemas,
* by adjusting these of the {@code source} schema to be in-line with the ones of the
* {@code target} one. Source is considered to be new incoming schema, while target could refer to prev table schema.
* For exampl... | 3.68 |
hadoop_Container_getAllocationRequestId | /**
* Get the optional <em>ID</em> corresponding to the original {@code
* ResourceRequest{@link #getAllocationRequestId()}}s which is satisfied by
* this allocated {@code Container}.
* <p>
* The scheduler may return multiple {@code AllocateResponse}s corresponding
* to the same ID as and when scheduler allocates ... | 3.68 |
hbase_TableDescriptorBuilder_setCoprocessorToMap | /**
* Add coprocessor to values Map
 * @param specStr The Coprocessor specification all in one String
* @return Returns <code>this</code>
*/
private ModifyableTableDescriptor setCoprocessorToMap(final String specStr) {
if (specStr == null) {
return this;
}
// generate a coprocessor key
int maxCoproces... | 3.68 |
querydsl_ExpressionUtils_extract | /**
* Get the potentially wrapped expression
*
* @param expr expression to analyze
* @return inner expression
*/
@SuppressWarnings("unchecked")
public static <T> Expression<T> extract(Expression<T> expr) {
if (expr != null) {
final Class<?> clazz = expr.getClass();
if (clazz == PathImpl.class |... | 3.68 |
zilla_DispatchAgent_supplyCounterWriter | // required for testing
public LongConsumer supplyCounterWriter(
long bindingId,
long metricId)
{
return countersLayout.supplyWriter(bindingId, metricId);
} | 3.68 |
framework_AutomaticImmediate_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
final TextField textField = new TextField() {
/*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractField#... | 3.68 |
hadoop_CachedDNSToSwitchMapping_getCachedHosts | /**
 * @param names a list of hostnames to look up (can be empty)
* @return the cached resolution of the list of hostnames/addresses.
* or null if any of the names are not currently in the cache
*/
private List<String> getCachedHosts(List<String> names) {
List<String> result = new ArrayList<String>(names.size(... | 3.68 |
dubbo_RpcContext_isConsumerSide | /**
* is consumer side.
*
* @return consumer side.
*/
public boolean isConsumerSide() {
return newRpcContext.isConsumerSide();
} | 3.68 |
graphhopper_ReaderElement_hasTag | /**
* Check a number of tags in the given order if their value is equal to the specified value.
*/
public boolean hasTag(List<String> keyList, Object value) {
for (String key : keyList) {
if (value.equals(getTag(key, null)))
return true;
}
return false;
} | 3.68 |
flink_RoundRobinOperatorStateRepartitioner_collectStates | /** Collect the states from given parallelSubtaskStates with the specific {@code mode}. */
private Map<String, StateEntry> collectStates(
List<List<OperatorStateHandle>> parallelSubtaskStates, OperatorStateHandle.Mode mode) {
Map<String, StateEntry> states =
CollectionUtil.newHashMapWithExpecte... | 3.68 |
hbase_HMaster_modifyNamespace | /**
* Modify an existing Namespace.
* @param nonceGroup Identifier for the source of the request, a client or process.
* @param nonce A unique identifier for this operation from the client or process identified
* by <code>nonceGroup</code> (the source must ensure each operation gets a
* ... | 3.68 |
hbase_MetricRegistries_global | /**
* Return the global singleton instance for the MetricRegistries.
* @return MetricRegistries implementation.
*/
public static MetricRegistries global() {
return LazyHolder.GLOBAL;
} | 3.68 |
hudi_DiskMap_cleanup | /**
* Cleanup all resources, files and folders.
*/
private void cleanup(boolean isTriggeredFromShutdownHook) {
try {
FileIOUtils.deleteDirectory(diskMapPathFile);
} catch (IOException exception) {
LOG.warn("Error while deleting the disk map directory=" + diskMapPath, exception);
}
if (!isTriggeredFrom... | 3.68 |
hbase_CanaryTool_generateMonitorTables | /** Returns List of tables to use in test. */
private String[] generateMonitorTables(String[] monitorTargets) throws IOException {
String[] returnTables = null;
if (this.useRegExp) {
Pattern pattern = null;
List<TableDescriptor> tds = null;
Set<String> tmpTables = new TreeSet<>();
try {
LOG.d... | 3.68 |
morf_MySqlDialect_repairAutoNumberStartPosition | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#repairAutoNumberStartPosition(Table, SqlScriptExecutor, Connection)
*/
@Override
public void repairAutoNumberStartPosition(Table table, SqlScriptExecutor executor,Connection connection) {
Column autoIncrementColumn = getAutoIncrementColumnForTable(table);
i... | 3.68 |
framework_Link_setTargetWidth | /**
* Sets the target window width.
*
* @param targetWidth
* the targetWidth to set.
*/
public void setTargetWidth(int targetWidth) {
getState().targetWidth = targetWidth;
} | 3.68 |
flink_PermanentBlobCache_registerJob | /**
* Registers use of job-related BLOBs.
*
* <p>Using any other method to access BLOBs, e.g. {@link #getFile}, is only valid within calls
* to <tt>registerJob(JobID)</tt> and {@link #releaseJob(JobID)}.
*
* @param jobId ID of the job this blob belongs to
* @see #releaseJob(JobID)
*/
@Override
public void regis... | 3.68 |
flink_EnvironmentInformation_getTemporaryFileDirectory | /**
* Gets the directory for temporary files, as returned by the JVM system property
* "java.io.tmpdir".
*
* @return The directory for temporary files.
*/
public static String getTemporaryFileDirectory() {
return System.getProperty("java.io.tmpdir");
} | 3.68 |
dubbo_AbstractClusterInvoker_invokeWithContextAsync | /**
* When using a thread pool to fork a child thread, ThreadLocal cannot be passed.
* In this scenario, please use the invokeWithContextAsync method.
*
* @return
*/
protected Result invokeWithContextAsync(Invoker<T> invoker, Invocation invocation, URL consumerUrl) {
Invoker<T> originInvoker = setContext(invok... | 3.68 |
hbase_Mutation_get | /**
* Returns a list of all KeyValue objects with matching column family and qualifier.
* @param family column family
* @param qualifier column qualifier
* @return a list of KeyValue objects with the matching family and qualifier, returns an empty
* list if one doesn't exist for the given family.
*/
pu... | 3.68 |
hbase_HRegion_visitBatchOperations | /**
 * Helper method for visiting pending or all batch operations
*/
public void visitBatchOperations(boolean pendingOnly, int lastIndexExclusive, Visitor visitor)
throws IOException {
assert lastIndexExclusive <= this.size();
for (int i = nextIndexToProcess; i < lastIndexExclusive; i++) {
if (!pendingOnly || ... | 3.68 |
flink_MetricConfig_getFloat | /**
* Searches for the property with the specified key in this property list. If the key is not
* found in this property list, the default property list, and its defaults, recursively, are
* then checked. The method returns the default value argument if the property is not found.
*
* @param key the hashtable key.
... | 3.68 |
flink_GSBlobIdentifier_getBlobId | /**
* Get a Google blob id for this identifier, with generation=null.
*
* @return The BlobId
*/
public BlobId getBlobId() {
return BlobId.of(bucketName, objectName);
} | 3.68 |
flink_Operator_getParallelism | /**
* Returns the parallelism of this operator.
*
* @return The parallelism of this operator.
*/
public int getParallelism() {
return this.parallelism;
} | 3.68 |
hadoop_MappingRuleResult_createDefaultPlacementResult | /**
* Generator method for default placement results. It is a specialized
* placement result which will only use the "%default" as a queue name.
* @return The generated MappingRuleResult
*/
public static MappingRuleResult createDefaultPlacementResult() {
return RESULT_DEFAULT_PLACEMENT;
} | 3.68 |
zxing_SearchBookContentsActivity_parseResult | // Available fields: page_id, page_number, snippet_text
private SearchBookContentsResult parseResult(JSONObject json) {
String pageId;
String pageNumber;
String snippet;
try {
pageId = json.getString("page_id");
pageNumber = json.optString("page_number");
snippet = json.optString("snippet_text"); ... | 3.68 |
querydsl_MapPath_getKeyType | /**
* Get the key type
*
* @return key type
*/
public Class<K> getKeyType() {
return keyType;
} | 3.68 |
hbase_ByteBufferUtils_copyFromBufferToArray | /**
* Copies specified number of bytes from given offset of 'in' ByteBuffer to the array. This
* doesn't affect the position of buffer.
* @param out output array to copy input bytebuffer to
* @param in input bytebuffer to copy from
* @param sourceOffset offset of source bytebuffer... | 3.68 |
dubbo_RpcStatus_getActive | /**
* get active.
*
* @return active
*/
public int getActive() {
return active.get();
} | 3.68 |
hbase_StoreFileInfo_setRegionCoprocessorHost | /**
* Sets the region coprocessor env.
*/
public void setRegionCoprocessorHost(RegionCoprocessorHost coprocessorHost) {
this.coprocessorHost = coprocessorHost;
} | 3.68 |
flink_KeyMap_traverseMaps | /**
 * Performs a traversal of the logical multi-map that results from the union of the given
* maps. This method does not actually build a union of the map, but traverses the hash maps
* together.
*
 * @param maps The array of maps whose union should be traversed.
* @param visitor The visitor that is called for... | 3.68 |
pulsar_FunctionRuntimeManager_restartFunctionUsingPulsarAdmin | /**
* Restart the entire function or restart a single instance of the function.
*/
@VisibleForTesting
void restartFunctionUsingPulsarAdmin(Assignment assignment, String tenant, String namespace,
String functionName, boolean restartEntireFunction)
throws PulsarAdminExceptio... | 3.68 |
druid_DataSourceSelectorFactory_getSelector | /**
* Get a new instance of the given selector name.
*
 * @return null if the given name does not represent a DataSourceSelector
*/
public static DataSourceSelector getSelector(String name, HighAvailableDataSource highAvailableDataSource) {
for (DataSourceSelectorEnum e : DataSourceSelectorEnum.values()) {
... | 3.68 |
hbase_VisibilityLabelsCache_getLabelOrdinal | /**
* @param label Not null label string
* @return The ordinal for the label. The ordinal starts from 1. Returns 0 when passed a non
* existing label.
*/
@Override
public int getLabelOrdinal(String label) {
Integer ordinal = null;
this.lock.readLock().lock();
try {
ordinal = labels.get(label);
}... | 3.68 |
hadoop_SnappyCodec_getConf | /**
* Return the configuration used by this object.
*
 * @return the configuration object used by this object.
*/
@Override
public Configuration getConf() {
return conf;
} | 3.68 |
hbase_CommonFSUtils_create | /**
* Create the specified file on the filesystem. By default, this will:
* <ol>
* <li>apply the umask in the configuration (if it is enabled)</li>
* <li>use the fs configured buffer size (or 4096 if not set)</li>
* <li>use the default replication</li>
* <li>use the default block size</li>
* <li>not track progre... | 3.68 |
hbase_MasterObserver_postEnableReplicationPeer | /**
* Called after enable a replication peer
* @param peerId a short name that identifies the peer
*/
default void postEnableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String peerId) throws IOException {
} | 3.68 |
flink_AllocatedSlot_getSlotId | /** Gets the Slot's unique ID defined by its TaskManager. */
public SlotID getSlotId() {
return new SlotID(getTaskManagerId(), physicalSlotNumber);
} | 3.68 |
flink_SharedObjects_add | /**
* Adds a new object to this {@code SharedObjects}. Although not necessary, it is recommended to
* only access the object through the returned {@link SharedReference}.
*/
public <T> SharedReference<T> add(T object) {
SharedReference<T> tag = new DefaultTag<>(id, objects.size());
objects.put(tag, object);
... | 3.68 |
hbase_ZKUtil_createNodeIfNotExistsNoWatch | /**
* Creates the specified znode with the specified data but does not watch it. Returns the znode of
 * the newly created node. If there is another problem, a KeeperException will be thrown.
* @param zkw zk reference
* @param znode path of node
* @param data data of node
* @param createMode speci... | 3.68 |
hbase_RestoreSnapshotHelper_hasRegionsToRemove | /** Returns true if there're regions to remove */
public boolean hasRegionsToRemove() {
return this.regionsToRemove != null && this.regionsToRemove.size() > 0;
} | 3.68 |
hbase_QuotaSettingsFactory_throttleUser | /**
* Throttle the specified user on the specified namespace.
* @param userName the user to throttle
* @param namespace the namespace to throttle
* @param type the type of throttling
* @param limit the allowed number of request/data per timeUnit
* @param timeUnit the limit time unit
* @param scope ... | 3.68 |
flink_FutureUtils_retryOperation | /**
* Helper method which retries the provided operation in case of a failure.
*
* @param resultFuture to complete
* @param operation to retry
* @param retries until giving up
* @param retryPredicate Predicate to test whether an exception is retryable
* @param executor to run the futures
* @param <T> type of th... | 3.68 |
dubbo_TriHttp2RemoteFlowController_stateCancelled | /**
* Called when the state is cancelled.
* @param state the state that was cancelled.
*/
void stateCancelled(FlowState state) { } | 3.68 |
hbase_HBaseTestingUtility_assignRegion | /**
* Uses directly the assignment manager to assign the region. and waits until the specified region
* has completed assignment.
* @return true if the region is assigned false otherwise.
*/
public boolean assignRegion(final RegionInfo regionInfo)
throws IOException, InterruptedException {
final AssignmentManag... | 3.68 |
framework_ComboBoxElement_getValue | /**
* Return value of the combo box element.
*
* @return value of the combo box element
*/
public String getValue() {
return getInputField().getAttribute("value");
} | 3.68 |
hudi_SqlQueryBuilder_select | /**
* Creates a SELECT query.
*
* @param columns The column names to select.
* @return The new {@link SqlQueryBuilder} instance.
*/
public static SqlQueryBuilder select(String... columns) {
if (columns == null || columns.length == 0) {
throw new IllegalArgumentException("No columns provided with SELECT state... | 3.68 |
hadoop_NamenodeStatusReport_getNumDeadDatanodes | /**
* Get the number of dead nodes.
*
* @return The number of dead nodes.
*/
public int getNumDeadDatanodes() {
return this.deadDatanodes;
} | 3.68 |
hmily_ResourceIdUtils_getResourceId | /**
* Gets resource id.
*
* @param jdbcUrl the jdbc url
* @return the resource id
*/
public String getResourceId(final String jdbcUrl) {
return resourceIds.computeIfAbsent(jdbcUrl, u -> u.contains("?") ? u.substring(0, u.indexOf('?')) : u);
} | 3.68 |
hadoop_EditLogOutputStream_setCurrentLogVersion | /**
* @param logVersion The version of the current edit log
*/
public void setCurrentLogVersion(int logVersion) {
this.currentLogVersion = logVersion;
} | 3.68 |
flink_ZooKeeperUtils_getZooKeeperEnsemble | /**
* Returns the configured ZooKeeper quorum (and removes whitespace, because ZooKeeper does not
* tolerate it).
*/
public static String getZooKeeperEnsemble(Configuration flinkConf)
throws IllegalConfigurationException {
String zkQuorum = flinkConf.getValue(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM)... | 3.68 |
pulsar_AuthorizationService_revokePermissionAsync | /**
* Revoke authorization-action permission on a topic to the given client.
*
* @param topicName
* @param role
*/
public CompletableFuture<Void> revokePermissionAsync(TopicName topicName, String role) {
return provider.revokePermissionAsync(topicName, role);
} | 3.68 |
pulsar_ManagedLedgerInterceptor_processPayloadBeforeLedgerWrite | /**
* Intercept before payload gets written to ledger.
* @param ledgerWriteOp OpAddEntry used to trigger ledger write.
* @param dataToBeStoredInLedger data to be stored in ledger
* @return handle to the processor
*/
default PayloadProcessorHandle processPayloadBeforeLedgerWrite(OpAddEntry ledgerWriteOp,
... | 3.68 |
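
The rows above follow the viewer's three columns (name, code_snippet, score). As a quick, hedged sketch of how the underlying split could be inspected programmatically, the snippet below uses the Hugging Face `datasets` library; the repository id `org/java-snippet-scores` is a placeholder, not the actual dataset path stated on this page.

```python
from datasets import load_dataset

# Placeholder repository id -- substitute the real dataset path shown in the viewer.
ds = load_dataset("org/java-snippet-scores", split="train")

# The three columns rendered in the table above.
print(ds.column_names)  # expected: ['name', 'code_snippet', 'score']

# Keep only the highest-scoring snippets; every row visible on this page scores 3.68.
top = ds.filter(lambda row: row["score"] >= 3.68)
print(len(top), top[0]["name"])
```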