| name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
|---|---|---|
flink_PrimitiveArrayTypeInfo_getComponentType | /**
* Gets the type information of the component type.
*
* @return The type information of the component type.
*/
@PublicEvolving
public TypeInformation<?> getComponentType() {
return BasicTypeInfo.getInfoFor(getComponentClass());
} | 3.68 |
morf_HumanReadableStatementProducer_addTable | /** @see org.alfasoftware.morf.upgrade.SchemaEditor#addTable(org.alfasoftware.morf.metadata.Table) **/
@Override
public void addTable(Table definition) {
consumer.schemaChange(HumanReadableStatementHelper.generateAddTableString(definition));
} | 3.68 |
pulsar_BrokerMonitor_main | /**
* Run a monitor from command line arguments.
*
* @param args Arguments for the monitor.
*/
public static void main(String[] args) throws Exception {
final Arguments arguments = new Arguments();
final JCommander jc = new JCommander(arguments);
jc.setProgramName("pulsar-perf monitor-brokers");
tr... | 3.68 |
flink_DeclarativeSlotManager_suspend | /** Suspends the component. This clears the internal state of the slot manager. */
@Override
public void suspend() {
if (!started) {
return;
}
LOG.info("Suspending the slot manager.");
slotManagerMetricGroup.close();
resourceTracker.clear();
if (taskExecutorManager != null) {
... | 3.68 |
hudi_HoodieTable_getIndex | /**
* Return the index.
*/
public HoodieIndex<?, ?> getIndex() {
return index;
} | 3.68 |
hadoop_AbstractClientRequestInterceptor_setNextInterceptor | /**
* Sets the {@link ClientRequestInterceptor} in the chain.
*/
@Override
public void setNextInterceptor(ClientRequestInterceptor nextInterceptor) {
this.nextInterceptor = nextInterceptor;
}
/**
* Sets the {@link Configuration} | 3.68 |
flink_DeltaIterationBase_getBroadcastInputs | /**
* DeltaIteration meta operator cannot have broadcast inputs.
*
* @return An empty map.
*/
public Map<String, Operator<?>> getBroadcastInputs() {
return Collections.emptyMap();
} | 3.68 |
hadoop_WritableName_addName | /**
* Add an alternate name for a class.
* @param writableClass input writableClass.
* @param name input name.
*/
public static synchronized void addName(Class<?> writableClass, String name) {
NAME_TO_CLASS.put(name, writableClass);
} | 3.68 |
hadoop_IOStatisticsLogging_sortedMap | /**
* Create a sorted (tree) map from an unsorted map.
* This incurs the cost of creating a map and that
* of inserting every object into the tree.
* @param source source map
* @param <E> value type
* @return a treemap with all the entries.
*/
private static <E> Map<String, E> sortedMap(
final Map<String, E>... | 3.68 |
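The row above describes the intent (copy an unsorted map into a tree map) but the body is cut off. A minimal sketch of that idea in plain Java, not necessarily the Hadoop implementation (the `SortedMaps` holder class is only for illustration):

```java
import java.util.Map;
import java.util.TreeMap;

final class SortedMaps {
    // Copies an arbitrary map into a TreeMap so entries iterate in key order;
    // this pays the cost of allocating the tree and inserting every entry.
    static <E> Map<String, E> sortedMap(Map<String, E> source) {
        return new TreeMap<>(source);
    }
}
```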
AreaShop_GithubUpdateCheck_checkUpdate | /**
* Check if an update is available.
* @param callback Callback to execute when the update check is done
* @return GithubUpdateCheck containing the status of the check
*/
public GithubUpdateCheck checkUpdate(UpdateCallback callback) {
checking = true;
final GithubUpdateCheck self = this;
// Check for update on... | 3.68 |
hadoop_AMRMClientAsyncImpl_removeContainerRequest | /**
* Remove previous container request. The previous container request may have
* already been sent to the ResourceManager. So even after the remove request
* the app must be prepared to receive an allocation for the previous request
* even after the remove request
* @param req Resource request
*/
public void... | 3.68 |
hbase_BalanceRequest_isIgnoreRegionsInTransition | /**
* Returns true if the balancer should execute even if regions are in transition, otherwise false.
* This is an advanced usage feature, as it can cause more issues than it fixes.
*/
public boolean isIgnoreRegionsInTransition() {
return ignoreRegionsInTransition;
} | 3.68 |
flink_BiFunctionWithException_unchecked | /**
 * Convert a {@link BiFunctionWithException} into a {@link BiFunction}.
*
* @param biFunctionWithException function with exception to convert into a function
* @param <A> input type
* @param <B> output type
* @return {@link BiFunction} which throws all checked exception as an unchecked exception.
*/
static <... | 3.68 |
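The conversion described by the Javadoc above is truncated before its body. As a hedged sketch of the general wrap-and-rethrow pattern (the `ThrowingBiFunction` interface and `Functions` holder below are assumptions, not Flink's actual types):

```java
import java.util.function.BiFunction;

// Hypothetical stand-in for a bi-function that may throw a checked exception.
@FunctionalInterface
interface ThrowingBiFunction<A, B, R> {
    R apply(A a, B b) throws Exception;
}

final class Functions {
    // Wraps the throwing function so any checked exception surfaces as a RuntimeException.
    static <A, B, R> BiFunction<A, B, R> unchecked(ThrowingBiFunction<A, B, R> fn) {
        return (a, b) -> {
            try {
                return fn.apply(a, b);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        };
    }
}
```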
pulsar_WindowManager_scanEvents | /**
* Scan events in the queue, using the expiration policy to check
* if the event should be evicted or not.
*
* @param fullScan if set, will scan the entire queue; if not set, will stop
* as soon as an event not satisfying the expiration policy is found
* @return the list of events to be processed as a part of ... | 3.68 |
graphhopper_LandmarkStorage_getFromWeight | /**
* @return the weight from the landmark to the specified node. Where the landmark integer is not
* a node ID but the internal index of the landmark array.
*/
int getFromWeight(int landmarkIndex, int node) {
int res = (int) landmarkWeightDA.getShort((long) node * LM_ROW_LENGTH + landmarkIndex * 4L + FROM_OFFSE... | 3.68 |
framework_TooltipDelay_getTestDescription | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTestDescription()
*/
@Override
protected String getTestDescription() {
return "Tooltips should appear with a five second delay.";
} | 3.68 |
hbase_HMaster_getMobCompactionState | /**
* Gets the mob file compaction state for a specific table. Whether all the mob files are selected
* is known during the compaction execution, but the statistic is done just before compaction
* starts, it is hard to know the compaction type at that time, so the rough statistics are chosen
* for the mob file comp... | 3.68 |
hadoop_AzureADAuthenticator_getTokenUsingClientCreds | /**
* gets Azure Active Directory token using the user ID and password of
* a service principal (that is, Web App in Azure Active Directory).
*
* Azure Active Directory allows users to set up a web app as a
* service principal. Users can optionally obtain service principal keys
* from AAD. This method gets a toke... | 3.68 |
hudi_KafkaConnectUtils_getWriteStatuses | /**
* Unwrap the Hudi {@link WriteStatus} from the received Protobuf message.
*
* @param participantInfo The {@link ControlMessage.ParticipantInfo} that contains the
* underlying {@link WriteStatus} sent by the participants.
* @return the list of {@link WriteStatus} returned by Hudi on a wri... | 3.68 |
hudi_HoodieInputFormatUtils_getFilteredCommitsTimeline | /**
* Extract HoodieTimeline based on HoodieTableMetaClient.
*
* @param job
* @param tableMetaClient
* @return
*/
public static Option<HoodieTimeline> getFilteredCommitsTimeline(JobContext job, HoodieTableMetaClient tableMetaClient) {
String tableName = tableMetaClient.getTableConfig().getTableName();
HoodieD... | 3.68 |
hadoop_ConfigFormat_resolve | /**
* Get a matching format or null
* @param type
* @return the format
*/
public static ConfigFormat resolve(String type) {
for (ConfigFormat format: values()) {
if (format.getSuffix().equals(type.toLowerCase(Locale.ENGLISH))) {
return format;
}
}
return null;
} | 3.68 |
framework_AbstractOrderedLayoutConnector_updateSlotListeners | /**
* Add/remove necessary ElementResizeListeners for one slot. This should be
 * called after each update to the slot or its widget.
*/
private void updateSlotListeners(ComponentConnector child) {
Slot slot = getWidget().getSlot(child.getWidget());
// Clear all possible listeners first
slot.setWidget... | 3.68 |
hbase_RequestConverter_buildAssignRegionRequest | /**
* Create a protocol buffer AssignRegionRequest
* @return an AssignRegionRequest
*/
public static AssignRegionRequest buildAssignRegionRequest(final byte[] regionName) {
AssignRegionRequest.Builder builder = AssignRegionRequest.newBuilder();
builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NA... | 3.68 |
querydsl_JTSGeometryExpression_geometryType | /**
* Returns the name of the instantiable subtype of Geometry of which this
* geometric object is an instantiable member. The name of the subtype of Geometry is returned as a string.
*
* @return geometry type
*/
public StringExpression geometryType() {
if (geometryType == null) {
geometryType = Expres... | 3.68 |
shardingsphere-elasticjob_JobFacade_beforeJobExecuted | /**
* Call before job executed.
*
* @param shardingContexts sharding contexts
*/
public void beforeJobExecuted(final ShardingContexts shardingContexts) {
for (ElasticJobListener each : elasticJobListeners) {
each.beforeJobExecuted(shardingContexts);
}
} | 3.68 |
hbase_HFileCleaner_startHFileDeleteThreads | /**
* Start threads for hfile deletion
*/
private void startHFileDeleteThreads() {
final String n = Thread.currentThread().getName();
running = true;
// start thread for large file deletion
for (int i = 0; i < largeFileDeleteThreadNumber; i++) {
Thread large = new Thread() {
@Override
public v... | 3.68 |
flink_KubernetesSessionCli_repStep | /**
* Check whether need to continue or kill the cluster.
*
* @param in input buffer reader
* @return f0, whether need to continue read from input. f1, whether need to kill the cluster.
*/
private Tuple2<Boolean, Boolean> repStep(BufferedReader in)
throws IOException, InterruptedException {
final long ... | 3.68 |
hbase_SnapshotManager_resetTempDir | /**
* Cleans up any zk-coordinated snapshots in the snapshot/.tmp directory that were left from
* failed snapshot attempts. For unfinished procedure2-coordinated snapshots, keep the working
* directory.
* @throws IOException if we can't reach the filesystem
*/
private void resetTempDir() throws IOException {
Set... | 3.68 |
flink_JobMaster_updateTaskExecutionState | /**
* Updates the task execution state for a given task.
*
* @param taskExecutionState New task execution state for a given task
* @return Acknowledge the task execution state update
*/
@Override
public CompletableFuture<Acknowledge> updateTaskExecutionState(
final TaskExecutionState taskExecutionState) {
... | 3.68 |
shardingsphere-elasticjob_ElasticJobSnapshotServiceConfiguration_snapshotService | /**
* Create a Snapshot service bean and start listening.
*
* @param registryCenter registry center
* @param snapshotServiceProperties snapshot service properties
* @return a bean of snapshot service
*/
@ConditionalOnProperty(name = "elasticjob.dump.port")
@Bean(initMethod = "listen", destroyMethod = "close")
pub... | 3.68 |
framework_AbstractComponent_setData | /**
* Sets the data object, that can be used for any application specific data.
* The component does not use or modify this data.
*
* @param data
* the Application specific data.
* @since 3.1
*/
public void setData(Object data) {
applicationData = data;
} | 3.68 |
framework_VUpload_updateEnabledForSubmitButton | /**
* Updates the enabled status for submit button. If the widget itself is
* disabled, so is the submit button. It must also follow overall enabled
* status in immediate mode, otherwise you cannot select a file at all. In
* non-immediate mode there is another button for selecting the file, so the
* submit button ... | 3.68 |
hbase_MetricsStochasticBalancer_updateMetricsSize | /**
* Updates the number of metrics reported to JMX
*/
public void updateMetricsSize(int size) {
stochasticSource.updateMetricsSize(size);
} | 3.68 |
flink_LinkElement_link | /**
* Creates a link with a given url. This url will be used as a description for that link.
*
* @param link address that this link should point to
* @return link representation
*/
public static LinkElement link(String link) {
return new LinkElement(link, link);
} | 3.68 |
streampipes_CalculateDurationProcessor_declareModel | //TODO: Change Icon
@Override
public DataProcessorDescription declareModel() {
return ProcessingElementBuilder.create("org.apache.streampipes.processors.transformation.jvm.duration-value")
.category(DataProcessorType.TIME)
.withLocales(Locales.EN)
.withAssets(Assets.DOCUMENTATION, Assets.ICON)
... | 3.68 |
framework_VComboBox_selectPrevItem | /**
* Selects the previous item in the filtered selections.
*/
public void selectPrevItem() {
debug("VComboBox.SP: selectPrevItem()");
final int index = menu.getSelectedIndex() - 1;
if (index > -1) {
selectItem(menu.getItems().get(index));
} else if (index == -1) {
selectPrevPage();
... | 3.68 |
hadoop_ResourceRequest_isAnyLocation | /**
* Check whether the given <em>host/rack</em> string represents an arbitrary
* host name.
*
* @param hostName <em>host/rack</em> on which the allocation is desired
* @return whether the given <em>host/rack</em> string represents an arbitrary
* host name
*/
@Public
@Stable
public static boolean isAnyLocation(S... | 3.68 |
framework_Window_setCloseShortcut | /**
* This is the old way of adding a keyboard shortcut to close a
* {@link Window} - to preserve compatibility with existing code under the
* new functionality, this method now first removes all registered close
* shortcuts, then adds the default ESCAPE shortcut key, and then attempts
* to add the shortcut provid... | 3.68 |
morf_ObjectTreeTraverser_dispatch | /**
* Invokes the callback on the object. If the object implements Driver then
* its drive method is invoked, propagating this traverser. Iterables and
* arrays will have the callback invoked on their elements.
*
* @param object the object node in the object graph.
* @return this, for method chaining.
* @param <... | 3.68 |
hudi_HoodieUnMergedLogRecordScanner_scan | /**
* Scans delta-log files processing blocks
*/
public final void scan() {
scan(false);
} | 3.68 |
flink_ResourceInformationReflector_setResourceInformationUnSafe | /**
* Same as {@link #setResourceInformation(Resource, String, long)} but allows to pass objects
* that are not of type {@link Resource}.
*/
@VisibleForTesting
void setResourceInformationUnSafe(Object resource, String resourceName, long amount) {
if (!isYarnResourceTypesAvailable) {
LOG.info(
... | 3.68 |
framework_AbstractMedia_setMuted | /**
* Set whether to mute the audio or not.
*
* @param muted
*/
public void setMuted(boolean muted) {
getState().muted = muted;
} | 3.68 |
hadoop_CloseableReferenceCount_setClosed | /**
* Mark the status as closed.
*
* Once the status is closed, it cannot be reopened.
*
* @return The current reference count.
* @throws ClosedChannelException If someone else closes the object
* before we do.
*/
public int setClosed() throws ClosedChann... | 3.68 |
zilla_HpackContext_staticIndex25 | // Index in static table for the given name of length 25
private static int staticIndex25(DirectBuffer name)
{
return (name.getByte(24) == 'y' && STATIC_TABLE[56].name.equals(name)) ? 56 : -1; // strict-transport-security
} | 3.68 |
hbase_ZKAuthentication_loginClient | /**
* Log in the current zookeeper client using the given configuration keys for the credential file
* and login principal.
* <p>
* <strong>This is only applicable when running on secure hbase</strong> On regular HBase (without
* security features), this will safely be ignored.
* </p>
* @param conf The ... | 3.68 |
hadoop_OBSFileSystem_getObsListing | /**
* Return the OBSListing instance used by this filesystem.
*
* @return the OBSListing instance
*/
OBSListing getObsListing() {
return obsListing;
} | 3.68 |
flink_ExecutionConfig_getDefaultKryoSerializers | /** Returns the registered default Kryo Serializers. */
public LinkedHashMap<Class<?>, SerializableSerializer<?>> getDefaultKryoSerializers() {
return defaultKryoSerializers;
} | 3.68 |
dubbo_RegistrySpecListener_onPostOfDirectory | /**
* Every time an event is triggered, multiple fixed key related to directory are increment, which has nothing to do with the monitored key
*/
public static AbstractMetricsKeyListener onPostOfDirectory(
MetricsKey metricsKey, CombMetricsCollector<?> collector) {
return AbstractMetricsKeyListener.onEvent... | 3.68 |
hudi_HoodieListData_lazy | /**
* Creates instance of {@link HoodieListData} bearing *lazy* execution semantic
*
* @param listData a {@link List} of objects in type T
* @param <T> type of object
* @return a new instance containing the {@link List<T>} reference
*/
public static <T> HoodieListData<T> lazy(List<T> listData) {
return new... | 3.68 |
rocketmq-connect_JdbcSourceConnector_taskConfigs | /**
* Returns a set of configurations for Tasks based on the current configuration,
* producing at most count configurations.
*
* @param maxTasks maximum number of configurations to generate
* @return configurations for Tasks
*/
@Override
public List<KeyValue> taskConfigs(int maxTasks) {
log.info("Connector t... | 3.68 |
flink_AbstractPythonStreamGroupAggregateOperator_onProcessingTime | /** Invoked when a processing-time timer fires. */
@Override
public void onProcessingTime(InternalTimer<RowData, VoidNamespace> timer) throws Exception {
if (stateCleaningEnabled) {
RowData key = timer.getKey();
long timestamp = timer.getTimestamp();
reuseTimerRowData.setLong(2, timestamp);
... | 3.68 |
hadoop_HsController_tasksPage | /*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#tasksPage()
*/
@Override
protected Class<? extends View> tasksPage() {
return HsTasksPage.class;
} | 3.68 |
hadoop_EditLogInputStream_nextValidOp | /**
* Get the next valid operation from the stream storage.
*
* This is exactly like nextOp, except that we attempt to skip over damaged
* parts of the edit log
*
* @return an operation from the stream or null if at end of stream
*/
protected FSEditLogOp nextValidOp() {
// This is a trivial implementation w... | 3.68 |
framework_GeneratedPropertyContainer_getWrappedContainer | /**
* Returns the original underlying container.
*
* @return the original underlying container
*/
public Container.Indexed getWrappedContainer() {
return wrappedContainer;
} | 3.68 |
framework_SliderElement_getValue | /**
* Get value of the slider
*
 * Warning! This method causes the slider popup to appear on the screen. To hide
* this popup just focus any other element on the page.
*/
public String getValue() {
WebElement popupElem = findElement(By.vaadin("#popup"));
return popupElem.getAttribute("textContent");
} | 3.68 |
flink_MemorySegment_put | /**
* Bulk put method. Copies {@code numBytes} bytes from the given {@code ByteBuffer}, into this
* memory segment. The bytes will be read from the target buffer starting at the buffer's
* current position, and will be written to this memory segment starting at {@code offset}. If
* this method attempts to read more... | 3.68 |
flink_CompositeTypeSerializerUtil_constructIntermediateCompatibilityResult | /**
* Constructs an {@link IntermediateCompatibilityResult} with the given array of nested
* serializers and their corresponding serializer snapshots.
*
* <p>This result is considered "intermediate", because the actual final result is not yet built
* if it isn't defined. This is the case if the final result is sup... | 3.68 |
flink_RestfulGateway_requestJob | /**
* Requests the {@link ArchivedExecutionGraph} for the given jobId. If there is no such graph,
* then the future is completed with a {@link FlinkJobNotFoundException}.
*
* @param jobId identifying the job whose {@link ArchivedExecutionGraph} is requested
* @param timeout for the asynchronous operation
* @retur... | 3.68 |
pulsar_ModularLoadManagerImpl_updateBundleSplitMetrics | /**
* As leader broker, update bundle split metrics.
*
* @param bundlesSplit the number of bundles splits
*/
private void updateBundleSplitMetrics(int bundlesSplit) {
bundleSplitCount += bundlesSplit;
List<Metrics> metrics = new ArrayList<>();
Map<String, String> dimensions = new HashMap<>();
dime... | 3.68 |
framework_NullValidator_validate | /**
* Validates the data given in value.
*
* @param value
* the value to validate.
* @throws Validator.InvalidValueException
* if the value was invalid.
*/
@Override
public void validate(Object value) throws Validator.InvalidValueException {
if ((onlyNullAllowed && value != null)
... | 3.68 |
hadoop_InMemoryConfigurationStore_getCurrentVersion | /**
* Configuration mutations not logged (i.e. not persisted). As such, they are
* not persisted and not versioned. Hence, a current version is not
* applicable.
* @return null A current version not applicable for this store.
*/
@Override
public Version getCurrentVersion() {
// Does nothing.
return null;
} | 3.68 |
hibernate-validator_AnnotationApiHelper_getDeclaredTypeByName | /**
* Returns the {@link DeclaredType} for the given class name.
*
* @param className A fully qualified class name, e.g. "java.lang.String".
*
* @return A {@link DeclaredType} representing the type with the given name,
* or null, if no such type exists.
*/
public DeclaredType getDeclaredTypeByName(String... | 3.68 |
hbase_JVM_isAmd64 | /**
* Check if the arch is amd64;
* @return whether this is amd64 or not.
*/
public static boolean isAmd64() {
return amd64;
} | 3.68 |
framework_FieldGroup_buildAndBind | /**
* Builds a field using the given caption and binds it to the given property
* id using the field binder.
*
* @param caption
* The caption for the field
* @param propertyId
* The property id to bind to. Must be present in the field
* finder.
* @throws BindException
* ... | 3.68 |
hadoop_FSBuilder_optLong | /**
* Set optional long parameter for the Builder.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #opt(String, String)
*/
default B optLong(@Nonnull String key, long value) {
return opt(key, Long.toString(value));
} | 3.68 |
framework_GridElement_getField | /**
* Gets the editor field for column in given index.
*
* @param colIndex
* the column index
* @return the editor field for given location
*
* @throws NoSuchElementException
* if {@code isEditable(colIndex) == false}
*/
public TestBenchElement getField(int colIndex) {
return grid.ge... | 3.68 |
morf_SqlDialect_getForUpdateSql | /**
* Default behaviour for FOR UPDATE. Can be overridden.
* @return The String representation of the FOR UPDATE clause.
*/
protected String getForUpdateSql() {
return " FOR UPDATE";
} | 3.68 |
graphhopper_QueryOverlayBuilder_buildVirtualEdges | /**
* For all specified snaps calculate the snapped point and if necessary set the closest node
* to a virtual one and reverse the closest edge. Additionally the wayIndex can change if an edge is
* swapped.
*/
private void buildVirtualEdges(List<Snap> snaps) {
GHIntObjectHashMap<List<Snap>> edge2res = new GHInt... | 3.68 |
druid_MySqlStatementParser_parseRepeat | /**
* parse repeat statement with label
*
* @param label
*/
public MySqlRepeatStatement parseRepeat(String label) {
MySqlRepeatStatement repeatStmt = new MySqlRepeatStatement();
repeatStmt.setLabelName(label);
accept(Token.REPEAT);
this.parseStatementList(repeatStmt.getStatements(), -1, repeatStmt);... | 3.68 |
framework_ApplicationConfiguration_useServiceUrlPathParam | /**
* Checks whether path info in requests to the server-side service should be
* in a request parameter (named <code>v-resourcePath</code>) or appended to
* the end of the service URL.
*
* @see #getServiceUrl()
*
* @return <code>true</code> if path info should be a request parameter;
* <code>false</cod... | 3.68 |
flink_JobMaster_declineCheckpoint | // TODO: This method needs a leader session ID
@Override
public void declineCheckpoint(DeclineCheckpoint decline) {
schedulerNG.declineCheckpoint(decline);
} | 3.68 |
hadoop_DomainRowKey_parseRowKeyFromString | /**
* Given the encoded row key as string, returns the row key as an object.
* @param encodedRowKey String representation of row key.
* @return A <cite>DomainRowKey</cite> object.
*/
public static DomainRowKey parseRowKeyFromString(String encodedRowKey) {
return new DomainRowKeyConverter().decodeFromString(encode... | 3.68 |
framework_VScrollTable_updateMaxIndent | /**
* For internal use only. May be removed or replaced in the future.
*/
public void updateMaxIndent() {
int oldIndent = scrollBody.getMaxIndent();
scrollBody.calculateMaxIndent();
if (oldIndent != scrollBody.getMaxIndent()) {
// indent updated, headers might need adjusting
triggerLazyCol... | 3.68 |
morf_InlineTableUpgrader_visitPortableSqlStatement | /**
* Write the sql statement.
*
* @param sql The {@link PortableSqlStatement}
*/
private void visitPortableSqlStatement(PortableSqlStatement sql) {
sql.inplaceUpdateTransitionalTableNames(tracker);
writeStatement(sql.getStatement(sqlDialect.getDatabaseType().identifier(), sqlDialect.schemaNamePrefix()));
} | 3.68 |
flink_JobGraph_getVerticesAsArray | /**
* Returns an array of all job vertices that are registered with the job graph. The order in
* which the vertices appear in the list is not defined.
*
* @return an array of all job vertices that are registered with the job graph
*/
public JobVertex[] getVerticesAsArray() {
return this.taskVertices.values().... | 3.68 |
framework_CheckBoxGroup_setHtmlContentAllowed | /**
* Sets whether html is allowed in the item captions. If set to true, the
* captions are passed to the browser as html and the developer is
* responsible for ensuring no harmful html is used. If set to false, the
* content is passed to the browser as plain text.
*
* @param htmlContentAllowed
* true... | 3.68 |
flink_CollectionUtil_iterableToList | /**
* Collects the elements in the Iterable in a List. If the iterable argument is null, this
* method returns an empty list.
*/
public static <E> List<E> iterableToList(@Nullable Iterable<E> iterable) {
if (iterable == null) {
return Collections.emptyList();
}
final ArrayList<E> list = new Arra... | 3.68 |
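The snippet above is truncated after the null check. A self-contained sketch of the documented behavior (a null iterable yields an empty list, anything else is copied into an ArrayList); the `Iterables` holder name is only for illustration:

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

final class Iterables {
    // Copies the elements of an Iterable into a List; null maps to an immutable empty list.
    static <E> List<E> iterableToList(Iterable<E> iterable) {
        if (iterable == null) {
            return Collections.emptyList();
        }
        List<E> list = new ArrayList<>();
        for (E element : iterable) {
            list.add(element);
        }
        return list;
    }
}
```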
framework_AbstractTransactionalQuery_ensureTransaction | /**
* Check that a transaction is active.
*
* @throws SQLException
* if no active transaction
*/
protected void ensureTransaction() throws SQLException {
if (!isInTransaction()) {
throw new SQLException("No active transaction!");
}
} | 3.68 |
hadoop_SinglePendingCommit_getText | /**
* Arbitrary notes.
* @return any notes
*/
public String getText() {
return text;
} | 3.68 |
hadoop_WasbTokenRenewer_handleKind | /**
* Checks if this particular object handles the Kind of token passed.
* @param kind the kind of the token
* @return true if it handles passed token kind false otherwise.
*/
@Override
public boolean handleKind(Text kind) {
return WasbDelegationTokenIdentifier.TOKEN_KIND.equals(kind);
} | 3.68 |
graphhopper_NodeBasedWitnessPathSearcher_getMemoryUsageAsString | /**
* @return currently used memory in MB (approximately)
*/
public String getMemoryUsageAsString() {
return (8L * weights.length
+ changedNodes.buffer.length * 4L
+ heap.getMemoryUsage()
) / Helper.MB + "MB";
} | 3.68 |
hadoop_DecodingValidator_validate | /**
* Validate outputs decoded from inputs, by decoding an input back from
* those outputs and comparing it with the original one.
* @param inputs input buffers used for decoding
* @param erasedIndexes indexes of erased units used for decoding
* @param outputs decoded output buffers
* @throws IOException raised... | 3.68 |
hbase_ReplicationUtils_isReplicationForBulkLoadDataEnabled | /**
* @param c Configuration to look at
* @return True if replication for bulk load data is enabled.
*/
public static boolean isReplicationForBulkLoadDataEnabled(final Configuration c) {
return c.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT);
} | 3.68 |
framework_FilesystemContainer_removeItemProperty | /**
* Filesystem container does not support removing properties.
*
* @see Item#removeItemProperty(Object)
*/
@Override
public boolean removeItemProperty(Object id)
throws UnsupportedOperationException {
throw new UnsupportedOperationException(
"Filesystem container does not support property ... | 3.68 |
pulsar_ResourceGroupService_getRgLocalUsageByteCount | // Visibility for testing.
protected static double getRgLocalUsageByteCount (String rgName, String monClassName) {
return rgLocalUsageBytes.labels(rgName, monClassName).get();
} | 3.68 |
dubbo_AbstractJSONImpl_checkObjectList | /**
* Casts a list of unchecked JSON values to a list of checked objects in Java type.
* If the given list contains a value that is not a Map, throws an exception.
*/
@SuppressWarnings("unchecked")
@Override
public List<Map<String, ?>> checkObjectList(List<?> rawList) {
assert rawList != null;
for (int i = 0... | 3.68 |
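The check loop above is cut off. A plausible sketch of the documented contract (every element must be a Map, otherwise fail) in generic Java rather than Dubbo's actual code; the `JsonLists` holder is hypothetical:

```java
import java.util.List;
import java.util.Map;

final class JsonLists {
    // Verifies every element of the raw list is a JSON object (a Map) before casting the list.
    @SuppressWarnings("unchecked")
    static List<Map<String, ?>> checkObjectList(List<?> rawList) {
        for (int i = 0; i < rawList.size(); i++) {
            Object value = rawList.get(i);
            if (!(value instanceof Map)) {
                throw new ClassCastException("element at index " + i + " is not an object: " + value);
            }
        }
        return (List<Map<String, ?>>) rawList;
    }
}
```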
querydsl_JTSGeometryExpressions_polygonOperation | /**
* Create a new Polygon operation expression
*
* @param op operator
* @param args arguments
* @return operation expression
*/
public static JTSPolygonExpression<Polygon> polygonOperation(Operator op, Expression<?>... args) {
return new JTSPolygonOperation<Polygon>(Polygon.class, op, args);
} | 3.68 |
hbase_RecoverLeaseFSUtils_isFileClosed | /**
* Call HDFS-4525 isFileClosed if it is available.
* @return True if file is closed.
*/
private static boolean isFileClosed(final DistributedFileSystem dfs, final Method m,
final Path p) {
try {
return (Boolean) m.invoke(dfs, p);
} catch (SecurityException e) {
LOG.warn("No access", e);
} catch (E... | 3.68 |
hbase_RegionLocations_numNonNullElements | /**
* Returns the size of not-null locations
* @return the size of not-null locations
*/
public int numNonNullElements() {
return numNonNullElements;
} | 3.68 |
hbase_TableListModel_setTables | /**
* @param tables the tables to set
*/
public void setTables(List<TableModel> tables) {
this.tables = tables;
} | 3.68 |
hudi_ExternalFilePathUtil_isExternallyCreatedFile | /**
* Checks if the file name was created by an external system by checking for the external file marker at the end of the file name.
* @param fileName The file name
* @return True if the file was created by an external system, false otherwise
*/
public static boolean isExternallyCreatedFile(String fileName) {
re... | 3.68 |
dubbo_Bytes_zip | /**
* zip.
*
* @param bytes source.
* @return compressed byte array.
* @throws IOException
*/
public static byte[] zip(byte[] bytes) throws IOException {
UnsafeByteArrayOutputStream bos = new UnsafeByteArrayOutputStream();
OutputStream os = new DeflaterOutputStream(bos);
try {
os.write(bytes);... | 3.68 |
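The zip helper above is cut off before the stream is closed and the result returned. A minimal sketch of DEFLATE compression with standard JDK classes, using ByteArrayOutputStream in place of Dubbo's UnsafeByteArrayOutputStream:

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.zip.DeflaterOutputStream;

final class Compress {
    // Compresses the input with DEFLATE; closing the stream flushes the deflater
    // before the underlying buffer is converted to a byte array.
    static byte[] zip(byte[] bytes) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (OutputStream os = new DeflaterOutputStream(bos)) {
            os.write(bytes);
        }
        return bos.toByteArray();
    }
}
```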
hbase_FutureUtils_consume | /**
* Log the error if the future indicates any failure.
*/
public static void consume(CompletableFuture<?> future) {
addListener(future, (r, e) -> {
if (e != null) {
LOG.warn("Async operation fails", e);
}
});
} | 3.68 |
hadoop_DynamicIOStatisticsBuilder_withAtomicIntegerMaximum | /**
* Add a maximum statistic to dynamically return the
* latest value of the source.
* @param key key of this statistic
* @param source atomic int maximum
* @return the builder.
*/
public DynamicIOStatisticsBuilder withAtomicIntegerMaximum(String key,
AtomicInteger source) {
withLongFunctionMaximum(key, s ... | 3.68 |
hbase_Client_getExtraHeader | /**
* Get an extra header value.
*/
public String getExtraHeader(final String name) {
return extraHeaders.get(name);
} | 3.68 |
morf_FieldReference_direction | /**
* Sets the direction to sort the field on.
*
* @param direction the direction to set
* @return this
*/
public Builder direction(Direction direction) {
this.direction = direction;
return this;
} | 3.68 |
hadoop_SimpleTcpServer_getBoundPort | // boundPort will be set only after server starts
public int getBoundPort() {
return this.boundPort;
} | 3.68 |
hbase_RestoreSnapshotProcedure_addRegionsToInMemoryStates | /**
* Add regions to in-memory states
* @param regionInfos regions to add
* @param env MasterProcedureEnv
* @param regionReplication the number of region replications
*/
private void addRegionsToInMemoryStates(List<RegionInfo> regionInfos, MasterProcedureEnv env,
int regionReplication) {
As... | 3.68 |
framework_VScrollTable_updateBody | /**
* For internal use only. May be removed or replaced in the future.
*
* @param uidl
* which contains row data
* @param firstRow
* first row in data set
* @param reqRows
* amount of rows in data set
*/
public void updateBody(UIDL uidl, int firstRow, int reqRows) {
int old... | 3.68 |
flink_LegacySourceTransformation_getOperatorFactory | /** Returns the {@code StreamOperatorFactory} of this {@code LegacySourceTransformation}. */
public StreamOperatorFactory<T> getOperatorFactory() {
return operatorFactory;
} | 3.68 |