| name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68) |
|---|---|---|
hudi_SqlQueryBuilder_limit | /**
* Appends a "limit" clause to a query.
*
* @param count The limit count.
* @return The {@link SqlQueryBuilder} instance.
*/
public SqlQueryBuilder limit(long count) {
if (count < 0) {
throw new IllegalArgumentException("Please provide a positive integer for the LIMIT clause.");
}
sqlBuilder.append(" ... | 3.68 |
hmily_AggregateBinder_bind | /**
* Bind object.
*
* @param name the name
* @param target the target
* @param elementBinder the element binder
* @return the object
*/
@SuppressWarnings("unchecked")
public Object bind(final PropertyName name, final BindData<?> target, final AggregateElementBinder elementBinder) {
Object re... | 3.68 |
hbase_StoreFileInfo_getReferredToFile | /*
* Return path to the file referred to by a Reference. Presumes a directory hierarchy of
* <code>${hbase.rootdir}/data/${namespace}/tablename/regionname/familyname</code>.
* @param p Path to a Reference file.
* @return Calculated path to parent region file.
* @throws IllegalArgumentException when path regex fail... | 3.68 |
morf_SqlDialect_appendJoin | /**
* @param result the string builder to append to
* @param join the join statement
* @param innerJoinKeyword usually an INNER JOIN, but this can be changed for optimisations
*/
protected void appendJoin(StringBuilder result, Join join, String innerJoinKeyword) {
// Put the type in
switch (join.getType()) {
... | 3.68 |
hbase_CheckAndMutate_newBuilder | /**
* returns a builder object to build a CheckAndMutate object
* @param row row
* @return a builder object
*/
public static Builder newBuilder(byte[] row) {
return new Builder(row);
} | 3.68 |
hadoop_BaseRecord_isExpired | /**
* Check if this record is expired. The default is false. Override for
* customized behavior.
*
* @return True if the record is expired.
*/
public boolean isExpired() {
return false;
} | 3.68 |
hmily_TableMetaData_getColumnMetaData | /**
* Get column meta data.
*
* @param columnIndex column index
* @return column meta data
*/
public ColumnMetaData getColumnMetaData(final int columnIndex) {
return columns.get(columnNames.get(columnIndex));
} | 3.68 |
hadoop_ReencryptionHandler_startUpdaterThread | /**
* Start the re-encryption updater thread.
*/
void startUpdaterThread() {
updaterExecutor = Executors.newSingleThreadExecutor(
new ThreadFactoryBuilder().setDaemon(true)
.setNameFormat("reencryptionUpdaterThread #%d").build());
updaterExecutor.execute(reencryptionUpdater);
} | 3.68 |
framework_VLoadingIndicator_getThirdDelay | /**
* Returns the delay (in ms) which must pass before the loading indicator
* moves to its "third" state.
*
* @return The delay (in ms) until the loading indicator moves into its
* "third" state. Counted from when {@link #trigger()} is called.
*/
public int getThirdDelay() {
return thirdDelay;
} | 3.68 |
hbase_MobUtils_hasMobColumns | /**
* Checks whether this table has mob-enabled columns.
* @param htd The current table descriptor.
* @return Whether this table has mob-enabled columns.
*/
public static boolean hasMobColumns(TableDescriptor htd) {
ColumnFamilyDescriptor[] hcds = htd.getColumnFamilies();
for (ColumnFamilyDescriptor hcd : hcds)... | 3.68 |
hbase_HBaseReplicationEndpoint_reconnect | /**
* A private method used to re-establish a zookeeper session with a peer cluster.
*/
private void reconnect(KeeperException ke) {
if (
ke instanceof ConnectionLossException || ke instanceof SessionExpiredException
|| ke instanceof AuthFailedException
) {
String clusterKey = ctx.getPeerConfig().ge... | 3.68 |
hadoop_CSQueueStore_getQueues | /**
* Returns all queues as a list.
* @return List containing all the queues
*/
public Collection<CSQueue> getQueues() {
try {
modificationLock.readLock().lock();
return ImmutableList.copyOf(fullNameQueues.values());
} finally {
modificationLock.readLock().unlock();
}
} | 3.68 |
hadoop_FileIoProvider_getHardLinkCount | /**
* Retrieves the number of links to the specified file.
*
* @param volume target volume. null if unavailable.
* @param f file whose link count is being queried.
* @return number of hard-links to the given file, including the
* given path itself.
* @throws IOException
*/
public int getHardLinkCount(
... | 3.68 |
flink_SinkTestSuiteBase_testMetrics | /**
* Test connector sink metrics.
*
* <p>This test will create a sink in the external system, generate test data and write them to
* the sink via a Flink job. Then read and compare the metrics.
*
* <p>Now test: numRecordsOut
*/
@TestTemplate
@DisplayName("Test sink metrics")
public void testMetrics(
Tes... | 3.68 |
flink_Tuple16_copy | /**
* Shallow tuple copy.
*
* @return A new Tuple with the same fields as this.
*/
@Override
@SuppressWarnings("unchecked")
public Tuple16<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15> copy() {
return new Tuple16<>(
this.f0, this.f1, this.f2, this.f3, this.f4, this.f5, this.f... | 3.68 |
flink_OptionalUtils_firstPresent | /** Returns the first {@link Optional} which is present. */
@SafeVarargs
public static <T> Optional<T> firstPresent(Optional<T>... opts) {
for (Optional<T> opt : opts) {
if (opt.isPresent()) {
return opt;
}
}
return Optional.empty();
} | 3.68 |
framework_Slot_onDetach | /*
* (non-Javadoc)
*
* @see com.google.gwt.user.client.ui.Widget#onDetach()
*/
@Override
protected void onDetach() {
if (spacer != null) {
spacer.removeFromParent();
}
super.onDetach();
} | 3.68 |
hadoop_EncryptionSecrets_init | /**
* Init all state, including after any read.
* @throws IOException error rebuilding state.
*/
private void init() throws IOException {
encryptionMethod = S3AEncryptionMethods.getMethod(
encryptionAlgorithm);
} | 3.68 |
framework_DesignFormatter_encodeForTextNode | /**
* <p>
* Encodes <em>some</em> special characters in a given input String to make
* it ready to be written as contents of a text node. WARNING: this will
* e.g. encode "<someTag>" to "&lt;someTag&gt;" as this method
* doesn't do any parsing and assumes that there are no intended HTML
* elements i... | 3.68 |
pulsar_NoStrictCacheSizeAllocator_release | /**
* This method used to release used cache size and add available cache size.
* in normal case, the available size shouldn't exceed max cache size.
*
* @param size release size
*/
public void release(long size) {
lock.lock();
try {
availableCacheSize.add(size);
if (availableCacheSize.long... | 3.68 |
flink_SuperstepBarrier_onEvent | /** Barrier will release the waiting thread if an event occurs. */
@Override
public void onEvent(TaskEvent event) {
if (event instanceof TerminationEvent) {
terminationSignaled = true;
} else if (event instanceof AllWorkersDoneEvent) {
AllWorkersDoneEvent wde = (AllWorkersDoneEvent) event;
... | 3.68 |
flink_MemorySegment_putChar | /**
* Writes a char value to the given position, in the system's native byte order.
*
* @param index The position at which the memory will be written.
* @param value The char value to be written.
* @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the
* segment size minus 2.
... | 3.68 |
flink_ZooKeeperStateHandleStore_hasLock | /**
* Checks whether a lock is created for this instance on the passed ZooKeeper node.
*
* @param rootPath The node that shall be checked.
* @return {@code true} if the lock exists; {@code false} otherwise.
*/
private boolean hasLock(String rootPath) throws Exception {
final String normalizedRootPath = normali... | 3.68 |
hbase_UnassignProcedure_remoteCallFailed | /**
* Returns If true, we will re-wake up this procedure; if false, the procedure stays suspended.
*/
@Override
protected boolean remoteCallFailed(final MasterProcedureEnv env, final RegionStateNode regionNode,
final IOException exception) {
return true;
} | 3.68 |
flink_FlinkContainersSettings_defaultConfig | /**
* {@code FlinkContainersConfig} based on defaults.
*
* @return The Flink containers config.
*/
public static FlinkContainersSettings defaultConfig() {
return builder().build();
} | 3.68 |
framework_LocatorUtil_isNotificationElement | /**
* Checks if path refers to vaadin Notification element
* com.vaadin.ui.Notification.
*
* @param path
* to vaadin element
* @return true if path refers to Notification element, false otherwise
*/
public static boolean isNotificationElement(String path) {
String regex = "^\\/{0,2}(com\\.vaadin\\... | 3.68 |
morf_OracleDialect_makeStringLiteral | /**
* Turn a string value into an SQL string literal which has that value.
* <p>
* We use {@linkplain StringUtils#isEmpty(CharSequence)} because we want to
* differentiate between a single space and an empty string.
* </p>
* <p>
* This is necessary because char types cannot be null and must contain
* a single s... | 3.68 |
hbase_ProcedureCoordinator_memberAcquiredBarrier | /**
* Notification that the procedure had the specified member acquired its part of the barrier via
* {@link Subprocedure#acquireBarrier()}.
* @param procName name of the procedure that acquired
* @param member name of the member that acquired
*/
void memberAcquiredBarrier(String procName, final String member) {... | 3.68 |
graphhopper_VectorTile_getValuesList | /**
* <pre>
* Dictionary encoding for values
* </pre>
*
* <code>repeated .vector_tile.Tile.Value values = 4;</code>
*/
public java.util.List<vector_tile.VectorTile.Tile.Value> getValuesList() {
if (valuesBuilder_ == null) {
return java.util.Collections.unmodifiableList(values_);
} else {
return values... | 3.68 |
hadoop_AbstractOperationAuditor_getOptions | /**
* Get the options this auditor was initialized with.
* @return options.
*/
protected OperationAuditorOptions getOptions() {
return options;
} | 3.68 |
framework_VScrollTable_restoreRowVisibility | /**
* Restore row visibility which is set to "none" when the row is
* rendered (due a performance optimization).
*/
private void restoreRowVisibility() {
for (Widget row : renderedRows) {
row.getElement().getStyle().setProperty("visibility", "");
}
} | 3.68 |
morf_SpreadsheetDataSetConsumer_createTitle | /**
* Inserts a row at the top of the sheet with the given title
* @param sheet add the title to
* @param title to add
*/
protected void createTitle(WritableSheet sheet, String title) {
try {
Label cell = new Label(0, 0, title);
WritableFont headingFont = new WritableFont(WritableFont.ARIAL, 16, WritableF... | 3.68 |
framework_AbstractContainer_setPropertySetChangeListeners | /**
* Sets the property set change listener collection. For internal use only.
*
* @param propertySetChangeListeners
*/
protected void setPropertySetChangeListeners(
Collection<Container.PropertySetChangeListener> propertySetChangeListeners) {
this.propertySetChangeListeners = propertySetChangeListeners... | 3.68 |
pulsar_PulsarMockLedgerHandle_readAsync | // ReadHandle interface
@Override
public CompletableFuture<LedgerEntries> readAsync(long firstEntry, long lastEntry) {
return readHandle.readAsync(firstEntry, lastEntry);
} | 3.68 |
dubbo_TypeDefinitionBuilder_build | /**
* Build the instance of {@link TypeDefinition} from the specified {@link TypeMirror type}
*
* @param processingEnv {@link ProcessingEnvironment}
* @param type {@link TypeMirror type}
* @return non-null
*/
static TypeDefinition build(
ProcessingEnvironment processingEnv, TypeMirror type, Map<S... | 3.68 |
morf_ViewChanges_visit | /**
* Visit the selected node for the topological sort. If it has been marked start working
* back up the list. Otherwise, mark it and then try visiting all of its dependent nodes.
*
* @param node the node being visited.
* @param sortedList the list of sorted results. Items in this list are 'permanently' marked e.... | 3.68 |
hbase_Mutation_setClusterIds | /**
* Marks that the clusters with the given clusterIds have consumed the mutation
* @param clusterIds of the clusters that have consumed the mutation
*/
public Mutation setClusterIds(List<UUID> clusterIds) {
ByteArrayDataOutput out = ByteStreams.newDataOutput();
out.writeInt(clusterIds.size());
for (UUID clus... | 3.68 |
pulsar_FixedColumnLengthTableMaker_make | /**
* Make a table using the specified settings.
*
* @param rows Rows to construct the table from.
* @return A String version of the table.
*/
public String make(final Object[][] rows) {
final StringBuilder builder = new StringBuilder();
int numColumns = 0;
for (final Object[] row : rows) {
// ... | 3.68 |
hudi_HoodieMetaSyncOperations_getMetastoreFieldSchemas | /**
* Get the list of field schemas from metastore.
*/
default List<FieldSchema> getMetastoreFieldSchemas(String tableName) {
return Collections.emptyList();
} | 3.68 |
morf_AbstractSqlDialectTest_testNullMetadataError | /**
* Tests that passing a null value for the metadata fails.
*/
@Test
public void testNullMetadataError() {
InsertStatement stmt = new InsertStatement().into(new TableReference(TEST_TABLE));
try {
testDialect.convertStatementToSQL(stmt, null, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE));
f... | 3.68 |
pulsar_ConcurrentLongPairSet_remove | /**
* Remove an existing entry if found.
*
* @param item1
* @return true if removed or false if item was not present
*/
public boolean remove(long item1, long item2) {
checkBiggerEqualZero(item1);
long h = hash(item1, item2);
return getSection(h).remove(item1, item2, (int) h);
} | 3.68 |
morf_RenameTable_columns | /**
* @see org.alfasoftware.morf.metadata.Table#columns()
*/
@Override
public List<Column> columns() {
return baseTable.columns();
} | 3.68 |
hadoop_AzureBlobFileSystem_getOwnerUser | /**
* Get the username of the FS.
* @return the short name of the user who instantiated the FS
*/
public String getOwnerUser() {
return abfsStore.getUser();
} | 3.68 |
pulsar_PersistentReplicator_getAvailablePermits | /**
* Calculate available permits for read entries.
*
* @return
* 0: Producer queue is full, no permits.
* -1: Rate Limiter reaches limit.
* >0: available permits for read entries.
*/
private int getAvailablePermits() {
int availablePermits = producerQueueSize - PENDING_MESSAGES_UPDATER.get(this);
... | 3.68 |
hbase_RemoteProcedureDispatcher_submitTask | // ============================================================================================
// Task Helpers
// ============================================================================================
protected final void submitTask(Runnable task) {
threadPool.execute(task);
} | 3.68 |
framework_VCalendarPanel_setRangeEnd | /**
* Sets the end range for this component. The end range is inclusive, and it
* depends on the current resolution, what is considered inside the range.
*
* @param newRangeEnd
* - the allowed range's end date
*/
public void setRangeEnd(Date newRangeEnd) {
if (!SharedUtil.equals(rangeEnd, newRangeE... | 3.68 |
hbase_ReplicationSourceLogQueue_remove | /**
* Remove head from the queue corresponding to given walGroupId.
* @param walGroupId walGroupId
*/
public void remove(String walGroupId) {
PriorityBlockingQueue<Path> queue = getQueue(walGroupId);
if (queue == null || queue.isEmpty()) {
return;
}
queue.remove();
// Decrease size logQueue.
this.met... | 3.68 |
hbase_WALKey_getNonce | /** Returns The nonce */
default long getNonce() {
return HConstants.NO_NONCE;
} | 3.68 |
hadoop_DumpUtil_bytesToHex | /**
* Convert bytes into format like 0x02 02 00 80.
* If limit is negative or too large, then all bytes will be converted.
*
* @param bytes bytes.
* @param limit limit.
* @return bytesToHex.
*/
public static String bytesToHex(byte[] bytes, int limit) {
if (limit <= 0 || limit > bytes.length) {
limit = byte... | 3.68 |
hmily_HmilyRepositoryNode_getHmilyTransactionRootPath | /**
* Get hmily transaction root path.
*
* @return hmily transaction root path
*/
public String getHmilyTransactionRootPath() {
return Joiner.on("/").join("", ROOT_PATH_PREFIX, appName, HMILY_TRANSACTION_GLOBAL);
} | 3.68 |
flink_S3TestCredentials_getTestBucketUriWithScheme | /**
* Gets the URI for the path under which all tests should put their data.
*
* <p>This method throws an exception if the bucket was not configured. Tests should use {@link
* #assumeCredentialsAvailable()} to skip tests when credentials are not available.
*/
public static String getTestBucketUriWithScheme(String ... | 3.68 |
flink_SchemaValidator_deriveTableSinkSchema | /**
* Derives the table schema for a table sink. A sink ignores a proctime attribute and needs to
* track the origin of a rowtime field.
*
* @deprecated This method combines two separate concepts of table schema and field mapping.
* This should be split into two methods once we have support for the correspondi... | 3.68 |
dubbo_AbstractJSONImpl_getListOfObjects | /**
* Gets a list from an object for the given key, and verifies all entries are objects. If the key
* is not present, this returns null. If the value is not a List or an entry is not an object,
* throws an exception.
*/
@Override
public List<Map<String, ?>> getListOfObjects(Map<String, ?> obj, String key) {
... | 3.68 |
querydsl_SQLExpressions_stddevDistinct | /**
* returns the sample standard deviation of expr, a set of numbers.
*
* @param expr argument
* @return stddev(distinct expr)
*/
public static <T extends Number> WindowOver<T> stddevDistinct(Expression<T> expr) {
return new WindowOver<T>(expr.getType(), SQLOps.STDDEV_DISTINCT, expr);
} | 3.68 |
framework_AbstractExtension_internalSetParent | /**
* Actually sets the parent and calls required listeners.
*
* @since 7.1
* @param parent
* The parent to set
*/
private void internalSetParent(ClientConnector parent) {
ClientConnector oldParent = getParent();
// Send a detach event if the component is currently attached
if (isAttached(... | 3.68 |
flink_MemorySegment_getLong | /**
* Reads a long value (64bit, 8 bytes) from the given position, in the system's native byte
* order. This method offers the best speed for long integer reading and should be used unless a
* specific byte order is required. In most cases, it suffices to know that the byte order in
* which the value is written is ... | 3.68 |
shardingsphere-elasticjob_JobFacade_checkJobExecutionEnvironment | /**
* Check job execution environment.
*
* @throws JobExecutionEnvironmentException job execution environment exception
*/
public void checkJobExecutionEnvironment() throws JobExecutionEnvironmentException {
configService.checkMaxTimeDiffSecondsTolerable();
} | 3.68 |
hadoop_Chunk_getRemain | /**
* How many bytes remain in the current chunk?
*
* @return remaining bytes left in the current chunk.
* @throws java.io.IOException
*/
public int getRemain() throws IOException {
checkEOF();
return remain;
} | 3.68 |
flink_FileSystem_loadFileSystemFactories | /**
* Loads the factories for the file systems directly supported by Flink. Aside from the {@link
* LocalFileSystem}, these file systems are loaded via Java's service framework.
*
* @return A map from the file system scheme to corresponding file system factory.
*/
private static List<FileSystemFactory> loadFileSys... | 3.68 |
shardingsphere-elasticjob_JobRegistry_isJobRunning | /**
* Judge job is running or not.
*
* @param jobName job name
* @return job is running or not
*/
public boolean isJobRunning(final String jobName) {
return jobRunningMap.getOrDefault(jobName, false);
} | 3.68 |
morf_InjectMembersRule_apply | /**
* @see org.junit.rules.MethodRule#apply(org.junit.runners.model.Statement, org.junit.runners.model.FrameworkMethod, java.lang.Object)
*/
@Override
public Statement apply(final Statement base, final FrameworkMethod method, final Object target) {
return new Statement() {
@Override
public void evaluate() t... | 3.68 |
flink_ResultPartition_isReleased | /**
* Whether this partition is released.
*
* <p>A partition is released when each subpartition is either consumed and communication is
* closed by consumer or failed. A partition is also released if task is cancelled.
*/
@Override
public boolean isReleased() {
return isReleased.get();
} | 3.68 |
framework_UIProvider_getTheme | /**
* Finds the theme to use for a specific UI. If no specific theme is
* required, <code>null</code> is returned.
* <p>
* The default implementation checks for a @{@link Theme} annotation on the
* UI class.
*
* @param event
* the UI create event with information about the UI and the
* cu... | 3.68 |
hadoop_PairedDurationTrackerFactory_asDuration | /**
* @return the global duration
*/
@Override
public Duration asDuration() {
return firstDuration.asDuration();
} | 3.68 |
hbase_ZKTableArchiveClient_enableHFileBackupAsync | /**
* Turn on backups for all HFiles for the given table.
* <p>
* All deleted hfiles are moved to the archive directory under the table directory, rather than
* being deleted.
* <p>
* If backups are already enabled for this table, does nothing.
* <p>
* If the table does not exist, the archiving the table's hfil... | 3.68 |
framework_SingleSelectionModelImpl_doSetSelected | /**
* Sets the selected item. If the item is {@code null}, clears the current
* selection if any.
*
* @param item
* the selected item or {@code null} to clear selection
* @since 8.1
*/
protected void doSetSelected(T item) {
if (getParent() == null) {
throw new IllegalStateException(
... | 3.68 |
framework_FilesystemContainer_addRoot | /**
* Adds new root file directory. Adds a file to be included as root file
* directory in the <code>FilesystemContainer</code>.
*
* @param root
* the File to be added as root directory. Null values are
* ignored.
*/
public void addRoot(File root) {
if (root != null) {
final Fil... | 3.68 |
flink_OptimizerNode_getUniqueFields | /** Gets the FieldSets which are unique in the output of the node. */
public Set<FieldSet> getUniqueFields() {
return this.uniqueFields == null ? Collections.<FieldSet>emptySet() : this.uniqueFields;
} | 3.68 |
hbase_ZNodePaths_getMetaReplicaIdFromZNode | /**
* Parse the meta replicaId from the passed znode
* @param znode the name of the znode, does not include baseZNode
*/
public int getMetaReplicaIdFromZNode(String znode) {
return znode.equals(metaZNodePrefix)
? RegionInfo.DEFAULT_REPLICA_ID
: Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1)... | 3.68 |
dubbo_ServiceDefinitionBuilder_build | /**
* Describe a Java interface in {@link ServiceDefinition}.
*
* @return Service description
*/
public static ServiceDefinition build(final Class<?> interfaceClass) {
ServiceDefinition sd = new ServiceDefinition();
build(sd, interfaceClass);
return sd;
} | 3.68 |
hbase_StoreFileWriter_build | /**
* Create a store file writer. Client is responsible for closing file when done. If metadata,
* add BEFORE closing using {@link StoreFileWriter#appendMetadata}.
*/
public StoreFileWriter build() throws IOException {
if ((dir == null ? 0 : 1) + (filePath == null ? 0 : 1) != 1) {
throw new IllegalArgumentExce... | 3.68 |
flink_BinaryStringData_toLowerCase | /**
* Converts all of the characters in this {@code BinaryStringData} to lower case.
*
* @return the {@code BinaryStringData}, converted to lowercase.
*/
public BinaryStringData toLowerCase() {
if (javaObject != null) {
return javaToLowerCase();
}
if (binarySection.sizeInBytes == 0) {
re... | 3.68 |
hadoop_CommitUtilsWithMR_getTempTaskAttemptPath | /**
* Compute the path where the output of a given task attempt will be placed.
* @param context task context
* @param jobUUID unique Job ID.
* @param out output directory of job
* @return the path to store temporary job attempt data.
*/
public static Path getTempTaskAttemptPath(TaskAttemptContext context,
fi... | 3.68 |
hbase_ProcedureExecutor_getProcedures | /**
* Get procedures.
* @return the procedures in a list
*/
public List<Procedure<TEnvironment>> getProcedures() {
List<Procedure<TEnvironment>> procedureList =
new ArrayList<>(procedures.size() + completed.size());
procedureList.addAll(procedures.values());
// Note: The procedure could show up twice in th... | 3.68 |
flink_PushCalcPastChangelogNormalizeRule_projectUsedFieldsWithConditions | /**
* Builds a new {@link StreamPhysicalCalc} on the input node with the given {@param conditions}
* and a used fields projection.
*/
private StreamPhysicalCalc projectUsedFieldsWithConditions(
RelBuilder relBuilder, RelNode input, List<RexNode> conditions, int[] usedFields) {
final RelDataType inputRowT... | 3.68 |
hadoop_FilterFileSystem_listStatus | /** List files in a directory. */
@Override
public FileStatus[] listStatus(Path f) throws IOException {
return fs.listStatus(f);
} | 3.68 |
framework_Color_withinRange | /**
* Checks whether the value is within the acceptable range of [0, 255].
*
* @param value
* @return true if the value falls within the range, false otherwise
*/
private boolean withinRange(int value) {
if (value < 0 || value > 255) {
return false;
}
return true;
} | 3.68 |
hbase_BulkLoadHFilesTool_groupOrSplitPhase | /**
* @param conn the HBase cluster connection
* @param tableName the table name of the table to load into
* @param pool the ExecutorService
* @param queue the queue for LoadQueueItem
* @param startEndKeys start and end keys
* @return A map that groups LQI by likely bulk load region targ... | 3.68 |
flink_InputGate_getAvailableFuture | /**
* @return a future that is completed if there are more records available. If there are more
* records available immediately, {@link #AVAILABLE} should be returned. Previously returned
* not completed futures should become completed once there are more records available.
*/
@Override
public CompletableFu... | 3.68 |
querydsl_SQLTemplatesRegistry_getTemplates | /**
* Get the SQLTemplates instance that matches best the SQL engine of the
* given database metadata
*
* @param md database metadata
* @return templates
* @throws SQLException
*/
public SQLTemplates getTemplates(DatabaseMetaData md) throws SQLException {
return getBuilder(md).build();
} | 3.68 |
hadoop_DataNodeFaultInjector_interceptBlockReader | /**
* Used as a hook to inject intercept When finish reading from block.
*/
public void interceptBlockReader() {} | 3.68 |
framework_Window_isResizeLazy | /**
*
* @return true if a delay is used before recalculating sizes, false if
* sizes are recalculated immediately.
*/
public boolean isResizeLazy() {
return getState(false).resizeLazy;
} | 3.68 |
flink_RichInputFormat_closeInputFormat | /**
* Closes this InputFormat instance. This method is called once per parallel instance. Resources
* allocated during {@link #openInputFormat()} should be closed in this method.
*
* @see InputFormat
* @throws IOException in case closing the resources failed
*/
@PublicEvolving
public void closeInputFormat() throw... | 3.68 |
flink_WatermarkStrategy_forMonotonousTimestamps | /**
* Creates a watermark strategy for situations with monotonously ascending timestamps.
*
* <p>The watermarks are generated periodically and tightly follow the latest timestamp in the
* data. The delay introduced by this strategy is mainly the periodic interval in which the
* watermarks are generated.
*
* @see... | 3.68 |
hudi_HoodieTableConfig_getRecordMergerStrategy | /**
* Read the payload class for HoodieRecords from the table properties.
*/
public String getRecordMergerStrategy() {
return getStringOrDefault(RECORD_MERGER_STRATEGY);
} | 3.68 |
pulsar_ProducerConfiguration_setMessageRoutingMode | /**
* Set the message routing mode for the partitioned producer.
*
* @param messageRouteMode message routing mode.
* @return producer configuration
* @see MessageRoutingMode
*/
public ProducerConfiguration setMessageRoutingMode(MessageRoutingMode messageRouteMode) {
Objects.requireNonNull(messageRouteMode);
... | 3.68 |
morf_AbstractSqlDialectTest_tableName | /**
* Many tests have common results apart from a table name decoration. This method allows for
* those tests to be commonised and save a lot of duplication between descendent classes.
*
* <p>If no decoration is required for an SQL dialect descendant classes need not implement this method.</p>
*
* @param baseName... | 3.68 |
framework_Table_addItem | /**
* Adds the new row to table and fill the visible cells (except generated
* columns) with given values.
*
* @param cells
* the Object array that is used for filling the visible cells
* new row. The types must be settable to visible column property
* types.
* @param itemId
* ... | 3.68 |
framework_AbstractComponent_isOrHasAncestor | /**
* Determine whether a <code>content</code> component is equal to, or the
* ancestor of this component.
*
* @param content
* the potential ancestor element
* @return <code>true</code> if the relationship holds
*/
protected boolean isOrHasAncestor(Component content) {
if (content instanceof HasC... | 3.68 |
dubbo_TripleServerStream_responseErr | /**
* Error in create stream, unsupported config or triple protocol error. There is no return value
* because stream will be reset if send trailers failed.
*
* @param status status of error
*/
private void responseErr(TriRpcStatus status) {
Http2Headers trailers = new DefaultHttp2Headers()
.status(... | 3.68 |
flink_FlinkContainers_start | /** Starts all containers. */
public void start() throws Exception {
if (haService != null) {
LOG.debug("Starting HA service container");
this.haService.start();
}
LOG.debug("Starting JobManager container");
this.jobManager.start();
waitUntilJobManagerRESTReachable(jobManager);
L... | 3.68 |
hbase_NamespaceStateManager_deleteNamespace | /**
* Delete the namespace state.
* @param namespace the name of the namespace to delete
*/
void deleteNamespace(String namespace) {
this.nsStateCache.remove(namespace);
} | 3.68 |
dubbo_MetadataInfo_init | /**
* Initialize necessary caches right after deserialization on the consumer side
*/
protected void init() {
buildMatchKey();
buildServiceKey(name, group, version);
// init method params
this.methodParams = URLParam.initMethodParameters(params);
// Actually, consumer params is empty after deseria... | 3.68 |
hbase_AsyncTable_deleteAll | /**
* A simple version of batch delete. It will fail if there are any failures.
* @param deletes list of things to delete.
* @return A {@link CompletableFuture} that always returns null when complete normally.
*/
default CompletableFuture<Void> deleteAll(List<Delete> deletes) {
return allOf(delete(deletes)).thenA... | 3.68 |
querydsl_JTSGeometryExpression_boundary | /**
* Returns the closure of the combinatorial boundary of this geometric object
*
* @return boundary
*/
public JTSGeometryExpression<Geometry> boundary() {
if (boundary == null) {
boundary = JTSGeometryExpressions.geometryOperation(SpatialOps.BOUNDARY, mixin);
}
return boundary;
} | 3.68 |
framework_GridConnector_updateColumnsFromState | /**
* Update columns from the current state.
*
*/
private void updateColumnsFromState() {
this.columnsUpdatedFromState = true;
final List<Column<?, JsonObject>> columns = new ArrayList<Column<?, JsonObject>>(getState().columns.size());
for (String columnId : getState().columnOrder) {
for (GridCol... | 3.68 |
hbase_HbckTableInfo_getTableDescriptor | /** Returns descriptor common to all regions. null if are none or multiple! */
TableDescriptor getTableDescriptor() {
if (htds.size() == 1) {
return (TableDescriptor) htds.toArray()[0];
} else {
LOG.error(
"None/Multiple table descriptors found for table '" + tableName + "' regions: " + htds);
}
r... | 3.68 |
flink_WindowMapState_entries | /**
* Returns all the mappings in the state.
*
* @return An iterable view of all the key-value pairs in the state.
* @throws Exception Thrown if the system cannot access the state.
*/
public Iterable<Map.Entry<RowData, UV>> entries(W window) throws Exception {
windowState.setCurrentNamespace(window);
retur... | 3.68 |
hbase_AbstractFSWAL_init | /**
* Used to initialize the WAL. Usually just call rollWriter to create the first log writer.
*/
@Override
public void init() throws IOException {
rollWriter();
} | 3.68 |
graphhopper_NodeBasedNodeContractor_calculatePriority | /**
* Warning: the calculated priority must NOT depend on priority(v) and therefore findAndHandleShortcuts should also not
* depend on the priority(v). Otherwise updating the priority before contracting in contractNodes() could lead to
* a slowish or even endless loop.
*/
@Override
public float calculatePriority(in... | 3.68 |