| name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68) |
|---|---|---|
hbase_MultiRowRangeFilter_areSerializedFieldsEqual | /**
* Returns true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {
if (o == this) {
return true;
}
if (!(o instanceof MultiRowRangeFilter)) {
return false;
}
Mul... | 3.68 |
hadoop_BlockGrouper_getRequiredNumParityBlocks | /**
* Get required parity blocks count in a BlockGroup.
* @return count of required parity blocks
*/
public int getRequiredNumParityBlocks() {
return schema.getNumParityUnits();
} | 3.68 |
hadoop_DockerContainerDeletionTask_toString | /**
* Convert the DockerContainerDeletionTask to a String representation.
*
* @return String representation of the DockerContainerDeletionTask.
*/
@Override
public String toString() {
StringBuffer sb = new StringBuffer("DockerContainerDeletionTask : ");
sb.append(" id : ").append(this.getTaskId());
sb.append... | 3.68 |
framework_Upload_getContentLength | /**
* @return the length of the file that is being uploaded
*/
public long getContentLength() {
return length;
} | 3.68 |
dubbo_NetUtils_isPreferredNetworkInterface | /**
* Is preferred {@link NetworkInterface} or not
*
* @param networkInterface {@link NetworkInterface}
* @return if the name of the specified {@link NetworkInterface} matches
* the property value from {@link CommonConstants#DUBBO_PREFERRED_NETWORK_INTERFACE}, return <code>true</code>,
* or <code>false</code>
*/... | 3.68 |
framework_HasValue_getOldValue | /**
* Returns the value of the source before this value change event
* occurred.
*
* @return the value previously held by the source of this event
*/
public V getOldValue() {
return oldValue;
} | 3.68 |
hudi_NonThrownExecutor_executeSync | /**
* Run the action in a loop and wait for completion.
*/
public void executeSync(ThrowingRunnable<Throwable> action, String actionName, Object... actionParams) {
try {
executor.submit(wrapAction(action, this.exceptionHook, actionName, actionParams)).get();
} catch (InterruptedException e) {
handleExcept... | 3.68 |
hudi_AvroSchemaCompatibility_getWriter | /**
* Gets the writer schema that was validated.
*
* @return writer schema that was validated.
*/
public Schema getWriter() {
return mWriter;
} | 3.68 |
morf_AbstractSqlDialectTest_testTruncateTableStatements | /**
* Tests SQL for clearing tables.
*/
@SuppressWarnings("unchecked")
@Test
public void testTruncateTableStatements() {
Table table = metadata.getTable(TEST_TABLE);
compareStatements(
expectedTruncateTableStatements(),
testDialect.truncateTableStatements(table));
} | 3.68 |
flink_Schema_withComment | /** Apply comment to the previous column. */
public Builder withComment(@Nullable String comment) {
if (columns.size() > 0) {
columns.set(
columns.size() - 1, columns.get(columns.size() - 1).withComment(comment));
} else {
throw new IllegalArgumentException(
"Meth... | 3.68 |
flink_RpcEndpoint_validateResourceClosed | /**
* Validate whether all the resources are closed.
*
* @return true if all the resources are closed, otherwise false
*/
boolean validateResourceClosed() {
return mainThreadExecutor.validateScheduledExecutorClosed() && resourceRegistry.isClosed();
} | 3.68 |
flink_WindowedStream_aggregate | /**
* Applies the given window function to each window. The window function is called for each
* evaluation of the window for each key individually. The output of the window function is
* interpreted as a regular non-windowed stream.
*
* <p>Arriving data is incrementally aggregated using the given aggregate functi... | 3.68 |
hudi_HoodieAvroUtils_rewriteRecord | /**
* Given an Avro record with a given schema, rewrites it into the new schema while setting fields only from the new
* schema.
* <p>
* NOTE: This method is rewriting every record's field that is record itself recursively. It's
* caller's responsibility to make sure that no unnecessary re-writing occurs (by preem... | 3.68 |
framework_VTextField_updateFieldContent | /** For internal use only. May be removed or replaced in the future. */
public void updateFieldContent(final String text) {
setPrompting(inputPrompt != null && focusedTextField != this
&& (text.equals("")));
String fieldValue;
if (prompting) {
fieldValue = isReadOnly() ? "" : inputPromp... | 3.68 |
hbase_HFileArchiver_resolveAndArchiveFile | /**
* Attempt to archive the passed in file to the archive directory.
* <p>
* If the same file already exists in the archive, it is moved to a timestamped directory under
* the archive directory and the new file is put in its place.
* @param archiveDir {@link Path} to the directory that stores the archives o... | 3.68 |
morf_SchemaUtils_indexes | /**
* @see org.alfasoftware.morf.metadata.SchemaUtils.TableBuilder#indexes(java.lang.Iterable)
*/
@Override
public TableBuilder indexes(Iterable<? extends Index> indexes) {
return new TableBuilderImpl(getName(), columns(), indexes, isTemporary());
} | 3.68 |
hbase_HFileCorruptionChecker_createQuarantinePath | /**
* Given a path, generates a new path to where we move a corrupted hfile (bad trailer, no
* trailer). Path to a corrupt hfile (assumes that it is HBASE_DIR/ table /region/cf/file)
* @return path to where corrupted files are stored. This should be
* HBASE_DIR/.corrupt/table/region/cf/file.
*/
Path create... | 3.68 |
flink_CatalogManager_qualifyIdentifier | /**
* Returns the full name of the given table path, this name may be padded with current
* catalog/database name based on the {@code identifier's} length.
*
* @param identifier an unresolved identifier
* @return a fully qualified object identifier
*/
public ObjectIdentifier qualifyIdentifier(UnresolvedIdentifier... | 3.68 |
hadoop_BlockStorageMovementAttemptedItems_start | /**
* Starts the monitor thread.
*/
public synchronized void start() {
monitorRunning = true;
timerThread = new Daemon(new BlocksStorageMovementAttemptMonitor());
timerThread.setName("BlocksStorageMovementAttemptMonitor");
timerThread.start();
} | 3.68 |
hudi_WriteOperationType_isDataChange | /**
* Whether the operation changes the dataset.
*/
public static boolean isDataChange(WriteOperationType operation) {
return operation == WriteOperationType.INSERT
|| operation == WriteOperationType.UPSERT
|| operation == WriteOperationType.UPSERT_PREPPED
|| operation == WriteOperationType.DELETE... | 3.68 |
flink_CheckpointConfig_enableExternalizedCheckpoints | /**
* Sets the mode for externalized checkpoint clean-up. Externalized checkpoints will be enabled
* automatically unless the mode is set to {@link
* ExternalizedCheckpointCleanup#NO_EXTERNALIZED_CHECKPOINTS}.
*
* <p>Externalized checkpoints write their meta data out to persistent storage and are
* <strong>not</s... | 3.68 |
hbase_MasterRpcServices_createConfigurationSubset | /**
* @return Subset of configuration to pass initializing regionservers: e.g. the filesystem to use
* and root directory to use.
*/
private RegionServerStartupResponse.Builder createConfigurationSubset() {
RegionServerStartupResponse.Builder resp =
addConfig(RegionServerStartupResponse.newBuilder(), H... | 3.68 |
framework_GridElement_getSubPart | /**
* Helper function to get Grid subparts wrapped correctly
*
* @param subPartSelector
* SubPart to be used in ComponentLocator
* @return SubPart element wrapped in TestBenchElement class
*/
private TestBenchElement getSubPart(String subPartSelector) {
return (TestBenchElement) findElement(By.vaad... | 3.68 |
framework_AbstractComponent_fireComponentEvent | /**
* Emits the component event. It is transmitted to all registered listeners
* interested in such events.
*/
protected void fireComponentEvent() {
fireEvent(new Component.Event(this));
} | 3.68 |
flink_ContinuousProcessingTimeTrigger_of | /**
* Creates a trigger that continuously fires based on the given interval.
*
* @param interval The time interval at which to fire.
* @param <W> The type of {@link Window Windows} on which this trigger can operate.
*/
public static <W extends Window> ContinuousProcessingTimeTrigger<W> of(Time interval) {
retu... | 3.68 |
flink_KubernetesStateHandleStore_clearEntries | /**
* Remove all the filtered keys in the ConfigMap.
*
* @throws Exception when removing the keys failed
*/
@Override
public void clearEntries() throws Exception {
updateConfigMap(
configMap -> {
configMap.getData().keySet().removeIf(configMapKeyFilter);
... | 3.68 |
pulsar_LoadSimulationController_change | // Change producer settings for a given topic and JCommander arguments.
private void change(final ShellArguments arguments, final String topic, final int client) throws Exception {
outputStreams[client].write(LoadSimulationClient.CHANGE_COMMAND);
writeProducerOptions(outputStreams[client], arguments, topic);
... | 3.68 |
hbase_MasterQuotaManager_removeRegionSizesForTable | /**
* Removes each region size entry where the RegionInfo references the provided TableName.
* @param tableName tableName.
*/
public void removeRegionSizesForTable(TableName tableName) {
regionSizes.keySet().removeIf(regionInfo -> regionInfo.getTable().equals(tableName));
} | 3.68 |
framework_FileParameters_setSize | /**
* Sets the file size.
*
* @param size
* Size of the file.
*/
public void setSize(long size) {
this.size = size;
} | 3.68 |
dubbo_ClientStream_onComplete | /**
* Callback when request completed.
*
* @param status response status
* @param attachments attachments received from remote peer
* @param reserved triple protocol reserved data
*/
default void onComplete(
TriRpcStatus status,
Map<String, Object> attachments,
Map<String, String> ... | 3.68 |
hudi_BaseRollbackHelper_deleteFiles | /**
* Common method used for cleaning out files during rollback.
*/
protected List<HoodieRollbackStat> deleteFiles(HoodieTableMetaClient metaClient, List<String> filesToBeDeleted, boolean doDelete) throws IOException {
return filesToBeDeleted.stream().map(fileToDelete -> {
String basePath = metaClient.getBasePa... | 3.68 |
hbase_Pair_setFirst | /**
* Replace the first element of the pair.
* @param a operand
*/
public void setFirst(T1 a) {
this.first = a;
} | 3.68 |
pulsar_ManagedLedgerImpl_delete | /**
* Delete this ManagedLedger completely from the system.
*
* @throws Exception
*/
@Override
public void delete() throws InterruptedException, ManagedLedgerException {
final CountDownLatch counter = new CountDownLatch(1);
final AtomicReference<ManagedLedgerException> exception = new AtomicReference<>();
... | 3.68 |
framework_FocusableComplexPanel_setFocus | /**
* Sets/Removes the keyboard focus to the panel.
*
* @param focus
* If set to true then the focus is moved to the panel, if set to
* false the focus is removed
*/
public void setFocus(boolean focus) {
if (focus) {
FocusImpl.getFocusImplForPanel().focus(getElement());
} els... | 3.68 |
framework_VScrollTable_updatePageLength | /**
* Determines the pagelength when the table height is fixed.
*/
public void updatePageLength() {
// Only update if visible and enabled
if (!isVisible() || !enabled) {
return;
}
if (scrollBody == null) {
return;
}
if (isDynamicHeight()) {
return;
}
int rowH... | 3.68 |
flink_FlinkFilterJoinRule_validateJoinFilters | /**
* Validates that target execution framework can satisfy join filters.
*
* <p>If the join filter cannot be satisfied (for example, if it is {@code l.c1 > r.c2} and the
* join only supports equi-join), removes the filter from {@code joinFilters} and adds it to
* {@code aboveFilters}.
*
* <p>The default impleme... | 3.68 |
hadoop_DiskBalancerWorkItem_getBlocksCopied | /**
* Returns number of blocks copied for this DiskBalancerWorkItem.
*
* @return long count of blocks.
*/
public long getBlocksCopied() {
return blocksCopied;
} | 3.68 |
querydsl_NumberExpression_floor | /**
* Create a {@code floor(this)} expression
*
* <p>Returns the largest (closest to positive infinity)
* {@code double} value that is less than or equal to the
* argument and is equal to a mathematical integer.</p>
*
* @return floor(this)
* @see java.lang.Math#floor(double)
*/
public NumberExpression<T> floor... | 3.68 |
flink_PartialCachingAsyncLookupProvider_of | /**
* Build a {@link PartialCachingAsyncLookupProvider} from the specified {@link
* AsyncLookupFunction} and {@link LookupCache}.
*/
static PartialCachingAsyncLookupProvider of(
AsyncLookupFunction asyncLookupFunction, LookupCache cache) {
return new PartialCachingAsyncLookupProvider() {
@Overrid... | 3.68 |
morf_AbstractSqlDialectTest_expectedSelectEvery | /**
* @return The expected sql.
*/
protected String expectedSelectEvery() {
return "SELECT MIN(booleanField) FROM " + tableName(TEST_TABLE);
} | 3.68 |
morf_OracleDialect_getSqlForIsNull | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getSqlForIsNull(org.alfasoftware.morf.sql.element.Function)
*/
@Override
protected String getSqlForIsNull(Function function) {
return "nvl(" + getSqlFrom(function.getArguments().get(0)) + ", " + getSqlFrom(function.getArguments().get(1)) + ") ";
} | 3.68 |
flink_StringUtils_toQuotedListString | /**
* Generates a string containing a comma-separated list of values in double-quotes. Uses
* lower-cased values returned from {@link Object#toString()} method for each element in the
* given array. Null values are skipped.
*
* @param values array of elements for the list
* @return The string with quoted list of ... | 3.68 |
framework_MenuBar_setScrollEnabled | /**
* Sets the menu scroll enabled or disabled.
*
* @since 7.2.6
* @param enabled
* the enabled state of the scroll.
*/
public void setScrollEnabled(boolean enabled) {
if (enabled) {
if (vertical) {
outer.getStyle().setOverflowY(Overflow.AUTO);
} else {
outer... | 3.68 |
framework_DataCommunicator_getInMemorySorting | /**
* Returns the {@link Comparator} to use with in-memory sorting.
*
* @return comparator used to sort data
* @since 8.0.6
*/
public Comparator<T> getInMemorySorting() {
return inMemorySorting;
} | 3.68 |
hbase_BackupManager_close | /**
* Stop all the work of backup.
*/
@Override
public void close() {
if (systemTable != null) {
try {
systemTable.close();
} catch (Exception e) {
LOG.error(e.toString(), e);
}
}
} | 3.68 |
hmily_SwaggerConfig_api | /**
* Api docket.
*
* @return the docket
*/
@Bean
public Docket api() {
return new Docket(DocumentationType.SWAGGER_2).apiInfo(apiInfo()).select().apis(RequestHandlerSelectors.withClassAnnotation(RestController.class))
// .paths(paths())
.build().pathMapping("/").directModelSubstitute(Lo... | 3.68 |
framework_TreeGridElement_getExpandElement | /**
* Gets the expand/collapse element for the given row.
*
* @param rowIndex
* 0-based row index
* @param hierarchyColumnIndex
* 0-based index of the hierarchy column
* @return the {@code span} element that is clicked for expanding/collapsing
* a rows
* @throws NoSuchElementExcep... | 3.68 |
hbase_BlockIOUtils_builderFromContext | /**
* Construct a fresh {@link AttributesBuilder} from the provided {@link Context}, populated with
* relevant attributes populated by {@link HFileContextAttributesBuilderConsumer#CONTEXT_KEY}.
*/
private static AttributesBuilder builderFromContext(Context context) {
final AttributesBuilder attributesBuilder = Att... | 3.68 |
MagicPlugin_CompatibilityUtilsBase_checkChunk | /**
* Take care if setting generate to false, the chunk will load but not show as loaded
*/
@Override
public boolean checkChunk(World world, int chunkX, int chunkZ, boolean generate) {
if (!world.isChunkLoaded(chunkX, chunkZ)) {
loadChunk(world, chunkX, chunkZ, generate);
return false;
}
r... | 3.68 |
flink_DeclarativeAggregateFunction_operands | /**
* Args of accumulate and retract, the input value (usually obtained from a new arrived data).
*/
public final UnresolvedReferenceExpression[] operands() {
int operandCount = operandCount();
Preconditions.checkState(
operandCount >= 0, "inputCount must be greater than or equal to 0.");
Unre... | 3.68 |
framework_IndexedContainer_generateId | /**
* Generates an unique identifier for use as an item id. Guarantees that the
* generated id is not currently used as an id.
*
* @return
*/
private Serializable generateId() {
Serializable id;
do {
id = Integer.valueOf(nextGeneratedItemId++);
} while (items.containsKey(id));
return id;
} | 3.68 |
hadoop_YarnVersionInfo_getDate | /**
* The date that YARN was compiled.
* @return the compilation date in unix date format
*/
public static String getDate() {
return YARN_VERSION_INFO._getDate();
} | 3.68 |
flink_LogicalSlot_releaseSlot | /**
* Releases this slot.
*
* @return Future which is completed once the slot has been released, in case of a failure it is
* completed exceptionally
* @deprecated Added because extended the actual releaseSlot method with cause parameter.
*/
default CompletableFuture<?> releaseSlot() {
return releaseSlot(... | 3.68 |
flink_JobGraph_getMaximumParallelism | /**
* Gets the maximum parallelism of all operations in this job graph.
*
* @return The maximum parallelism of this job graph
*/
public int getMaximumParallelism() {
int maxParallelism = -1;
for (JobVertex vertex : taskVertices.values()) {
maxParallelism = Math.max(vertex.getParallelism(), maxParall... | 3.68 |
flink_TimestampData_fromInstant | /**
* Creates an instance of {@link TimestampData} from an instance of {@link Instant}.
*
* @param instant an instance of {@link Instant}
*/
public static TimestampData fromInstant(Instant instant) {
long epochSecond = instant.getEpochSecond();
int nanoSecond = instant.getNano();
long millisecond = epo... | 3.68 |
hbase_HMobStore_resolve | /**
* Reads the cell from the mob file.
* @param reference The cell found in the HBase, its value is a path to a mob
* file.
* @param cacheBlocks Whether the scanner should cache blocks.
* @param readPt the read point.
* @pa... | 3.68 |
graphhopper_VectorTile_getGeometryCount | /**
* <pre>
* Contains a stream of commands and parameters (vertices).
* A detailed description on geometry encoding is located in
* section 4.3 of the specification.
* </pre>
*
* <code>repeated uint32 geometry = 4 [packed = true];</code>
*/
public int getGeometryCount() {
return geometry_.size();
} | 3.68 |
flink_ExternalTypeInfo_of | /**
* Creates type information for a {@link DataType} that is possibly represented by internal data
* structures but serialized and deserialized into external data structures.
*
* @param isInternalInput allows for a non-bidirectional serializer from internal to external
*/
public static <T> ExternalTypeInfo<T> of(... | 3.68 |
framework_BinderValidationStatus_hasErrors | /**
* Gets whether the validation for the binder failed or not.
*
* @return {@code true} if validation failed, {@code false} if validation
* passed
*/
public boolean hasErrors() {
return binderStatuses.stream().filter(ValidationResult::isError)
.findAny().isPresent()
|| bindingS... | 3.68 |
flink_BatchExecSink_getPhysicalRowType | /** Get the physical row type with given column indices. */
private RowType getPhysicalRowType(ResolvedSchema schema, int[] columnIndices) {
List<Column> columns = schema.getColumns();
List<Column> requireColumns = new ArrayList<>();
for (int columnIndex : columnIndices) {
requireColumns.add(columns... | 3.68 |
hbase_RegionReplicationSink_add | /**
* Add this edit to replication queue.
* <p/>
* The {@code rpcCall} is for retaining the cells if the edit is built within an rpc call and the
* rpc call has cell scanner, which is off heap.
*/
public void add(WALKeyImpl key, WALEdit edit, ServerCall<?> rpcCall) {
if (!tableDesc.hasRegionMemStoreReplication()... | 3.68 |
flink_Pattern_oneOrMore | /**
* Specifies that this pattern can occur {@code one or more} times and time interval corresponds
* to the maximum time gap between previous and current event for each times. This means at
* least one and at most infinite number of events can be matched to this pattern.
*
* <p>If this quantifier is enabled for a... | 3.68 |
druid_Base64_base64toInt | /**
* Translates the specified character, which is assumed to be in the "Base 64 Alphabet" into its equivalent 6-bit
* positive integer.
*
* @throw IllegalArgumentException or ArrayOutOfBoundsException if c is not in the Base64 Alphabet.
*/
private static int base64toInt(char c, byte[] alphaToInt) {
int result... | 3.68 |
hadoop_RouterMetricsService_getJvmMetrics | /**
* Get the JVM metrics for the Router.
*
* @return JVM metrics.
*/
public JvmMetrics getJvmMetrics() {
if (this.routerMetrics == null) {
return null;
}
return this.routerMetrics.getJvmMetrics();
} | 3.68 |
hadoop_AzureFileSystemInstrumentation_currentDownloadBytesPerSecond | /**
* Record the current bytes-per-second download rate seen.
* @param bytesPerSecond The bytes per second.
*/
public synchronized void currentDownloadBytesPerSecond(long bytesPerSecond) {
if (bytesPerSecond > currentMaximumDownloadBytesPerSecond) {
currentMaximumDownloadBytesPerSecond = bytesPerSecond;
ma... | 3.68 |
graphhopper_ArrayUtil_applyOrder | /**
* Creates a copy of the given array such that it is ordered by the given order.
* The order can be shorter or equal, but not longer than the array.
*/
public static int[] applyOrder(int[] arr, int[] order) {
if (order.length > arr.length)
throw new IllegalArgumentException("sort order must not be sho... | 3.68 |
pulsar_AbstractMetrics_getManagedLedgerCacheStats | /**
* Returns the managed ledger cache statistics from ML factory.
*
* @return
*/
protected ManagedLedgerFactoryMXBean getManagedLedgerCacheStats() {
return ((ManagedLedgerFactoryImpl) pulsar.getManagedLedgerFactory()).getCacheStats();
} | 3.68 |
dubbo_MulticastRegistry_destroy | /**
* Remove the expired providers(if clean is true), leave the multicast group and close the multicast socket.
*/
@Override
public void destroy() {
super.destroy();
try {
ExecutorUtil.cancelScheduledFuture(cleanFuture);
} catch (Throwable t) {
logger.warn(REGISTRY_SOCKET_EXCEPTION, "", ""... | 3.68 |
hadoop_EntityCacheItem_refreshCache | /**
* Refresh this cache item if it needs refresh. This will enforce an appLogs
* rescan and then load new data. The refresh process is synchronized with
* other operations on the same cache item.
*
* @param aclManager ACL manager for the timeline storage
* @param metrics Metrics to trace the status of the entity... | 3.68 |
hadoop_ConfigurationUtils_copy | /**
* Copy configuration key/value pairs from one configuration to another if a property exists in the target, it gets
* replaced.
*
* @param source source configuration.
* @param target target configuration.
*/
public static void copy(Configuration source, Configuration target) {
Check.notNull(source, "source"... | 3.68 |
hadoop_SysInfoWindows_getNumVCoresUsed | /** {@inheritDoc} */
@Override
public synchronized float getNumVCoresUsed() {
refreshIfNeeded();
float ret = cpuUsage;
if (ret != -1) {
ret = ret / 100F;
}
return ret;
} | 3.68 |
framework_Window_getAssistivePrefix | /**
* Gets the accessibility prefix for the window caption.
*
* This prefix is read to assistive device users before the window caption,
* but not visible on the page.
*
* @return The accessibility prefix
*/
public String getAssistivePrefix() {
return getState(false).assistivePrefix;
} | 3.68 |
hadoop_Service_getValue | /**
* Get the integer value of a state
* @return the numeric value of the state
*/
public int getValue() {
return value;
} | 3.68 |
framework_Calendar_getWeeklyCaptionFormat | /**
* Gets the date caption format for the weekly view.
*
* @return The pattern used in caption of dates in weekly view.
*/
public String getWeeklyCaptionFormat() {
return weeklyCaptionFormat;
} | 3.68 |
hbase_SnapshotQuotaObserverChore_getSnapshotsToComputeSize | /**
* Fetches each table with a quota (table or namespace quota), and then fetch the name of each
* snapshot which was created from that table.
* @return A mapping of table to snapshots created from that table
*/
Multimap<TableName, String> getSnapshotsToComputeSize() throws IOException {
Set<TableName> tablesToF... | 3.68 |
hadoop_FSEditLogAsync_logEdit | // return whether edit log wants to sync.
boolean logEdit() {
return log.doEditTransaction(op);
} | 3.68 |
hmily_HmilyMetaDataManager_register | /**
* Register hmily metadata.
*
* @param hmilyTacResource the hmily resource
* @param databaseType database type
*/
public static void register(final HmilyTacResource hmilyTacResource, final DatabaseType databaseType) {
DataSourceMetaData dataSourceMetaData;
try {
dataSourceMetaData = DataSourceMe... | 3.68 |
hudi_HiveHoodieTableFileIndex_listFileSlices | /**
* Lists latest file-slices (base-file along w/ delta-log files) per partition.
*
* @return mapping from string partition paths to its base/log files
*/
public Map<String, List<FileSlice>> listFileSlices() {
return getAllInputFileSlices().entrySet().stream()
.collect(Collectors.toMap(e -> e.getKey().getP... | 3.68 |
framework_VCalendarPanel_focusPreviousDay | /**
* Moves the focus backward the given number of days.
*/
private void focusPreviousDay(int days) {
focusNextDay(-days);
} | 3.68 |
flink_OutputFormatBase_postClose | /**
* Tear down the OutputFormat. This method is called at the end of {@link
* OutputFormatBase#close()}.
*/
protected void postClose() {} | 3.68 |
flink_BlobUtils_moveTempFileToStore | /**
* Moves the temporary <tt>incomingFile</tt> to its permanent location where it is available for
* use (not thread-safe!).
*
* @param incomingFile temporary file created during transfer
* @param jobId ID of the job this blob belongs to or <tt>null</tt> if job-unrelated
* @param blobKey BLOB key identifying the... | 3.68 |
hudi_TableCommand_createTable | /**
* Create a Hoodie Table if it does not exist.
*
* @param path Base Path
* @param name Hoodie Table Name
* @param tableTypeStr Hoodie Table Type
* @param payloadClass Payload Class
*/
@ShellMethod(key = "create", value = "Create a hoodie table if not present")
public String createTable(
@S... | 3.68 |
hbase_MetaTableAccessor_getDaughterRegions | /**
* Returns the daughter regions by reading the corresponding columns of the catalog table Result.
* @param data a Result object from the catalog table scan
* @return pair of RegionInfo or PairOfSameType(null, null) if region is not a split parent
*/
public static PairOfSameType<RegionInfo> getDaughterRegions(Res... | 3.68 |
flink_CatalogManager_dropTemporaryTable | /**
* Drop a temporary table in a given fully qualified path.
*
* @param objectIdentifier The fully qualified path of the table to drop.
* @param ignoreIfNotExists If false exception will be thrown if the table to be dropped does
* not exist.
*/
public void dropTemporaryTable(ObjectIdentifier objectIdentifier... | 3.68 |
hbase_IdLock_tryLockEntry | /**
* Blocks until the lock corresponding to the given id is acquired.
* @param id an arbitrary number to lock on
* @param time time to wait in ms
* @return an "entry" to pass to {@link #releaseLockEntry(Entry)} to release the lock
* @throws IOException if interrupted
*/
public Entry tryLockEntry(long id, long ... | 3.68 |
hadoop_LoggingAuditor_setLastHeader | /**
* Set that last header.
* @param lastHeader the value for the lastHeader field.
*/
private void setLastHeader(final String lastHeader) {
this.lastHeader = lastHeader;
} | 3.68 |
framework_ListDataSource_asList | /**
* Gets the list that backs this datasource. Any changes made to this list
* will be reflected in the datasource.
* <p>
* Note: The list is not the same list as passed into the data source via
* the constructor.
*
* @return Returns a list implementation that wraps the real list that backs
* the data ... | 3.68 |
hbase_ResponseConverter_buildGetServerInfoResponse | /**
* A utility to build a GetServerInfoResponse.
* @return the response
*/
public static GetServerInfoResponse buildGetServerInfoResponse(final ServerName serverName,
final int webuiPort) {
GetServerInfoResponse.Builder builder = GetServerInfoResponse.newBuilder();
ServerInfo.Builder serverInfoBuilder = Serve... | 3.68 |
hudi_InternalSchemaUtils_pruneType | /**
* Project hudi type by projected cols field_ids
* this is auxiliary function used by pruneInternalSchema.
*/
private static Type pruneType(Type type, List<Integer> fieldIds) {
switch (type.typeId()) {
case RECORD:
Types.RecordType record = (Types.RecordType) type;
List<Types.Field> fields = rec... | 3.68 |
framework_VAbstractPopupCalendar_onClick | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.ClickHandler#onClick(com.google.gwt.event
* .dom.client.ClickEvent)
*/
@Override
public void onClick(ClickEvent event) {
if (event.getSource() == calendarToggle && isEnabled()) {
if (open) {
closeCalendarPanel();
} else ... | 3.68 |
hbase_MiniHBaseCluster_stopMaster | /**
* Shut down the specified master cleanly
* @param serverNumber Used as index into a list.
* @param shutdownFS True is we are to shutdown the filesystem as part of this master's
* shutdown. Usually we do but you do not want to do this if you are running
* multiple maste... | 3.68 |
hbase_TableBackupClient_beginBackup | /**
* Begin the overall backup.
* @param backupInfo backup info
* @throws IOException exception
*/
protected void beginBackup(BackupManager backupManager, BackupInfo backupInfo)
throws IOException {
BackupSystemTable.snapshot(conn);
backupManager.setBackupInfo(backupInfo);
// set the start timestamp of the... | 3.68 |
framework_VaadinService_removeSessionDestroyListener | /**
* Removes a Vaadin service session destroy listener from this service.
*
* @see #addSessionDestroyListener(SessionDestroyListener)
*
* @param listener
* the vaadin service session destroy listener
* @deprecated use the {@link Registration} object returned by
* {@link #addSessionDestro... | 3.68 |
flink_AbstractBinaryExternalMerger_mergeChannelList | /**
* Merges the given sorted runs to a smaller number of sorted runs.
*
* @param channelIDs The IDs of the sorted runs that need to be merged.
* @return A list of the IDs of the merged channels.
* @throws IOException Thrown, if the readers or writers encountered an I/O problem.
*/
public List<ChannelWithMeta> me... | 3.68 |
flink_FromElementsFunction_getNumElements | /**
* Gets the number of elements produced in total by this function.
*
* @return The number of elements produced in total.
*/
public int getNumElements() {
return numElements;
} | 3.68 |
hmily_HmilyTccTransactionExecutor_preTryParticipant | /**
* this is Participant transaction preTry.
*
* @param context transaction context.
* @param point cut point
* @return TccTransaction hmily transaction
*/
public HmilyParticipant preTryParticipant(final HmilyTransactionContext context, final ProceedingJoinPoint point) {
LogUtil.debug(LOGGER, "participant ... | 3.68 |
morf_AbstractSqlDialectTest_checkDatabaseByteArrayToRecordValue | /**
* Format a value through the result set record for testing.
*
* @param value The value to format.
* @return The formatted value.
*/
private String checkDatabaseByteArrayToRecordValue(final byte[] value) throws SQLException {
ResultSet resultSet = mock(ResultSet.class);
when(resultSet.getBytes(anyInt())).t... | 3.68 |
flink_RocksDBStateBackend_getEmbeddedRocksDBStateBackend | /** @return The underlying {@link EmbeddedRocksDBStateBackend} instance. */
@VisibleForTesting
EmbeddedRocksDBStateBackend getEmbeddedRocksDBStateBackend() {
return rocksDBStateBackend;
} | 3.68 |
open-banking-gateway_Xs2aLogResolver_log | //responses
public void log(String message, Response<T> response) {
ResponseLog<T> responseLog = new ResponseLog<>();
responseLog.setStatusCode(response.getStatusCode());
responseLog.setHeaders(response.getHeaders());
responseLog.setBody(response.getBody());
if (log.isDebugEnabled()) {
log.... | 3.68 |
hmily_AbstractHmilySQLParserExecutor_generateHmilyInsertStatement | /**
* Generate Hmily insert statement.
*
* @param insertStatement insert statement
* @param hmilyInsertStatement hmily insert statement
* @return hmily insert statement
*/
public HmilyInsertStatement generateHmilyInsertStatement(final InsertStatement insertStatement, final HmilyInsertStatement hmilyInsertStatemen... | 3.68 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.