name stringlengths 12 178 | code_snippet stringlengths 8 36.5k | score float64 3.26 3.68 |
|---|---|---|
flink_RestartPipelinedRegionFailoverStrategy_getRegionsToRestart_rdh | /**
* All 'involved' regions are proposed to be restarted. The 'involved' regions are calculated
* with rules below: 1. The region containing the failed task is always involved 2. If an input
* result partition of an involved region is not available, i.e. Missing or Corrupted, the
* region containing the partition ... | 3.26 |
flink_RestartPipelinedRegionFailoverStrategy_getTasksNeedingRestart_rdh | // task failure handling
// ------------------------------------------------------------------------
/**
* Returns a set of IDs corresponding to the set of vertices that should be restarted. In this
* strategy, all task vertices in 'involved' regions are proposed to be restarted. The
* 'involved' regions are calcula... | 3.26 |
flink_RestartPipelinedRegionFailoverStrategy_getFailoverRegion_rdh | // ------------------------------------------------------------------------
// testing
// ------------------------------------------------------------------------
/**
* Returns the failover region that contains the given execution vertex.
*
* @return the failover region that contains the given execution vertex
*/
@... | 3.26 |
flink_WindowMapState_put_rdh | /**
* Associates a new value with the given key.
*
* @param key
* The key of the mapping
* @param value
* The new value of the mapping
* @throws Exception
* Thrown if the system cannot access the state.
*/
public void put(W window, RowData key, UV value) throws Exception {
windowState.setCurrentNames... | 3.26 |
flink_WindowMapState_contains_rdh | /**
 * Returns whether the given mapping exists.
*
* @param key
* The key of the mapping
 * @return True if there exists a mapping whose key equals the given key
* @throws Exception
* Thrown if the system cannot access the state.
*/ public boolean contains(W window, RowData key) throws Exception {
... | 3.26 |
flink_WindowMapState_remove_rdh | /**
* Deletes the mapping of the given key.
*
* @param key
* The key of the mapping
* @throws Exception
* Thrown if the system cannot access the state.
*/
public void remove(W window, RowData key) throws Exception {
windowState.setCurrentNamespace(window);
windowState.remove(key);
} | 3.26 |
flink_WindowMapState_putAll_rdh | /**
* Copies all of the mappings from the given map into the state.
*
* @param map
* The mappings to be stored in this state
* @throws Exception
* Thrown if the system cannot access the state.
*/
public void putAll(W window, Map<RowData, UV> map) throws Exception {
windowState.setCurrentNamespace(window)... | 3.26 |
flink_WindowMapState_get_rdh | /**
* Returns the current value associated with the given key.
*
* @param key
* The key of the mapping
* @return The value of the mapping with the given key
* @throws Exception
* Thrown if the system cannot access the state.
*/
public UV get(W window, RowData key) throws
Exception {windowState.setCurrentNa... | 3.26 |
flink_WindowMapState_entries_rdh | /**
* Returns all the mappings in the state.
*
* @return An iterable view of all the key-value pairs in the state.
* @throws Exception
* Thrown if the system cannot access the state.
*/
public Iterable<Map.Entry<RowData, UV>> entries(W window) throws Exception {
windowState.setCurrentNamespace(window);
... | 3.26 |
flink_WindowMapState_values_rdh | /**
* Returns all the values in the state.
*
* @return An iterable view of all the values in the state.
* @throws Exception
* Thrown if the system cannot access the state.
*/
public Iterable<UV> values(W window) throws Exception {
windowState.setCurrentNamespace(window);
return windowState.values();
} | 3.26 |
flink_WindowMapState_iterator_rdh | /**
* Iterates over all the mappings in the state.
*
* @return An iterator over all the mappings in the state
* @throws Exception
* Thrown if the system cannot access the state.
*/
public Iterator<Map.Entry<RowData, UV>> iterator(W window) throws Exception {windowState.setCurrentNamespace(window);
return wi... | 3.26 |
flink_WindowMapState_keys_rdh | /**
* Returns all the keys in the state.
*
* @return An iterable view of all the keys in the state.
* @throws Exception
* Thrown if the system cannot access the state.
*/
public Iterable<RowData> keys(W window) throws Exception {
windowState.setCurrentNamespace(window);
return windowState.keys();} | 3.26 |
flink_WindowMapState_isEmpty_rdh | /**
* Returns true if this state contains no key-value mappings, otherwise false.
*
* @return True if this state contains no key-value mappings, otherwise false.
* @throws Exception
* Thrown if the system cannot access the state.
*/
public boolean isEmpty(W window) throws Exception
{
windowState.setCurrentN... | 3.26 |
flink_MainThreadValidatorUtil_isRunningInExpectedThread_rdh | /**
 * Returns true iff the current thread is equal to the provided expected thread and logs
* violations.
*
* @param expected
* the expected main thread.
 * @return true iff the current thread is equal to the provided expected thread.
*/
public static boolean
isRunningInExpectedThread(@Nullable
Thread expecte... | 3.26 |
flink_FileSystemSafetyNet_wrapWithSafetyNetWhenActivated_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
static FileSystem wrapWithSafetyNetWhenActivated(FileSystem fs) {
SafetyNetCloseableRegistry reg = REGISTRIES.get();
return reg != null ? new Safet... | 3.26 |
flink_FileSystemSafetyNet_closeSafetyNetAndGuardedResourcesForThread_rdh | /**
* Closes the safety net for a thread. This closes all remaining unclosed streams that were
* opened by safety-net-guarded file systems. After this method was called, no streams can be
* opened any more from any FileSystem instance that was obtained while the thread was guarded
* by the safety net.
*
* <p>This... | 3.26 |
flink_FileSystemSafetyNet_initializeSafetyNetForThread_rdh | // ------------------------------------------------------------------------
// Activating / Deactivating
// ------------------------------------------------------------------------
/**
* Activates the safety net for a thread. {@link FileSystem} instances obtained by the thread
* that called this method will be guarde... | 3.26 |
flink_AsyncSinkWriterStateSerializer_serialize_rdh | /**
* Serializes state in form of
* [DATA_IDENTIFIER,NUM_OF_ELEMENTS,SIZE1,REQUEST1,SIZE2,REQUEST2....].
*/
@Override
public byte[] serialize(BufferedRequestState<RequestEntryT> obj) throws IOException {
Collection<RequestEntryWrapper<RequestEntryT>> bufferState = obj.getBufferedRequ... | 3.26 |
flink_KubernetesStateHandleStore_releaseAndTryRemove_rdh | /**
 * Removes the key from the state ConfigMap; the corresponding state on external storage is removed as well.
* It returns the {@link RetrievableStateHandle} stored under the given state node if any.
*
* @param key
* Key to be removed from ConfigMap
* @return True if the state handle isn't listed anymore.
* @throws Exce... | 3.26 |
flink_KubernetesStateHandleStore_addEntry_rdh | /**
 * Adds an entry into the ConfigMap. If the entry already exists and contains a delete marker, we try
* to finish the removal before the actual update.
*/private Optional<KubernetesConfigMap> addEntry(KubernetesConfigMap configMap, String key, byte[] serializedStateHandle) throws Exception {
final
String oldBase64Cont... | 3.26 |
flink_KubernetesStateHandleStore_replaceEntry_rdh | /**
 * Replace the entry in the ConfigMap. If the entry already exists and contains a delete marker,
* we treat it as non-existent and perform the best effort removal.
*/
private Optional<KubernetesConfigMap> replaceEntry(KubernetesConfigMap configMap, String key, byte[] serializedStateHandle, AtomicReference<Retriev... | 3.26 |
flink_KubernetesStateHandleStore_addAndLock_rdh | /**
* Creates a state handle, stores it in ConfigMap. We could guarantee that only the leader could
* update the ConfigMap. Since “Get(check the leader)-and-Update(write back to the ConfigMap)”
* is a transactional operation.
*
* @param key
* Key in ConfigMap
* @param state
* State to be added
* @throws Al... | 3.26 |
flink_KubernetesStateHandleStore_getAllAndLock_rdh | /**
* Gets all available state handles from Kubernetes.
*
* @return All state handles from ConfigMap.
*/
@Override
public List<Tuple2<RetrievableStateHandle<T>, String>> getAllAndLock() {
return kubeClient.getConfigMap(configMapName).map(configMap -> {
final List<Tuple2<RetrievableStateHandle<T>, Strin... | 3.26 |
flink_KubernetesStateHandleStore_getAndLock_rdh | /**
* Gets the {@link RetrievableStateHandle} stored in the given ConfigMap.
*
* @param key
* Key in ConfigMap
* @return The retrieved state handle from the specified ConfigMap and key
* @throws IOException
* if the method failed to deserialize the stored state handle
* @throws NotExistException
* when t... | 3.26 |
flink_KubernetesStateHandleStore_replace_rdh | /**
 * Replaces a state handle in ConfigMap and discards the old state handle. We do not lock
* resource version and then replace in Kubernetes. Since the ConfigMap is periodically updated
* by leader, the resource version changes very fast. We use a "check-existence and update"
* transactional operation instead.
*... | 3.26 |
flink_KubernetesStateHandleStore_clearEntries_rdh | /**
* Remove all the filtered keys in the ConfigMap.
*
* @throws Exception
* when removing the keys failed
*/
@Override
public void clearEntries() throws Exception {
updateConfigMap(configMap -> {
configMap.getData().keySet().removeIf(configMapKeyFilter);
return Optional.of(configMap);
})... | 3.26 |
flink_KubernetesStateHandleStore_getAllHandles_rdh | /**
* Return a list of all valid keys for state handles.
*
* @return List of valid state handle keys in Kubernetes ConfigMap
* @throws Exception
* if get state handle names from ConfigMap failed.
*/
@Override
public Collection<String> getAllHandles() throws Exception {
return kubeClient.getConfigMap(confi... | 3.26 |
flink_KubernetesStateHandleStore_exists_rdh | /**
* Returns the resource version of the ConfigMap.
*
* @param key
* Key in ConfigMap
* @return resource version in {@link StringResourceVersion} format.
* @throws Exception
* if the check existence operation failed
*/
@Override
public StringResourceVersion exists(String key) throws Exception {
checkNo... | 3.26 |
flink_RestClusterClientConfiguration_getRetryMaxAttempts_rdh | /**
*
* @see RestOptions#RETRY_MAX_ATTEMPTS
*/public int getRetryMaxAttempts() {
return retryMaxAttempts;
} | 3.26 |
flink_RestClusterClientConfiguration_getAwaitLeaderTimeout_rdh | /**
*
* @see RestOptions#AWAIT_LEADER_TIMEOUT
*/
public long getAwaitLeaderTimeout() {
return awaitLeaderTimeout;
} | 3.26 |
flink_RestClusterClientConfiguration_getRetryDelay_rdh | /**
*
* @see RestOptions#RETRY_DELAY
*/
public long getRetryDelay() {
return f0;
} | 3.26 |
flink_TaskSlotTableImpl_notifyTimeout_rdh | // ---------------------------------------------------------------------
// TimeoutListener methods
// ---------------------------------------------------------------------
@Override
public void notifyTimeout(AllocationID key, UUID ticket) {
checkStarted();
if
(f0 != null) {
f0.timeoutSlot(key, tic... | 3.26 |
flink_TaskSlotTableImpl_getTaskSlot_rdh | // ---------------------------------------------------------------------
// Internal methods
// ---------------------------------------------------------------------
@Nullable
private TaskSlot<T> getTaskSlot(AllocationID allocationId) {
Preconditions.checkNotNull(allocationId);
return allocatedSlots.get(alloca... | 3.26 |
flink_TaskSlotTableImpl_createSlotReport_rdh | // ---------------------------------------------------------------------
// Slot report methods
// ---------------------------------------------------------------------
@Overridepublic SlotReport createSlotReport(ResourceID resourceId) {
List<SlotStatus> slotStatuses = new ArrayList<>();
for (int i = 0; i < n... | 3.26 |
flink_TaskSlotTableImpl_addTask_rdh | // ---------------------------------------------------------------------
// Task methods
// ---------------------------------------------------------------------
@Override
public boolean addTask(T task) throws SlotNotFoundException, SlotNotActiveException {
checkRunning();
Preconditions.checkNotNull(task);
... | 3.26 |
flink_DriverStrategy_getDriverClass_rdh | // --------------------------------------------------------------------------------------------
public Class<? extends Driver<?, ?>> getDriverClass() {
return this.driverClass;
} | 3.26 |
flink_CliTableResultView_getRow_rdh | // --------------------------------------------------------------------------------------------
@Override
protected String[] getRow(String[] resultRow) {
return resultRow;
} | 3.26 |
flink_CliTableResultView_updatePage_rdh | // --------------------------------------------------------------------------------------------
private void updatePage() {
// retrieve page
final int v13 = (page == LAST_PAGE) ? pageCount : page;
final List<RowData> rows;
try {
rows = materializedResult.retrievePage(v13);
} catch (SqlExecu... | 3.26 |
flink_StatsSummarySnapshot_getAverage_rdh | /**
* Calculates the average over all seen values.
*
* @return Average over all seen values.
*/public long getAverage() {if (count == 0) {
return 0;
} else {
return sum / count;
}
} | 3.26 |
flink_StatsSummarySnapshot_getQuantile_rdh | /**
* Returns the value for the given quantile based on the represented histogram statistics or
* {@link Double#NaN} if the histogram was not built.
*
* @param quantile
* Quantile to calculate the value for
* @return Value for the given quantile
*/
public double getQuantile(double quantile) {
return histog... | 3.26 |
flink_StatsSummarySnapshot_getMinimum_rdh | /**
* Returns the minimum seen value.
*
* @return The current minimum value.
*/
public long getMinimum() {
return min;
} | 3.26 |
flink_ArrowFieldWriter_reset_rdh | /**
* Resets the state of the writer to write the next batch of fields.
*/
public void reset() {
valueVector.reset();
count =
0;
} | 3.26 |
flink_ArrowFieldWriter_getCount_rdh | /**
* Returns the current count of elements written.
*/
public int getCount() {return count;
} | 3.26 |
flink_ArrowFieldWriter_write_rdh | /**
* Writes the specified ordinal of the specified row.
*/
public void write(IN row, int ordinal) {
doWrite(row, ordinal);
count += 1;
} | 3.26 |
flink_ArrowFieldWriter_finish_rdh | /**
* Finishes the writing of the current row batch.
*/
public void finish() {
valueVector.setValueCount(count);
} | 3.26 |
flink_EventId_snapshotConfiguration_rdh | // -----------------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<EventId>
snapshotConfiguration() {
return new EventIdSerializerSnapshot();
} | 3.26 |
flink_PartitionTable_startTrackingPartitions_rdh | /**
* Starts the tracking of the given partition for the given key.
*/
public void startTrackingPartitions(K key, Collection<ResultPartitionID> newPartitionIds) {
Preconditions.checkNotNull(key);
Preconditions.checkNotNull(newPartitionIds);
if (newPartitionIds.isEmpty()) {
return;
}
track... | 3.26 |
flink_PartitionTable_hasTrackedPartitions_rdh | /**
* Returns whether any partitions are being tracked for the given key.
*/
public boolean hasTrackedPartitions(K key) {
return trackedPartitionsPerKey.containsKey(key);
} | 3.26 |
flink_PartitionTable_stopTrackingPartitions_rdh | /**
* Stops the tracking of the given set of partitions for the given key.
*/
public void stopTrackingPartitions(K key, Collection<ResultPartitionID> partitionIds) {
Preconditions.checkNotNull(key);
Preconditions.checkNotNull(partitionIds);
// If the key is unknown we do not fail here, in line with
... | 3.26 |
flink_StreamingJoinOperator_processElement_rdh | /**
* Process an input element and output incremental joined records, retraction messages will be
* sent in some scenarios.
*
* <p>Following is the pseudo code to describe the core logic of this method. The logic of this
* method is too complex, so we provide the pseudo code to help understand the logic. We should... | 3.26 |
flink_StreamingJoinOperator_output_rdh | // -------------------------------------------------------------------------------------
private void output(RowData inputRow, RowData otherRow, boolean inputIsLeft) {
if (inputIsLeft) {
outRow.replace(inputRow, otherRow);
} else
{
outRow.replace(otherRow, inputRow);
}col... | 3.26 |
flink_OutputFormatProvider_of_rdh | /**
* Helper method for creating a static provider with a provided sink parallelism.
*/
static OutputFormatProvider of(OutputFormat<RowData> outputFormat, Integer sinkParallelism)
{
return new OutputFormatProvider() {
@Override
public OutputFormat<RowData> createOutputFormat() {
return... | 3.26 |
flink_LinkedOptionalMap_unwrapOptionals_rdh | /**
* Assuming all the entries of this map are present (keys and values) this method would return a
* map with these key and values, stripped from their Optional wrappers. NOTE: please note that
* if any of the key or values are absent this method would throw an {@link IllegalStateExc... | 3.26 |
flink_LinkedOptionalMap_isOrderedSubset_rdh | /**
 * Returns {@code true} if the key names present at @left appear in prefix order at @right.
*/
public boolean isOrderedSubset() {
return isOrderedSubset;
} | 3.26 |
flink_LinkedOptionalMap_absentKeysOrValues_rdh | /**
* Returns the key names of any keys or values that are absent.
*/
public Set<String> absentKeysOrValues()
{
return underlyingMap.entrySet().stream().filter(LinkedOptionalMap::keyOrValueIsAbsent).map(Entry::getKey).collect(Collectors.toCollection(LinkedHashSet::new));
} | 3.26 |
flink_LinkedOptionalMap_keyNames_rdh | /**
* Returns the key names added to this map.
*/
public Set<String> keyNames() {
return underlyingMap.keySet();
} | 3.26 |
flink_LinkedOptionalMap_mergeRightIntoLeft_rdh | /**
 * Tries to merge the keys and the values of @right into @left.
*/
public static <K, V> MergeResult<K, V> mergeRightIntoLeft(LinkedOptionalMap<K, V> left, LinkedOptionalMap<K, V> right) {
LinkedOptionalMap<K, V> merged = new LinkedOptionalMap<>(left);
merged.putAll(right);
return new MergeResult<>(mer... | 3.26 |
flink_LinkedOptionalMap_size_rdh | // --------------------------------------------------------------------------------------------------------
// API
// --------------------------------------------------------------------------------------------------------
public int size() {
return underlyingMap.size();
} | 3.26 |
flink_LinkedOptionalMap_hasAbsentKeysOrValues_rdh | /**
* Checks whether there are entries with absent keys or values.
*/
public boolean hasAbsentKeysOrValues() {
for (Entry<String, KeyValue<K,
V>> entry : underlyingMap.entrySet()) {
if (keyOrValueIsAbsent(entry)) {
return true;
}
}
return false;
} | 3.26 |
flink_LogicalTableScan_create_rdh | // END FLINK MODIFICATION
/**
* Creates a LogicalTableScan.
*
* @param cluster
* Cluster
* @param relOptTable
* Table
* @param hints
* The hints
*/
public static LogicalTableScan create(RelOptCluster cluster, final RelOptTable relOptTable, List<RelHint> hints) {
final Table table = relOptTable.unwra... | 3.26 |
flink_LogicalTableScan_explainTerms_rdh | // BEGIN FLINK MODIFICATION
// {@link #explainTerms} method should consider hints due to CALCITE-4581.
// This file should be remove once CALCITE-4581 is fixed.
@Override
public RelWriter explainTerms(RelWriter
pw) {
return super.explainTerms(pw).itemIf("hints", getHints(), !getHints().isEmpty());
} | 3.26 |
flink_ChannelReaderInputView_close_rdh | /**
* Closes this InputView, closing the underlying reader and returning all memory segments.
*
* @return A list containing all memory segments originally supplied to this view.
* @throws IOException
* Thrown, if the underlying reader could not be properly closed.
*/
@Override
public List<MemorySegment> close()... | 3.26 |
flink_ChannelReaderInputView_nextSegment_rdh | // --------------------------------------------------------------------------------------------
// Utilities
// --------------------------------------------------------------------------------------------
/**
* Gets the next segment from the asynchronous block reader. If more requests are to be issued,
* the method f... | 3.26 |
flink_ChannelReaderInputView_sendReadRequest_rdh | /**
* Sends a new read requests, if further requests remain. Otherwise, this method adds the
* segment directly to the readers return queue.
*
* @param seg
* The segment to use for the read request.
* @throws IOException
* Thrown, if the reader is in error.
*/
protected void sendReadRequest(MemorySegment se... | 3.26 |
flink_SpecificInputTypeStrategies_windowTimeIndicator_rdh | /**
* See {@link WindowTimeIndictorInputTypeStrategy}.
*/
public static InputTypeStrategy windowTimeIndicator() {
return new WindowTimeIndictorInputTypeStrategy(null);
} | 3.26 |
flink_LocalInputPreferredSlotSharingStrategy_getExecutionVertices_rdh | /**
* The vertices are topologically sorted since {@link DefaultExecutionTopology#getVertices}
* are topologically sorted.
*/
private LinkedHashMap<JobVertexID, List<SchedulingExecutionVertex>> getExecutionVertices() {
final LinkedHashMap<JobVertexID, List<SchedulingExecutionVertex>> vertices = new LinkedHashMap... | 3.26 |
flink_LocalInputPreferredSlotSharingStrategy_build_rdh | /**
* Build ExecutionSlotSharingGroups for all vertices in the topology. The
* ExecutionSlotSharingGroup of a vertex is determined in order below:
*
* <p>1. try finding an existing group of the corresponding co-location constraint.
*
* <p>2. try finding an available group of its producer vertex if the producer is... | 3.26 |
flink_CompactCoordinator_coordinate_rdh | /**
* Do stable compaction coordination.
*/
private void coordinate(long checkpointId, Map<String, List<Path>> partFiles) {
Function<Path, Long> sizeFunc = path -> {
try {
return fileSystem.getFileStatus(path).getLen();
} catch
(IOException e) {
throw new Unchecke... | 3.26 |
flink_SplitsAssignment_assignment_rdh | /**
*
* @return A mapping from subtask ID to their split assignment.
*/
public Map<Integer, List<SplitT>> assignment() {
return assignment;
} | 3.26 |
flink_StateMetaInfoSnapshotReadersWriters_getWriter_rdh | /**
* Returns the writer for {@link StateMetaInfoSnapshot}.
*/
@Nonnull
public static StateMetaInfoWriter getWriter() {
return CurrentWriterImpl.INSTANCE;
} | 3.26 |
flink_StateMetaInfoSnapshotReadersWriters_getReader_rdh | /**
* Returns a reader for {@link StateMetaInfoSnapshot} with the requested state type and version
* number.
*
* @param readVersion
* the format version to read.
* @return the requested reader.
*/
@Nonnull
public static StateMetaIn... | 3.26 |
flink_LogicalTypeCasts_supportsAvoidingCast_rdh | /**
* Returns whether the source type can be safely interpreted as the target type. This allows
* avoiding casts by ignoring some logical properties. This is basically a relaxed {@link LogicalType#equals(Object)}.
*
* <p>In particular this means:
*
* <p>Atomic, non-string types (INT, BOOLEAN, ...) and user-define... | 3.26 |
flink_LogicalTypeCasts_supportsReinterpretCast_rdh | /**
* Returns whether the source type can be reinterpreted as the target type.
*
* <p>Reinterpret casts correspond to the SQL reinterpret_cast and represent the logic behind a
* {@code REINTERPRET_CAST(sourceType AS targetType)} operation.
*/
public static boolean supportsReinterpretCast(LogicalType sourceType, Lo... | 3.26 |
flink_LogicalTypeCasts_supportsExplicitCast_rdh | /**
* Returns whether the source type can be casted to the target type.
*
* <p>Explicit casts correspond to the SQL cast specification and represent the logic behind a
* {@code CAST(sourceType AS targetType)} operation. For example, it allows for converting most
* types of the {@link LogicalTypeFamily#PREDEFINED} ... | 3.26 |
flink_LogicalTypeCasts_supportsCasting_rdh | // --------------------------------------------------------------------------------------------
private static boolean supportsCasting(LogicalType sourceType,
LogicalType targetType, boolean allowExplicit) {
// a NOT NULL type cannot store a NULL type
... | 3.26 |
flink_PlanReference_fromJsonString_rdh | /**
* Create a reference starting from a JSON string.
*/
public static PlanReference fromJsonString(String jsonString) {
Objects.requireNonNull(jsonString, "Json string cannot be null");
return new ContentPlanReference(jsonString);
} | 3.26 |
flink_PlanReference_fromResource_rdh | /**
* Create a reference from a file in the classpath.
*/
public static PlanReference fromResource(ClassLoader classLoader, String resourcePath) { Objects.requireNonNull(classLoader, "ClassLoader cannot be null");
Objects.requireNonNull(resourcePath, "Resource path cannot be null"); return new ResourcePlanReferen... | 3.26 |
flink_PlanReference_fromFile_rdh | /**
* Create a reference starting from a file path.
*/
public static PlanReference fromFile(File file) {
Objects.requireNonNull(file, "File cannot be null");
return new FilePlanReference(file);
} | 3.26 |
flink_OggJsonFormatFactory_validateDecodingFormatOptions_rdh | /**
* Validator for ogg decoding format.
*/ private static void validateDecodingFormatOptions(ReadableConfig tableOptions) {
JsonFormatOptionsUtil.validateDecodingFormatOptions(tableOptions);
} | 3.26 |
flink_OggJsonFormatFactory_validateEncodingFormatOptions_rdh | /**
* Validator for ogg encoding format.
*/
private static void validateEncodingFormatOptions(ReadableConfig tableOptions) {
JsonFormatOptionsUtil.validateEncodingFormatOptions(tableOptions);
} | 3.26 |
flink_RowTimeMiniBatchAssginerOperator_getMiniBatchStart_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/**
* Method to get the mini-batch start for a watermark.
*/
private static long getMiniBatchStart(long watermark, long interval) {
return watermark ... | 3.26 |
flink_ResourceManagerServiceImpl_startNewLeaderResourceManager_rdh | // ------------------------------------------------------------------------
// Internal
// ------------------------------------------------------------------------
@GuardedBy("lock")
private void startNewLeaderResourceManager(UUID newLeaderSessionID) throws Exception {
stopLeaderResourceMana... | 3.26 |
flink_ResourceManagerServiceImpl_startResourceManagerIfIsLeader_rdh | /**
* Returns a future that completes as {@code true} if the resource manager is still leader and
* started, and {@code false} if it's no longer leader.
*/
@GuardedBy("lock")
private CompletableFuture<Boolean> startResourceManagerIfIsLeader(ResourceManager<?> resourceManager) {
if
(isLeader(resourceManager)) {
... | 3.26 |
flink_ResourceManagerServiceImpl_start_rdh | // ------------------------------------------------------------------------
// ResourceManagerService
// ------------------------------------------------------------------------
@Override
public void start() throws Exception {
synchronized(lock) {
if (running) {
LOG.debug("Resource manager serv... | 3.26 |
flink_ResourceManagerServiceImpl_grantLeadership_rdh | // ------------------------------------------------------------------------
// LeaderContender
// ------------------------------------------------------------------------
@Override
public void grantLeadership(UUID newLeaderSessionID) {
handleLeaderEventExecutor.execute(() -> {
synchronized(lock) {
... | 3.26 |
flink_PriorityQueueSetFactory_m0_rdh | /**
* Creates a {@link KeyGroupedInternalPriorityQueue}.
*
* @param stateName
* unique name for associated with this queue.
* @param byteOrderedElementSerializer
* a serializer that with a format that is lexicographically
* ordered in alignment with elementPriorityComparator.
* @param allowFutureMetadataU... | 3.26 |
flink_ExecutorNotifier_notifyReadyAsync_rdh | /**
* Call the given callable once. Notify the {@link #executorToNotify} to execute the handler.
*
* <p>Note that when this method is invoked multiple times, it is possible that multiple
* callables are executed concurrently, so do the handlers. For example, assuming both the
* workerExecut... | 3.26 |
flink_RequestedLocalProperties_reset_rdh | /**
* This method resets the local properties to a state where no properties are given.
*/
public void reset() {
this.ordering = null;
this.groupedFields = null;
} | 3.26 |
flink_RequestedLocalProperties_getGroupedFields_rdh | /**
* Gets the grouped fields.
*
* @return The grouped fields, or <code>null</code> if nothing is grouped.
*/
public FieldSet getGroupedFields() {
return this.groupedFields;
} | 3.26 |
flink_RequestedLocalProperties_hashCode_rdh | // --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
final int prime = 31;
int v10 = 1;
v10 = (prime * v10) + (this.ordering == null ? 0 : this.ordering.hashCode());
v10 = (prime * v10) + (this.groupedFields == null ? 0 : th... | 3.26 |
flink_RequestedLocalProperties_getOrdering_rdh | // --------------------------------------------------------------------------------------------
/**
* Gets the key order.
*
* @return The key order, or <code>null</code> if nothing is ordered.
*/
public Ordering getOrdering() {
return ordering;
} | 3.26 |
flink_RequestedLocalProperties_isTrivial_rdh | /**
* Checks, if the properties in this object are trivial, i.e. only standard values.
*/
public boolean isTrivial() {return (ordering == null) && (this.groupedFields == null);
} | 3.26 |
flink_RequestedLocalProperties_filterBySemanticProperties_rdh | // --------------------------------------------------------------------------------------------
/**
* Filters these properties by what can be preserved by the given SemanticProperties when
* propagated down to the given input.
*
* @param props
* The SemanticProperties which define whi... | 3.26 |
flink_RequestedLocalProperties_setOrdering_rdh | /**
* Sets the order for these interesting local properties.
*
* @param ordering
* The order to set.
*/
public void setOrdering(Ordering ordering) {
this.ordering = ordering;
} | 3.26 |
flink_RequestedLocalProperties_isMetBy_rdh | /**
* Checks, if this set of properties, as interesting properties, is met by the given properties.
*
* @param other
* The properties for which to check whether they meet these properties.
* @return True, if the properties are met, false otherwise.
*/public boolean isMetBy(LocalProperties other) {
if (this.... | 3.26 |
flink_RequestedLocalProperties_m0_rdh | /**
* Sets the fields that are grouped in these data properties.
*
* @param groupedFields
* The fields that are grouped in these data properties.
*/
public void m0(FieldSet groupedFields) {
this.groupedFields = groupedFields;
} | 3.26 |
flink_ExponentialDelayRestartBackoffTimeStrategy_calculateJitterBackoffMS_rdh | /**
* Calculate jitter offset to avoid thundering herd scenario. The offset range increases with
* the number of restarts.
*
 * <p>E.g. for backoff time 8 with jitter 0.25, it generates a random number in the range [-2, 2].
*
* @return random value in interval [-n,... | 3.26 |
flink_SqlConstraintEnforcement_symbol_rdh | /**
* Creates a parse-tree node representing an occurrence of this keyword at a particular position
* in the parsed text.
*/
public SqlLiteral symbol(SqlParserPos pos) {
return SqlLiteral.createSymbol(this, pos);
} | 3.26 |
flink_TimeIndicatorTypeInfo_createSerializer_rdh | // this replaces the effective serializer by a LongSerializer
// it is a hacky but efficient solution to keep the object creation overhead low but still
// be compatible with the corresponding SqlTimestampTypeInfo
@Override
@SuppressWarnings("unchecked") public TypeSerializer<Timestamp> createSerializer(ExecutionConfig... | 3.26 |
flink_SingleElementIterator_set_rdh | /**
* Resets the element. After this call, the iterator has one element available, which is the
* given element.
*
* @param current
* The element to make available to the iterator.
*/
public void set(E current) {
this.current = current;
this.available = true;
} | 3.26 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.