| name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, 3.26–3.68) |
|---|---|---|
flink_SlotID_getDynamicSlotID_rdh
|
/**
* Get a SlotID without actual slot index for dynamic slot allocation.
*/
public static SlotID getDynamicSlotID(ResourceID resourceID) {
return new SlotID(resourceID);
}
| 3.26
|
flink_SlotID_getResourceID_rdh
|
// ------------------------------------------------------------------------
@Override
public ResourceID getResourceID() {
return resourceId;
}
| 3.26
|
flink_SlotID_equals_rdh
|
// ------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if ((o == null) || (getClass() != o.getClass())) {
return false;
}
SlotID slotID = ((SlotID) (o));
return (slotNumber == slotID.slotNumber) && resourceId.equals(slotID.resourceId);
}
| 3.26
|
flink_GenericValueMetricGroup_putVariables_rdh
|
// ------------------------------------------------------------------------
@Override
protected void putVariables(Map<String, String> variables) {
variables.put(ScopeFormat.asVariable(this.key), value);
}
| 3.26
|
flink_SqlNodeConverter_supportedSqlKinds_rdh
|
/**
* Returns the {@link SqlKind SqlKinds} of {@link SqlNode SqlNodes} that the {@link SqlNodeConverter} supports to convert.
*
* <p>If a {@link SqlNodeConverter} returns a non-empty set of SqlKinds, the conversion framework
* will find the corresponding converter by matching the SqlKind of the SqlNode instead of the
* class of the SqlNode.
*
* @see SqlQueryConverter
*/
default Optional<EnumSet<SqlKind>> supportedSqlKinds() {
return Optional.empty();
}
| 3.26
|
flink_PartitionSpec_getFieldIndices_rdh
|
/**
* Gets the field indices of all fields in the input.
*/
@JsonIgnore
public int[] getFieldIndices() {
return fields;
}
| 3.26
|
flink_FunctionIdentifier_normalizeName_rdh
|
/**
* Normalize a function name.
*/
public static String normalizeName(String name) {
return name.toLowerCase();
}
| 3.26
|
flink_FunctionIdentifier_asSummaryString_rdh
|
/**
* Returns a string that summarizes this instance for printing to a console or log.
*/
public String asSummaryString() {
if (objectIdentifier != null) {
return String.join(".", objectIdentifier.getCatalogName(), objectIdentifier.getDatabaseName(), objectIdentifier.getObjectName());
} else {
return functionName;
}
}
| 3.26
|
flink_FunctionIdentifier_toList_rdh
|
/**
* List of the component names of this function identifier.
*/
public List<String> toList() {
if (objectIdentifier != null) {
return objectIdentifier.toList();
} else if (functionName != null) {
return Collections.singletonList(functionName);
} else {
throw new IllegalStateException("functionName and objectIdentifier are both null which should never happen.");
}
}
| 3.26
|
flink_FunctionIdentifier_normalizeObjectIdentifier_rdh
|
/**
* Normalize an object identifier by only normalizing the function name.
*/
public static ObjectIdentifier normalizeObjectIdentifier(ObjectIdentifier oi) {
return ObjectIdentifier.of(oi.getCatalogName(), oi.getDatabaseName(), normalizeName(oi.getObjectName()));
}
| 3.26
|
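A minimal usage sketch of the FunctionIdentifier helpers above, added for illustration and not part of the dataset; it assumes the public factories ObjectIdentifier.of(...) and FunctionIdentifier.of(...) from the Flink Table API behave as shown:

// Illustrative sketch only; class name and values are hypothetical.
import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.functions.FunctionIdentifier;

public class FunctionIdentifierSketch {
    public static void main(String[] args) {
        // normalizeObjectIdentifier lower-cases only the function name part.
        ObjectIdentifier oi = ObjectIdentifier.of("myCatalog", "myDb", "MyFunc");
        ObjectIdentifier normalized = FunctionIdentifier.normalizeObjectIdentifier(oi);

        FunctionIdentifier identifier = FunctionIdentifier.of(normalized);
        System.out.println(identifier.asSummaryString()); // myCatalog.myDb.myfunc
        System.out.println(identifier.toList());          // [myCatalog, myDb, myfunc]
    }
}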
flink_RefCountedFileWithStream_newFile_rdh
|
// ------------------------------ Factory methods for initializing a temporary file
// ------------------------------
public static RefCountedFileWithStream newFile(final File file, final OutputStream currentOut) throws IOException {
return new RefCountedFileWithStream(file, currentOut, 0L);
}
| 3.26
|
flink_SplitEnumerator_notifyCheckpointComplete_rdh
|
/**
* We have an empty default implementation here because most source readers do not have to
* implement the method.
*
* @see CheckpointListener#notifyCheckpointComplete(long)
*/
@Override
default void notifyCheckpointComplete(long checkpointId) throws Exception {
}
| 3.26
|
flink_SplitEnumerator_handleSourceEvent_rdh
|
/**
* Handles a custom source event from the source reader.
*
* <p>This method has a default implementation that does nothing, because it is only required to
* be implemented by some sources, which have a custom event protocol between reader and
* enumerator. The common events for reader registration and split requests are not dispatched
* to this method, but rather invoke the {@link #addReader(int)} and {@link #handleSplitRequest(int, String)} methods.
*
* @param subtaskId
* the subtask id of the source reader who sent the source event.
* @param sourceEvent
* the source event from the source reader.
*/
default void handleSourceEvent(int subtaskId, SourceEvent sourceEvent) {
}
| 3.26
|
flink_TGetQueryIdResp_isSet_rdh
|
/**
* Returns true if field corresponding to fieldID is set (has been assigned a value) and false
* otherwise
*/
public boolean isSet(_Fields field) {
if (field == null) {
throw new IllegalArgumentException();
}
switch (field) {
case QUERY_ID :
return isSetQueryId();
}
throw new IllegalStateException();
}
| 3.26
|
flink_TGetQueryIdResp_findByThriftId_rdh
|
/**
* Find the _Fields constant that matches fieldId, or null if it's not found.
*/
public static _Fields findByThriftId(int fieldId) {
switch (fieldId) {
case 1 : // QUERY_ID
return QUERY_ID;
default :
return null;
}
}
| 3.26
|
flink_TGetQueryIdResp_findByThriftIdOrThrow_rdh
|
/**
* Find the _Fields constant that matches fieldId, throwing an exception if it is not found.
*/
public static _Fields findByThriftIdOrThrow(int fieldId) {
_Fields fields = findByThriftId(fieldId);
if (fields == null)
throw new IllegalArgumentException(("Field " + fieldId) + " doesn't exist!");
return fields;
}
| 3.26
|
flink_TGetQueryIdResp_isSetQueryId_rdh
|
/**
* Returns true if field queryId is set (has been assigned a value) and false otherwise
*/
public boolean isSetQueryId() {
return this.queryId != null;
}
| 3.26
|
flink_TGetQueryIdResp_findByName_rdh
|
/**
* Find the _Fields constant that matches name, or null if it's not found.
*/
public static _Fields findByName(String name) {
return byName.get(name);
}
| 3.26
|
flink_RequestJobsOverview_hashCode_rdh
|
// ------------------------------------------------------------------------
@Override
public int hashCode() {
return RequestJobsOverview.class.hashCode();
}
| 3.26
|
flink_RequestJobsOverview_readResolve_rdh
|
/**
* Preserve the singleton property by returning the singleton instance
*/
private Object readResolve() {
return INSTANCE;
}
| 3.26
|
flink_GenericInputSplit_getSplitNumber_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public int getSplitNumber() {
return this.partitionNumber;
}
| 3.26
|
flink_GenericInputSplit_hashCode_rdh
|
// --------------------------------------------------------------------------------------------
@Override
public int hashCode() {
return this.partitionNumber ^ this.totalNumberOfPartitions;
}
| 3.26
|
flink_LocalInputChannel_checkpointStarted_rdh
|
// ------------------------------------------------------------------------
// Consume
// ------------------------------------------------------------------------
public void checkpointStarted(CheckpointBarrier barrier) throws CheckpointException {
channelStatePersister.startPersisting(barrier.getId(), Collections.emptyList());
}
| 3.26
|
flink_LocalInputChannel_releaseAllResources_rdh
|
/**
* Releases the partition reader.
*/
@Override
void releaseAllResources() throws IOException {
if (!isReleased) {
isReleased = true;
ResultSubpartitionView view = subpartitionView;
if (view != null) {
view.releaseAllResources();
subpartitionView = null;
}
}
}
| 3.26
|
flink_LocalInputChannel_getSubpartitionView_rdh
|
// ------------------------------------------------------------------------
@VisibleForTesting
ResultSubpartitionView getSubpartitionView() {
return subpartitionView;
}
| 3.26
|
flink_LocalInputChannel_isReleased_rdh
|
// ------------------------------------------------------------------------
// Life cycle
// ------------------------------------------------------------------------
@Override
boolean isReleased() {
return isReleased;
}
| 3.26
|
flink_LocalInputChannel_retriggerSubpartitionRequest_rdh
|
/**
* Retriggers a subpartition request.
*/
void retriggerSubpartitionRequest(Timer timer) {
synchronized(requestLock) {
checkState(subpartitionView == null, "already requested partition");
timer.schedule(new TimerTask() {
@Override
public void run() {
try {
requestSubpartition();
} catch (Throwable t) {
setError(t);
}
}
}, getCurrentBackoff());
}
}
| 3.26
|
flink_ChannelStateCheckpointWriter_m0_rdh
|
/**
* The throwable is only used for the specific subtask that triggered the failure. Other subtasks
* will be failed with {@link CHANNEL_STATE_SHARED_STREAM_EXCEPTION}.
*/
public void m0(JobVertexID jobVertexID, int subtaskIndex, Throwable throwable) {
if (isDone()) {
return;
}
this.throwable = throwable;
ChannelStatePendingResult result = pendingResults.get(SubtaskID.of(jobVertexID, subtaskIndex));
if (result != null) {
result.fail(throwable);
}
failResultAndCloseStream(new CheckpointException(CHANNEL_STATE_SHARED_STREAM_EXCEPTION, throwable));
}
| 3.26
|
flink_EndOfChannelStateEvent_read_rdh
|
// ------------------------------------------------------------------------
@Override
public void read(DataInputView in) {
// Nothing to do here
}
| 3.26
|
flink_EndOfChannelStateEvent_hashCode_rdh
|
// ------------------------------------------------------------------------
@Override
public int hashCode() {
return 1965146670;
}
| 3.26
|
flink_DuplicatingFileSystem_of_rdh
|
/**
* A factory method for creating a simple pair of source/destination.
*/
static CopyRequest of(Path source, Path destination) {
return new CopyRequest() {
@Override
public Path getSource() {
return source;
}
@Override
public Path getDestination() {
return destination;
}
};
}
| 3.26
|
flink_HashTableBloomFilter_addHash_rdh
|
/**
*
* @return false if the accuracy of the BloomFilter is not high.
*/
boolean addHash(int hash) {
setLocation(hash);
filter.addHash(hash);
size++;
return size <= maxSize;
}
| 3.26
|
flink_EvictingWindowReader_process_rdh
|
/**
* Reads window state generated without any preaggregation such as {@code WindowedStream#apply}
* and {@code WindowedStream#process}.
*
* @param uid
* The uid of the operator.
* @param readerFunction
* The window reader function.
* @param keyType
* The key type of the window.
* @param stateType
* The type of records stored in state.
* @param outputType
* The output type of the reader function.
* @param <K>
* The type of the key.
* @param <T>
* The type of the records stored in state.
* @param <OUT>
* The output type of the reader function.
* @return A {@code DataSet} of objects read from keyed state.
* @throws IOException
* If the savepoint does not contain the specified uid.
*/
public <K, T, OUT> DataSet<OUT> process(String uid, WindowReaderFunction<T, OUT, K, W> readerFunction, TypeInformation<K> keyType, TypeInformation<T> stateType, TypeInformation<OUT> outputType) throws IOException {
WindowReaderOperator<?, K, StreamRecord<T>, W, OUT> operator = WindowReaderOperator.evictingWindow(new ProcessEvictingWindowReader<>(readerFunction), keyType, windowSerializer, stateType, env.getConfig());
return readWindowOperator(uid, outputType, operator);
}
| 3.26
|
flink_EvictingWindowReader_aggregate_rdh
|
/**
* Reads window state generated using an {@link AggregateFunction}.
*
* @param uid
* The uid of the operator.
* @param aggregateFunction
* The aggregate function used to create the window.
* @param readerFunction
* The window reader function.
* @param keyType
* The key type of the window.
* @param inputType
* The type information of the accumulator function.
* @param outputType
* The output type of the reader function.
* @param <K>
* The type of the key.
* @param <T>
* The type of the values that are aggregated.
* @param <ACC>
* The type of the accumulator (intermediate aggregate state).
* @param <R>
* The type of the aggregated result.
* @param <OUT>
* The output type of the reader function.
* @return A {@code DataSet} of objects read from keyed state.
* @throws IOException
* If savepoint does not contain the specified uid.
*/
public <K, T, ACC, R, OUT> DataSet<OUT> aggregate(String uid, AggregateFunction<T, ACC, R> aggregateFunction, WindowReaderFunction<R, OUT, K, W> readerFunction, TypeInformation<K> keyType, TypeInformation<T> inputType, TypeInformation<OUT> outputType) throws IOException {
WindowReaderOperator<?, K, StreamRecord<T>, W, OUT> operator = WindowReaderOperator.evictingWindow(new AggregateEvictingWindowReaderFunction<>(readerFunction, aggregateFunction), keyType, windowSerializer, inputType, env.getConfig());
return readWindowOperator(uid, outputType, operator);
}
| 3.26
|
flink_EvictingWindowReader_reduce_rdh
|
/**
* Reads window state generated using a {@link ReduceFunction}.
*
* @param uid
* The uid of the operator.
* @param function
* The reduce function used to create the window.
* @param readerFunction
* The window reader function.
* @param keyType
* The key type of the window.
* @param reduceType
* The type information of the reduce function.
* @param outputType
* The output type of the reader function.
* @param <K>
* The type of the key.
* @param <T>
* The type of the reduce function.
* @param <OUT>
* The output type of the reduce function.
* @return A {@code DataSet} of objects read from keyed state.
* @throws IOException
* If savepoint does not contain the specified uid.
*/
public <K, T, OUT> DataSet<OUT> reduce(String uid, ReduceFunction<T> function, WindowReaderFunction<T, OUT, K, W> readerFunction, TypeInformation<K> keyType, TypeInformation<T> reduceType, TypeInformation<OUT> outputType) throws IOException {
WindowReaderOperator<?, K, StreamRecord<T>, W, OUT> operator = WindowReaderOperator.evictingWindow(new ReduceEvictingWindowReaderFunction<>(readerFunction, function), keyType, windowSerializer, reduceType, env.getConfig());
return readWindowOperator(uid, outputType, operator);
}
| 3.26
|
flink_HashSubpartitionBufferAccumulator_m0_rdh
|
// ------------------------------------------------------------------------
// Called by HashBufferAccumulator
// ------------------------------------------------------------------------
public void m0(ByteBuffer record, Buffer.DataType dataType) throws IOException {
if (dataType.isEvent()) {
writeEvent(record, dataType);
} else {
writeRecord(record, dataType);
}
}
| 3.26
|
flink_HashSubpartitionBufferAccumulator_writeEvent_rdh
|
// ------------------------------------------------------------------------
// Internal Methods
// ------------------------------------------------------------------------
private void writeEvent(ByteBuffer event, Buffer.DataType dataType) {
checkArgument(dataType.isEvent());
// Each event should take an exclusive buffer
finishCurrentWritingBufferIfNotEmpty();
// Store the events in the heap segments to improve network memory efficiency
MemorySegment data = MemorySegmentFactory.wrap(event.array());
flushFinishedBuffer(new NetworkBuffer(data, FreeingBufferRecycler.INSTANCE, dataType, data.size()));
}
| 3.26
|
flink_Ordering_getNumberOfFields_rdh
|
// --------------------------------------------------------------------------------------------
public int getNumberOfFields() {
return this.indexes.size();
}
| 3.26
|
flink_Ordering_clone_rdh
|
// --------------------------------------------------------------------------------------------
public Ordering clone() {
Ordering newOrdering = new Ordering();
newOrdering.indexes = this.indexes;
newOrdering.types.addAll(this.types);
newOrdering.orders.addAll(this.orders);
return newOrdering;
}
| 3.26
|
flink_Ordering_isMetBy_rdh
|
// --------------------------------------------------------------------------------------------
public boolean isMetBy(Ordering otherOrdering) {
if ((otherOrdering == null) || (this.indexes.size() > otherOrdering.indexes.size())) {
    return false;
}
for (int i = 0; i < this.indexes.size(); i++) {
    if (!this.indexes.get(i).equals(otherOrdering.indexes.get(i))) {
        return false;
    }
    // if this one requests no order, everything is good
    if (this.orders.get(i) != Order.NONE) {
        if (this.orders.get(i) == Order.ANY) {
            // if any order is requested, any order other than NONE is good
            if (otherOrdering.orders.get(i) == Order.NONE) {
                return false;
            }
        } else if (otherOrdering.orders.get(i) != this.orders.get(i)) {
            // the orders must be equal
            return false;
        }
    }
}
return true;
}
| 3.26
|
flink_Ordering_appendOrdering_rdh
|
/**
* Extends this ordering by appending an additional order requirement. If the index has been
* previously appended then the unmodified Ordering is returned.
*
* @param index
* Field index of the appended order requirement.
* @param type
* Type of the appended order requirement.
* @param order
* Order of the appended order requirement.
* @return This ordering with an additional appended order requirement.
*/
public Ordering appendOrdering(Integer index, Class<? extends Comparable<?>> type, Order order) {
if (index < 0) {
throw new IllegalArgumentException("The key index must not be negative.");
}
if (order == null) {
throw new NullPointerException();
}
if (order == Order.NONE) {
throw new IllegalArgumentException("An ordering must not be created with a NONE order.");
}
if (!this.indexes.contains(index)) {
this.indexes = this.indexes.addField(index);
this.types.add(type);
this.orders.add(order);
}
return this;
}
| 3.26
|
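The appendOrdering and isMetBy snippets above compose as in the following sketch, added for illustration (assuming the public Ordering and Order classes in org.apache.flink.api.common.operators); it is not part of the dataset:

// Illustrative sketch only; class name and field choices are hypothetical.
import org.apache.flink.api.common.operators.Order;
import org.apache.flink.api.common.operators.Ordering;

public class OrderingSketch {
    public static void main(String[] args) {
        // Requested ordering: field 0 in any order, field 1 ascending.
        Ordering requested = new Ordering()
                .appendOrdering(0, Integer.class, Order.ANY)
                .appendOrdering(1, String.class, Order.ASCENDING);

        // Ordering actually produced by some operator.
        Ordering produced = new Ordering()
                .appendOrdering(0, Integer.class, Order.DESCENDING)
                .appendOrdering(1, String.class, Order.ASCENDING);

        // isMetBy checks, prefix-wise, that every requested field and order is satisfied.
        System.out.println(requested.isMetBy(produced)); // true
    }
}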
flink_Ordering_getTypes_rdh
|
// --------------------------------------------------------------------------------------------
@SuppressWarnings("unchecked")
public Class<? extends Comparable<?>>[] getTypes() {
return this.types.toArray(new Class[this.types.size()]);
}
| 3.26
|
flink_BinaryMapData_valueOf_rdh
|
// ------------------------------------------------------------------------------------------
// Construction Utilities
// ------------------------------------------------------------------------------------------
public static BinaryMapData valueOf(BinaryArrayData key, BinaryArrayData value) {
checkArgument((key.segments.length == 1) && (value.getSegments().length == 1));
byte[] bytes = new byte[(4 + key.sizeInBytes) + value.sizeInBytes];
MemorySegment segment = MemorySegmentFactory.wrap(bytes);
segment.putInt(0, key.sizeInBytes);
key.getSegments()[0].copyTo(key.getOffset(), segment, 4, key.sizeInBytes);
value.getSegments()[0].copyTo(value.getOffset(), segment, 4 + key.sizeInBytes, value.sizeInBytes);
BinaryMapData map = new BinaryMapData();
map.pointTo(segment, 0, bytes.length);
return map;
}
| 3.26
|
flink_DefaultLeaderRetrievalService_notifyLeaderAddress_rdh
|
/**
* Called by specific {@link LeaderRetrievalDriver} to notify leader address.
*
* @param leaderInformation
* the newly notified leader information address. Any exception will be
* handled by the leader listener.
*/
@Override
@GuardedBy("lock")
public void notifyLeaderAddress(LeaderInformation leaderInformation) {
final UUID newLeaderSessionID = leaderInformation.getLeaderSessionID();
final String newLeaderAddress = leaderInformation.getLeaderAddress();
synchronized(lock) {
if (running) {
if ((!Objects.equals(newLeaderAddress, lastLeaderAddress)) || (!Objects.equals(newLeaderSessionID, lastLeaderSessionID))) {
if (LOG.isDebugEnabled()) {
if ((newLeaderAddress == null) && (newLeaderSessionID == null)) {
LOG.debug("Leader information was lost: The listener will be notified accordingly.");
} else {
LOG.debug("New leader information: Leader={}, session ID={}.", newLeaderAddress, newLeaderSessionID);
}
}
lastLeaderAddress = newLeaderAddress;
lastLeaderSessionID = newLeaderSessionID;
// Notify the listener only when the leader is truly changed.
leaderListener.notifyLeaderAddress(newLeaderAddress, newLeaderSessionID);
}
} else if (LOG.isDebugEnabled()) {
LOG.debug("Ignoring notification since the {} has already been closed.", leaderRetrievalDriver);
}
}
}
| 3.26
|
flink_HeapPriorityQueue_clear_rdh
|
/**
* Clears the queue.
*/
public void clear() {
final int arrayOffset = getHeadElementIndex();
Arrays.fill(queue, arrayOffset, arrayOffset + size, null);
size = 0;
}
| 3.26
|
flink_DemultiplexingRecordDeserializer_getNextRecord_rdh
|
/**
* Summarizes the status and watermarks of all virtual channels.
*/
@Override
public DeserializationResult getNextRecord(DeserializationDelegate<StreamElement> delegate) throws IOException {
DeserializationResult result;
do {
result = currentVirtualChannel.getNextRecord(delegate);
if (result.isFullRecord()) {
final StreamElement element = delegate.getInstance();
if (element.isRecord() || element.isLatencyMarker()) {
return result;
} else if (element.isWatermark()) {
// basically, do not emit a watermark if not all virtual channels are past it
final Watermark minWatermark = channels.values().stream().map(virtualChannel -> virtualChannel.lastWatermark).min(Comparator.comparing(Watermark::getTimestamp)).orElseThrow(() -> new IllegalStateException("Should always have a watermark"));
// at least one virtual channel has no watermark, don't emit any watermark yet
if (minWatermark.equals(Watermark.UNINITIALIZED)) {
continue;
}
delegate.setInstance(minWatermark);
return result;
} else if (element.isWatermarkStatus()) {
// summarize statuses across all virtual channels
// duplicate statuses are filtered in StatusWatermarkValve
if (channels.values().stream().anyMatch(d -> d.watermarkStatus.isActive())) {
delegate.setInstance(WatermarkStatus.ACTIVE);
}
return result;
}
}
// loop is only re-executed for suppressed watermark
} while (!result.isBufferConsumed() );
return DeserializationResult.PARTIAL_RECORD;
}
| 3.26
|
flink_UnsortedGrouping_min_rdh
|
/**
* Syntactic sugar for aggregate (MIN, field).
*
* @param field
* The index of the Tuple field on which the aggregation function is applied.
* @return An AggregateOperator that represents the min'ed DataSet.
* @see org.apache.flink.api.java.operators.AggregateOperator
*/
public AggregateOperator<T> min(int field) {
return this.aggregate(Aggregations.MIN, field, Utils.getCallLocationName());
}
| 3.26
|
flink_UnsortedGrouping_max_rdh
|
/**
* Syntactic sugar for aggregate (MAX, field).
*
* @param field
* The index of the Tuple field on which the aggregation function is applied.
* @return An AggregateOperator that represents the max'ed DataSet.
* @see org.apache.flink.api.java.operators.AggregateOperator
*/
public AggregateOperator<T> max(int field) {
return this.aggregate(Aggregations.MAX, field, Utils.getCallLocationName());
}
| 3.26
|
flink_UnsortedGrouping_reduce_rdh
|
/**
* Applies a Reduce transformation on a grouped {@link DataSet}.
*
* <p>For each group, the transformation consecutively calls a {@link org.apache.flink.api.common.functions.RichReduceFunction} until only a single element for
* each group remains. A ReduceFunction combines two elements into one new element of the same
* type.
*
* @param reducer
* The ReduceFunction that is applied on each group of the DataSet.
* @return A ReduceOperator that represents the reduced DataSet.
* @see org.apache.flink.api.common.functions.RichReduceFunction
* @see ReduceOperator
* @see DataSet
*/
public ReduceOperator<T> reduce(ReduceFunction<T> reducer) {
if (reducer == null) {
throw new NullPointerException("Reduce function must not be null.");
}
return new ReduceOperator<T>(this, inputDataSet.clean(reducer), Utils.getCallLocationName());
}
| 3.26
|
flink_UnsortedGrouping_withPartitioner_rdh
|
/**
* Uses a custom partitioner for the grouping.
*
* @param partitioner
* The custom partitioner.
* @return The grouping object itself, to allow for method chaining.
*/
public UnsortedGrouping<T> withPartitioner(Partitioner<?> partitioner) {
Preconditions.checkNotNull(partitioner);
getKeys().validateCustomPartitioner(partitioner, null);
this.customPartitioner = partitioner;
return this;
}
| 3.26
|
flink_UnsortedGrouping_sortGroup_rdh
|
/**
* Sorts elements within a group on a key extracted by the specified {@link org.apache.flink.api.java.functions.KeySelector} in the specified {@link Order}.
*
* <p>Chaining {@link #sortGroup(KeySelector, Order)} calls is not supported.
*
* @param keySelector
* The KeySelector with which the group is sorted.
* @param order
* The Order in which the extracted key is sorted.
* @return A SortedGrouping with specified order of group element.
* @see Order
*/
public <K> SortedGrouping<T> sortGroup(KeySelector<T, K> keySelector, Order order) {
if (!(this.getKeys() instanceof Keys.SelectorFunctionKeys)) {
throw new InvalidProgramException("KeySelector group-sorting keys can only be used with KeySelector grouping keys.");
}
TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keySelector, this.inputDataSet.getType());
SortedGrouping<T> sg = new SortedGrouping<T>(this.inputDataSet, this.keys, new Keys.SelectorFunctionKeys<T, K>(keySelector, this.inputDataSet.getType(), keyType), order);
sg.customPartitioner = getCustomPartitioner();
return sg;
}
| 3.26
|
flink_UnsortedGrouping_maxBy_rdh
|
/**
* Applies a special case of a reduce transformation (maxBy) on a grouped {@link DataSet}.
*
* <p>The transformation consecutively calls a {@link ReduceFunction} until only a single
* element remains which is the result of the transformation. A ReduceFunction combines two
* elements into one new element of the same type.
*
* @param fields
* Keys taken into account for finding the maximum.
* @return A {@link ReduceOperator} representing the maximum.
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public ReduceOperator<T> maxBy(int... fields) {
// Check for using a tuple
if ((!this.inputDataSet.getType().isTupleType()) || (!(this.inputDataSet.getType() instanceof TupleTypeInfo))) {
throw new InvalidProgramException("Method maxBy(int) only works on tuples.");
}
return new ReduceOperator<T>(this, new SelectByMaxFunction(((TupleTypeInfo) (this.inputDataSet.getType())), fields), Utils.getCallLocationName());
}
| 3.26
|
flink_UnsortedGrouping_sum_rdh
|
/**
* Syntactic sugar for aggregate (SUM, field).
*
* @param field
* The index of the Tuple field on which the aggregation function is applied.
* @return An AggregateOperator that represents the summed DataSet.
* @see org.apache.flink.api.java.operators.AggregateOperator
*/
public AggregateOperator<T> sum(int field) {
return this.aggregate(Aggregations.SUM, field, Utils.getCallLocationName());
}
| 3.26
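The grouped aggregation helpers above (sum/min/max as syntactic sugar for aggregate) can be exercised with the legacy DataSet API roughly as follows; this sketch is added for illustration, assumes the flink-java DataSet API, and is not part of the dataset:

// Illustrative sketch only; class name and sample data are hypothetical.
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;

public class GroupingAggregationSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        DataSet<Tuple2<String, Integer>> input = env.fromElements(
                Tuple2.of("a", 1), Tuple2.of("a", 3), Tuple2.of("b", 2));

        // groupBy(0) returns an UnsortedGrouping; sum/max delegate to aggregate(SUM/MAX, field).
        input.groupBy(0).sum(1).print();   // e.g. (a,4), (b,2)
        input.groupBy(0).max(1).print();   // e.g. (a,3), (b,2)
    }
}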
|
flink_UnsortedGrouping_first_rdh
|
/**
* Returns a new set containing the first n elements in this grouped {@link DataSet}.
*
* @param n
* The desired number of elements for each group.
* @return A GroupReduceOperator that represents the DataSet containing the elements.
*/
public GroupReduceOperator<T, T> first(int n) {
if (n < 1) {
throw new InvalidProgramException("Parameter n of first(n) must be at least 1.");
}
return reduceGroup(new FirstReducer<T>(n));
}
| 3.26
|
flink_UnsortedGrouping_reduceGroup_rdh
|
/**
* Applies a GroupReduce transformation on a grouped {@link DataSet}.
*
* <p>The transformation calls a {@link org.apache.flink.api.common.functions.RichGroupReduceFunction} for each group of the DataSet.
* A GroupReduceFunction can iterate over all elements of a group and emit any number of output
* elements including none.
*
* @param reducer
* The GroupReduceFunction that is applied on each group of the DataSet.
* @return A GroupReduceOperator that represents the reduced DataSet.
* @see org.apache.flink.api.common.functions.RichGroupReduceFunction
* @see GroupReduceOperator
* @see DataSet
*/
public <R> GroupReduceOperator<T, R> reduceGroup(GroupReduceFunction<T, R> reducer) {
if (reducer == null) {
throw new NullPointerException("GroupReduce function must not be null.");
}
TypeInformation<R> resultType = TypeExtractor.getGroupReduceReturnTypes(reducer, this.getInputDataSet().getType(), Utils.getCallLocationName(), true);
return new GroupReduceOperator<T, R>(this, resultType, inputDataSet.clean(reducer), Utils.getCallLocationName());
}
| 3.26
|
flink_UnsortedGrouping_combineGroup_rdh
|
/**
* Applies a GroupCombineFunction on a grouped {@link DataSet}. A GroupCombineFunction is
* similar to a GroupReduceFunction but does not perform a full data exchange. Instead, the
* CombineFunction calls the combine method once per partition for combining a group of results.
* This operator is suitable for combining values into an intermediate format before doing a
* proper groupReduce where the data is shuffled across the node for further reduction. The
* GroupReduce operator can also be supplied with a combiner by implementing the RichGroupReduce
* function. The combine method of the RichGroupReduce function demands input and output type to
* be the same. The CombineFunction, on the other side, can have an arbitrary output type.
*
* @param combiner
* The GroupCombineFunction that is applied on the DataSet.
* @return A GroupCombineOperator which represents the combined DataSet.
*/
public <R> GroupCombineOperator<T, R> combineGroup(GroupCombineFunction<T, R> combiner) {
if (combiner == null) {
throw new NullPointerException("GroupCombine function must not be null.");
}
TypeInformation<R> v1 = TypeExtractor.getGroupCombineReturnTypes(combiner, this.getInputDataSet().getType(), Utils.getCallLocationName(), true);
return new GroupCombineOperator<T, R>(this, v1, inputDataSet.clean(combiner), Utils.getCallLocationName());
}
| 3.26
|
flink_UnsortedGrouping_minBy_rdh
|
/**
* Applies a special case of a reduce transformation (minBy) on a grouped {@link DataSet}.
*
* <p>The transformation consecutively calls a {@link ReduceFunction} until only a single
* element remains which is the result of the transformation. A ReduceFunction combines two
* elements into one new element of the same type.
*
* @param fields
* Keys taken into account for finding the minimum.
* @return A {@link ReduceOperator} representing the minimum.
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public ReduceOperator<T> minBy(int... fields) {
// Check for using a tuple
if ((!this.inputDataSet.getType().isTupleType()) || (!(this.inputDataSet.getType() instanceof TupleTypeInfo))) {
throw new InvalidProgramException("Method minBy(int) only works on tuples.");
}
return new ReduceOperator<T>(this, new SelectByMinFunction(((TupleTypeInfo) (this.inputDataSet.getType())), fields), Utils.getCallLocationName());
}
| 3.26
|
flink_FileMergingSnapshotManagerBase_createManagedDirectory_rdh
|
// ------------------------------------------------------------------------
// utilities
// ------------------------------------------------------------------------
private void createManagedDirectory(Path managedPath) {
try {
FileStatus fileStatus = null;
try {
fileStatus = fs.getFileStatus(managedPath);
} catch (FileNotFoundException e) {
// expected exception when the path does not exist; we ignore it.
}
if (fileStatus == null) {
fs.mkdirs(managedPath);
LOG.info("Created a directory {} for checkpoint file-merging.", managedPath);
} else if (fileStatus.isDir()) {
LOG.info("Reusing previous directory {} for checkpoint file-merging.", managedPath);
} else {
throw new FlinkRuntimeException(("The managed path " + managedPath) + " for file-merging is occupied by another file. Cannot create directory.");
}
} catch (IOException e) {
throw new FlinkRuntimeException(("Cannot create directory " + managedPath) + " for file-merging ", e);
}
}
| 3.26
|
flink_FileMergingSnapshotManagerBase_createPhysicalFile_rdh
|
/**
* Create a physical file in the right location (managed directory), which is specified by the scope of
* this checkpoint and current subtask.
*
* @param subtaskKey
* the {@link SubtaskKey} of current subtask.
* @param scope
* the scope of the checkpoint.
* @return the created physical file.
* @throws IOException
* if anything goes wrong with file system.
*/
@Nonnull
protected PhysicalFile createPhysicalFile(SubtaskKey subtaskKey, CheckpointedStateScope scope) throws IOException {
PhysicalFile result;
Exception latestException = null;
Path dirPath = getManagedDir(subtaskKey, scope);
if (dirPath == null) {
throw new IOException(((("Could not get " + scope) + " path for subtask ") + subtaskKey) + ", the directory may have not been created.");
}
for (int attempt = 0; attempt < 10; attempt++) {
try {
OutputStreamAndPath streamAndPath = EntropyInjector.createEntropyAware(fs, generatePhysicalFilePath(dirPath), WriteMode.NO_OVERWRITE);
FSDataOutputStream outputStream = streamAndPath.stream();
Path filePath = streamAndPath.path();
result = new PhysicalFile(outputStream, filePath, this.physicalFileDeleter, scope);
updateFileCreationMetrics(filePath);
return result;
} catch (Exception e) {
latestException = e;
}
}
throw new IOException("Could not open output stream for state file merging.", latestException);
}
| 3.26
|
flink_FileMergingSnapshotManagerBase_generatePhysicalFilePath_rdh
|
/**
* Generate a file path for a physical file.
*
* @param dirPath
* the parent directory path for the physical file.
* @return the generated file path for a physical file.
*/
protected Path generatePhysicalFilePath(Path dirPath) {
// this must be called after initFileSystem() is called
// so the checkpoint directories must be not null if we reach here
final String fileName = UUID.randomUUID().toString();
return new Path(dirPath, fileName);
}
| 3.26
|
flink_FileMergingSnapshotManagerBase_createLogicalFile_rdh
|
// ------------------------------------------------------------------------
// logical & physical file
// ------------------------------------------------------------------------
/**
* Create a logical file on a physical file.
*
* @param physicalFile
* the underlying physical file.
* @param startOffset
* the offset of the physical file that the logical file start from.
* @param length
* the length of the logical file.
* @param subtaskKey
* the id of the subtask that the logical file belongs to.
* @return the created logical file.
*/
protected LogicalFile createLogicalFile(@Nonnull PhysicalFile physicalFile, int startOffset, int length, @Nonnull SubtaskKey subtaskKey) {
LogicalFileId fileID = LogicalFileId.generateRandomId();
return new LogicalFile(fileID, physicalFile, startOffset, length, subtaskKey);
}
| 3.26
|
flink_FileMergingSnapshotManagerBase_deletePhysicalFile_rdh
|
/**
* Delete a physical file by given file path. Use the io executor to do the deletion.
*
* @param filePath
* the given file path to delete.
*/
protected final void deletePhysicalFile(Path filePath) {
f0.execute(() -> {
try {
fs.delete(filePath, false);
LOG.debug("Physical file deleted: {}.", filePath);
} catch (IOException e) {
LOG.warn("Fail to delete file: {}", filePath);
}
});
}
| 3.26
|
flink_FileMergingSnapshotManagerBase_getManagedDir_rdh
|
// ------------------------------------------------------------------------
// file system
// ------------------------------------------------------------------------
@Override
public Path getManagedDir(SubtaskKey subtaskKey, CheckpointedStateScope scope) {
if (scope.equals(CheckpointedStateScope.SHARED)) {
return managedSharedStateDir.get(subtaskKey);
} else {
return managedExclusiveStateDir;
}
}
| 3.26
|
flink_TemplateUtils_extractProcedureLocalFunctionTemplates_rdh
|
/**
* Retrieve local templates from procedure method.
*/
static Set<FunctionTemplate> extractProcedureLocalFunctionTemplates(DataTypeFactory typeFactory, Method method) {
return m0(typeFactory, collectAnnotationsOfMethod(ProcedureHint.class, method));
}
| 3.26
|
flink_TemplateUtils_extractGlobalFunctionTemplates_rdh
|
/**
* Retrieve global templates from function class.
*/
static Set<FunctionTemplate> extractGlobalFunctionTemplates(DataTypeFactory typeFactory, Class<? extends UserDefinedFunction> function) {
return asFunctionTemplates(typeFactory, collectAnnotationsOfClass(FunctionHint.class, function));
}
| 3.26
|
flink_TemplateUtils_findInputOnlyTemplates_rdh
|
/**
* Hints that only declare an input.
*/
static Set<FunctionSignatureTemplate> findInputOnlyTemplates(Set<FunctionTemplate> global, Set<FunctionTemplate> local, Function<FunctionTemplate, FunctionResultTemplate> accessor) {
return Stream.concat(global.stream(), local.stream()).filter(t -> (t.getSignatureTemplate() != null) && (accessor.apply(t) == null)).map(FunctionTemplate::getSignatureTemplate).collect(Collectors.toCollection(LinkedHashSet::new));
}
| 3.26
|
flink_TemplateUtils_findResultOnlyTemplates_rdh
|
/**
* Find a template that only specifies a result.
*/
static Set<FunctionResultTemplate> findResultOnlyTemplates(Set<FunctionTemplate> functionTemplates, Function<FunctionTemplate, FunctionResultTemplate> accessor) {
return functionTemplates.stream().filter(t -> (t.getSignatureTemplate() == null) && (accessor.apply(t) != null)).map(accessor).collect(Collectors.toCollection(LinkedHashSet::new));
}
| 3.26
|
flink_TemplateUtils_m0_rdh
|
/**
* Converts {@link ProcedureHint}s to {@link FunctionTemplate}.
*/
static Set<FunctionTemplate> m0(DataTypeFactory typeFactory, Set<ProcedureHint> hints) {
return hints.stream().map(hint -> {
try {
return FunctionTemplate.fromAnnotation(typeFactory, hint);
} catch (Throwable t) {
throw extractionError(t, "Error in procedure hint annotation.");
}
}).collect(Collectors.toCollection(LinkedHashSet::new));
}
| 3.26
|
flink_TemplateUtils_asFunctionTemplates_rdh
|
/**
* Converts {@link FunctionHint}s to {@link FunctionTemplate}.
*/
static Set<FunctionTemplate> asFunctionTemplates(DataTypeFactory typeFactory, Set<FunctionHint> hints) {
return hints.stream().map(hint -> {
try {
return FunctionTemplate.fromAnnotation(typeFactory, hint);
} catch (Throwable t) {
throw extractionError(t, "Error in function hint annotation.");
}
}).collect(Collectors.toCollection(LinkedHashSet::new));
}
| 3.26
|
flink_TemplateUtils_extractLocalFunctionTemplates_rdh
|
/**
* Retrieve local templates from function method.
*/
static Set<FunctionTemplate> extractLocalFunctionTemplates(DataTypeFactory typeFactory, Method method) {
return asFunctionTemplates(typeFactory, collectAnnotationsOfMethod(FunctionHint.class, method));
}
| 3.26
|
flink_TemplateUtils_extractProcedureGlobalFunctionTemplates_rdh
|
/**
* Retrieve global templates from procedure class.
*/
static Set<FunctionTemplate> extractProcedureGlobalFunctionTemplates(DataTypeFactory typeFactory, Class<? extends Procedure> procedure) {
return m0(typeFactory, collectAnnotationsOfClass(ProcedureHint.class, procedure));
}
| 3.26
|
flink_TemplateUtils_findResultMappingTemplates_rdh
|
/**
* Hints that map a signature to a result.
*/
static Set<FunctionTemplate> findResultMappingTemplates(Set<FunctionTemplate> globalTemplates, Set<FunctionTemplate> localTemplates, Function<FunctionTemplate, FunctionResultTemplate> accessor) {
return Stream.concat(globalTemplates.stream(), localTemplates.stream()).filter(t -> (t.getSignatureTemplate() != null) && (accessor.apply(t) != null)).collect(Collectors.toCollection(LinkedHashSet::new));
}
| 3.26
|
flink_TemplateUtils_findResultOnlyTemplate_rdh
|
/**
* Hints that only declare a result (either accumulator or output).
*/
@Nullable
static FunctionResultTemplate findResultOnlyTemplate(Set<FunctionResultTemplate> globalResultOnly, Set<FunctionResultTemplate> localResultOnly, Set<FunctionTemplate> explicitMappings, Function<FunctionTemplate, FunctionResultTemplate> accessor, String hintType) {
final Set<FunctionResultTemplate> resultOnly = Stream.concat(globalResultOnly.stream(), localResultOnly.stream()).collect(Collectors.toCollection(LinkedHashSet::new));
final Set<FunctionResultTemplate> allResults = Stream.concat(resultOnly.stream(), explicitMappings.stream().map(accessor)).collect(Collectors.toCollection(LinkedHashSet::new));
if ((resultOnly.size() == 1) && (allResults.size() == 1)) {
return resultOnly.stream().findFirst().orElse(null);
}
// different results is only fine as long as those come from a mapping
if ((resultOnly.size() > 1)
|| ((!resultOnly.isEmpty()) && (!explicitMappings.isEmpty()))) {
throw extractionError(String.format("%s hints that lead to ambiguous results are not allowed.", hintType));
}
return null;
}
| 3.26
|
flink_ResolvedExpression_asSerializableString_rdh
|
/**
* Returns a string that fully serializes this instance. The serialized string can be used for
* storing the query in, for example, a {@link org.apache.flink.table.catalog.Catalog} as a
* view.
*
* @return detailed string for persisting in a catalog
*/
default String asSerializableString() {
throw new TableException(String.format("Expression '%s' is not string serializable. Currently, only expressions that " + "originated from a SQL expression have a well-defined string representation.", asSummaryString()));
}
| 3.26
|
flink_PythonStreamGroupWindowAggregateOperator_createTumblingGroupWindowAggregateOperator_rdh
|
// The below static create methods are reflected from the planner
public static <K, W extends Window> PythonStreamGroupWindowAggregateOperator<K, W> createTumblingGroupWindowAggregateOperator(Configuration config, RowType inputType, RowType outputType,
PythonAggregateFunctionInfo[] aggregateFunctions, DataViewSpec[][] dataViewSpecs, int[] grouping, int indexOfCountStar, boolean generateUpdateBefore, boolean countStarInserted, int inputTimeFieldIndex, WindowAssigner<W> windowAssigner, boolean isRowTime, boolean isTimeWindow, long size, long allowedLateness, NamedWindowProperty[] namedProperties, ZoneId shiftTimeZone) {
return new PythonStreamGroupWindowAggregateOperator<>(config, inputType, outputType, aggregateFunctions, dataViewSpecs, grouping, indexOfCountStar, generateUpdateBefore, countStarInserted, inputTimeFieldIndex, windowAssigner, WindowType.TUMBLING_GROUP_WINDOW, isRowTime, isTimeWindow, size, 0, 0, allowedLateness, namedProperties, shiftTimeZone);
}
| 3.26
|
flink_CheckpointedInputGate_processPriorityEvents_rdh
|
/**
* Eagerly pulls and processes all priority events. Must be called from task thread.
*
* <p>Basic assumption is that no priority event needs to be handled by the {@link StreamTaskNetworkInput}.
*/
private void processPriorityEvents() throws IOException, InterruptedException {
// check if the priority event is still not processed (could have been pulled before mail
// was being executed)
boolean hasPriorityEvent = inputGate.getPriorityEventAvailableFuture().isDone();
while (hasPriorityEvent) {
// process as many priority events as possible
final Optional<BufferOrEvent> bufferOrEventOpt = pollNext();
if (!bufferOrEventOpt.isPresent()) {
break;
}
final BufferOrEvent bufferOrEvent = bufferOrEventOpt.get();
checkState(bufferOrEvent.hasPriority(),
"Should only poll priority events");
hasPriorityEvent = bufferOrEvent.morePriorityEvents();
}
// re-enqueue mail to process future priority events
waitForPriorityEvents(inputGate, mailboxExecutor);
}
| 3.26
|
flink_CheckpointedInputGate_getNumberOfInputChannels_rdh
|
/**
*
* @return number of underlying input channels.
*/
public int getNumberOfInputChannels() {
return inputGate.getNumberOfInputChannels();
}
| 3.26
|
flink_CheckpointedInputGate_toString_rdh
|
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
return barrierHandler.toString();
}
| 3.26
|
flink_StreamingFileSink_initializeState_rdh
|
// --------------------------- Sink Methods -----------------------------
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
this.helper = new StreamingFileSinkHelper<>(bucketsBuilder.createBuckets(getRuntimeContext().getIndexOfThisSubtask()), context.isRestored(), context.getOperatorStateStore(), ((StreamingRuntimeContext) (getRuntimeContext())).getProcessingTimeService(), bucketCheckInterval);
}
| 3.26
|
flink_StreamingFileSink_build_rdh
|
/**
* Creates the actual sink.
*/
public StreamingFileSink<IN> build() {
return new StreamingFileSink<>(this, bucketCheckInterval);
}
| 3.26
|
flink_TimerGauge_getAccumulatedCount_rdh
|
/**
* @return the accumulated period by the given TimerGauge.
*/
public synchronized long getAccumulatedCount() {
return accumulatedCount;
}
| 3.26
|
flink_ReusingKeyGroupedIterator_next_rdh
|
/**
* Prior to calling this method, call hasNext() once!
*/
@Override
public E next() {
if (this.currentIsUnconsumed || hasNext()) {
this.currentIsUnconsumed = false;
return ReusingKeyGroupedIterator.this.current;
} else {
throw new NoSuchElementException();
}
}
| 3.26
|
flink_ClusterEntryPointExceptionUtils_tryEnrichClusterEntryPointError_rdh
|
/**
* Tries to enrich the passed exception or its causes with additional information.
*
* <p>This method improves error messages for direct and metaspace {@link OutOfMemoryError}. It
* adds descriptions about possible causes and ways of resolution.
*
* @param root
* The Throwable of which the cause tree shall be traversed.
*/
public static void tryEnrichClusterEntryPointError(@Nullable Throwable root) {
tryEnrichOutOfMemoryError(root, JM_METASPACE_OOM_ERROR_MESSAGE, JM_DIRECT_OOM_ERROR_MESSAGE, JM_HEAP_SPACE_OOM_ERROR_MESSAGE);
}
| 3.26
|
flink_StreamOperatorFactory_isInputTypeConfigurable_rdh
|
/**
* Whether the stream operator needs to be configured with the data type it will operate on.
*/
default boolean isInputTypeConfigurable() {
return false;
}
| 3.26
|
flink_StreamOperatorFactory_setOutputType_rdh
|
/**
* Is called by the {@link StreamGraph#addOperator} method when the {@link StreamGraph} is
* generated. The method is called with the output {@link TypeInformation} which is also used
* for the {@link StreamTask} output serializer.
*
* @param type
* Output type information of the {@link StreamTask}
* @param executionConfig
* Execution configuration
*/
default void setOutputType(TypeInformation<OUT> type, ExecutionConfig executionConfig) {
}
| 3.26
|
flink_StreamOperatorFactory_isStreamSource_rdh
|
/**
* Is this factory for {@link StreamSource}.
*/
default boolean isStreamSource() {
return false;
}
| 3.26
|
flink_StreamOperatorFactory_setInputType_rdh
|
/**
* Is called by the {@link StreamGraph#addOperator} method when the {@link StreamGraph} is
* generated.
*
* @param type
* The data type of the input.
* @param executionConfig
* The execution config for this parallel execution.
*/
default void setInputType(TypeInformation<?> type, ExecutionConfig executionConfig) {
}
| 3.26
|
flink_RequestJobDetails_shouldIncludeFinished_rdh
|
// ------------------------------------------------------------------------
public boolean shouldIncludeFinished() {
return includeFinished;
}
| 3.26
|
flink_RequestJobDetails_equals_rdh
|
// ------------------------------------------------------------------------
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
} else if (o instanceof RequestJobDetails) {
RequestJobDetails that = ((RequestJobDetails) (o));
return (this.includeFinished == that.includeFinished) && (this.includeRunning == that.includeRunning);
} else {
return false;
}
}
| 3.26
|
flink_ConstraintEnforcer_build_rdh
|
/**
* If none of the NOT NULL, CHAR/VARCHAR length, or BINARY/VARBINARY enforcers is
* configured, null is returned.
*/
public ConstraintEnforcer build() {
if (isConfigured) {
String operatorName = ("ConstraintEnforcer[" + String.join(", ", operatorNames)) + "]";
return new ConstraintEnforcer(notNullEnforcer, notNullFieldIndices, typeLengthEnforcer, charFieldInfo != null ? charFieldInfo.stream().mapToInt(fi -> fi.fieldIdx).toArray() : null, charFieldInfo != null ? charFieldInfo.stream().mapToInt(fi -> fi.length).toArray() : null, charFieldInfo != null ? buildCouldPad(charFieldInfo) : null, binaryFieldInfo != null ? binaryFieldInfo.stream().mapToInt(fi -> fi.fieldIdx).toArray() : null, binaryFieldInfo != null ? binaryFieldInfo.stream().mapToInt(fi -> fi.length).toArray() : null, binaryFieldInfo != null ? buildCouldPad(binaryFieldInfo) : null, allFieldNames, operatorName);
}
return null;
}
| 3.26
|
flink_AbstractHeapState_getStateTable_rdh
|
/**
* This should only be used for testing.
*/
@VisibleForTesting
public StateTable<K, N, SV> getStateTable() {
return stateTable;
}
| 3.26
|
flink_AbstractHeapState_clear_rdh
|
// ------------------------------------------------------------------------
@Override
public final void clear() {
stateTable.remove(currentNamespace);
}
| 3.26
|
flink_KeyGroupsStateHandle_getDelegateStateHandle_rdh
|
/**
*
* @return The handle to the actual states
*/
public StreamStateHandle getDelegateStateHandle() {
return stateHandle;
}
| 3.26
|
flink_KeyGroupsStateHandle_getGroupRangeOffsets_rdh
|
/**
*
* @return the internal key-group range to offsets metadata
*/
public KeyGroupRangeOffsets getGroupRangeOffsets() {
return groupRangeOffsets;
}
| 3.26
|
flink_JobManagerCheckpointStorage_createFromConfig_rdh
|
/**
* Creates a new {@link JobManagerCheckpointStorage} using the given configuration.
*
* @param config
* The Flink configuration (loaded by the TaskManager).
* @param classLoader
* The class loader that should be used to load the checkpoint storage.
* @return The created checkpoint storage.
* @throws IllegalConfigurationException
* If the configuration misses critical values, or
* specifies invalid values
*/
public static JobManagerCheckpointStorage createFromConfig(ReadableConfig config, ClassLoader classLoader) throws IllegalConfigurationException {
try {
return new JobManagerCheckpointStorage().configure(config, classLoader);
} catch (IllegalArgumentException e) {
throw new IllegalConfigurationException("Invalid configuration for the state backend", e);
}
}
| 3.26
|
flink_JobManagerCheckpointStorage_configure_rdh
|
// ------------------------------------------------------------------------
// Reconfiguration
// ------------------------------------------------------------------------
/**
* Creates a copy of this checkpoint storage that uses the values defined in the configuration
* for fields that were not specified in this checkpoint storage.
*
* @param config
* The configuration
* @return The re-configured variant of the checkpoint storage
*/
@Override
public JobManagerCheckpointStorage configure(ReadableConfig config, ClassLoader classLoader) {
return new JobManagerCheckpointStorage(this, config);
}
| 3.26
|
flink_JobManagerCheckpointStorage_getCheckpointPath_rdh
|
/**
*
* @return The location where checkpoints will be externalized if set.
*/
@Nullable
public Path getCheckpointPath() {
return location.getBaseCheckpointPath();
}
| 3.26
|
flink_JobManagerCheckpointStorage_getMaxStateSize_rdh
|
// ------------------------------------------------------------------------
// Properties
// ------------------------------------------------------------------------
/**
* Gets the maximum size that an individual state can have, as configured in the constructor (by
* default {@value #DEFAULT_MAX_STATE_SIZE}).
*
* @return The maximum size that an individual state can have
*/
public int getMaxStateSize() {
return maxStateSize;
}
| 3.26
|
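A short configuration sketch for the JobManagerCheckpointStorage entries above, added for illustration; it assumes the public JobManagerCheckpointStorage(int maxStateSize) constructor and CheckpointConfig#setCheckpointStorage and is not part of the dataset:

// Illustrative sketch only; class name and the 2 MB cap are hypothetical choices.
import org.apache.flink.runtime.state.storage.JobManagerCheckpointStorage;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class JobManagerCheckpointStorageSketch {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.enableCheckpointing(10_000);

        // Keep checkpoint state on the JobManager heap, capping each state handle at 2 MB
        // (the default maximum is DEFAULT_MAX_STATE_SIZE, 5 MB).
        env.getCheckpointConfig().setCheckpointStorage(new JobManagerCheckpointStorage(2 * 1024 * 1024));
    }
}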
flink_JobManagerCheckpointStorage_resolveCheckpoint_rdh
|
// ------------------------------------------------------------------------
// checkpoint state persistence
// ------------------------------------------------------------------------
@Override
public CompletedCheckpointStorageLocation resolveCheckpoint(String pointer) throws IOException {
return AbstractFsCheckpointStorageAccess.resolveCheckpointPointer(pointer);
}
| 3.26
|
flink_JobManagerCheckpointStorage_toString_rdh
|
// ------------------------------------------------------------------------
// utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
return (("JobManagerCheckpointStorage (checkpoints to JobManager) " + "( maxStateSize: ") + maxStateSize) + ")";
}
| 3.26
|