name stringlengths 12 178 | code_snippet stringlengths 8 36.5k | score float64 3.26 3.68 |
|---|---|---|
flink_OutputFormatBase_postOpen_rdh | /**
* Initialize the OutputFormat. This method is called at the end of {@link OutputFormatBase#open(int, int)}.
*/
protected void postOpen() {
} | 3.26 |
flink_TimestampedValue_getTimestamp_rdh | /**
*
* @return The timestamp associated with this stream value in milliseconds.
*/
public long getTimestamp() {
if (hasTimestamp) {
return timestamp;
} else
{
throw new IllegalStateException("Record has no timestamp. Is the time characteristic set to 'ProcessingTime', or " + "did you for... | 3.26 |
flink_TimestampedValue_getValue_rdh | /**
*
* @return The value wrapped in this {@link TimestampedValue}.
*/
public T getValue() {
return value;
} | 3.26 |
flink_TimestampedValue_from_rdh | /**
* Creates a TimestampedValue from given {@link StreamRecord}.
*
* @param streamRecord
* The StreamRecord object from which TimestampedValue is to be created.
*/
public static <T> TimestampedValue<T> from(StreamRecord<T> streamRecord) {
if (streamRecord.hasTimestamp()) {
return new TimestampedVal... | 3.26 |
flink_TimestampedValue_hasTimestamp_rdh | /**
* Checks whether this record has a timestamp.
*
* @return True if the record has a timestamp, false if not.
*/
public boolean hasTimestamp() {
return hasTimestamp;
} | 3.26 |
flink_TimestampedValue_m0_rdh | /**
* Creates a {@link StreamRecord} from this TimestampedValue.
*/
public StreamRecord<T> m0() {
StreamRecord<T> streamRecord = new StreamRecord<>(value);
if (hasTimestamp) {
streamRecord.setTimestamp(timestamp);
}
return streamRecord;
} | 3.26 |
flink_CloseableRegistry_unregisterAndCloseAll_rdh | /**
* Unregisters all given {@link Closeable} objects from this registry and closes all objects
 * that were actually registered. Suppresses (and collects) all exceptions that happen
 * during closing and throws only when all {@link Closeable} objects have been processed.
*
* @param t... | 3.26 |
flink_CloseableRegistry_doClose_rdh | /**
* This implementation doesn't imply any exception during closing due to backward compatibility.
*/
@Overridepublic void doClose(List<Closeable> toClose) throws IOException {
IOUtils.closeAllQuietly(reverse(toClose));
} | 3.26 |
flink_FileSourceSplitSerializer_getVersion_rdh | // ------------------------------------------------------------------------
@Override
public int getVersion() {
return VERSION;
} | 3.26 |
flink_SegmentsUtil_getFloat_rdh | /**
* get float from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static float getFloat(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 4)) {
return segments[0].getFloat(offset);
} else {
return getFloatMultiSegments(segments,... | 3.26 |
flink_SegmentsUtil_getByte_rdh | /**
* get byte from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static byte getByte(MemorySegment[]
segments, int offset) {
if (inFirstSegment(segments, offset, 1)) {
return segments[0].get(offset);
} else {
return getByteMultiSegments(se... | 3.26 |
flink_SegmentsUtil_hashByWords_rdh | /**
* hash segments to int, numBytes must be aligned to 4 bytes.
*
* @param segments
* Source segments.
* @param offset
* Source segments offset.
* @param numBytes
* the number bytes to hash.
*/
public static int hashByWords(MemorySegment[] segments, int offset, int numBytes) {
if (inFirstSegment(... | 3.26 |
flink_SegmentsUtil_getShort_rdh | /**
* get short from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static short getShort(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 2)) {
return segments[0].getShort(offset);
} else {
return getShortMultiSegm... | 3.26 |
flink_SegmentsUtil_copyFromBytes_rdh | /**
* Copy target segments from source byte[].
*
* @param segments
* target segments.
* @param offset
* target segments offset.
* @param bytes
* source byte[].
* @param bytesOffset
* source byte[] offset.
* @param numBytes
* the number bytes to copy.
*/
public static void copyFromBytes(MemorySegm... | 3.26 |
flink_SegmentsUtil_copyToView_rdh | /**
* Copy bytes of segments to output view. Note: It just copies the data in, not include the
* length.
*
* @param segments
* source segments
* @param offset
* offset for segments
* @param sizeInBytes
* size in bytes
* @param target
* target output view
*/
public static void copyToView(MemorySegmen... | 3.26 |
flink_SegmentsUtil_setDouble_rdh | /**
* set double from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static void setDouble(MemorySegment[] segments, int offset, double value) {
if
(inFirstSegment(segments, offset, 8)) {
segments[0].putDouble(offset, value);} else {
setD... | 3.26 |
flink_SegmentsUtil_getBytes_rdh | /**
* Maybe not copied, if want copy, please use copyTo.
*/
public static byte[] getBytes(MemorySegment[] segments,
int baseOffset, int sizeInBytes) {
// avoid copy if `base` is `byte[]`
if (segments.length == 1) {byte[] heapMemory = segments[0].getHeapMemory();
if (((baseOffset == 0) && (heapMemory... | 3.26 |
flink_SegmentsUtil_find_rdh | /**
* Find equal segments2 in segments1.
*
* @param segments1
* segs to find.
* @param segments2
* sub segs.
 * @return Return the found offset, return -1 if not found.
*/
public static int find(MemorySegment[] segments1, int offset1, int numBytes1, MemorySegment[] segments2, int offset2, int numBytes2) {
... | 3.26 |
flink_SegmentsUtil_getInt_rdh | /**
* get int from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static int getInt(MemorySegment[] segments, int offset) {if (inFirstSegment(segments, offset, 4)) {
return segments[0].getInt(offset);
} else {
return getIntMultiSegments(segments... | 3.26 |
flink_SegmentsUtil_bitSet_rdh | /**
* set bit from segments.
*
* @param segments
* target segments.
* @param baseOffset
* bits base offset.
* @param index
* bit index from base offset.
*/
public static voi... | 3.26 |
flink_SegmentsUtil_copyToUnsafe_rdh | /**
* Copy segments to target unsafe pointer.
*
* @param segments
* Source segments.
* @param offset
* The position where the bytes are started to be read from these memory segments.
* @param target
* The unsafe memory to copy the bytes to.
* @param pointer
* The position in the target unsafe memory t... | 3.26 |
flink_SegmentsUtil_copyToBytes_rdh | /**
* Copy segments to target byte[].
*
* @param segments
* Source segments.
* @param offset
* Source segments offset.
* @param bytes
* target byte[].
* @param bytesOffset
* target byte[] offset.
* @param numBytes
* the number bytes to copy.
*/
public static byte[] copyToBytes(MemorySegment[] seg... | 3.26 |
flink_SegmentsUtil_setByte_rdh | /**
* set byte from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static void setByte(MemorySegment[] segments, int offset, byte value) {
if (inFirstSegment(segments, offset, 1)) {
segments[0].put(offset, value);
} else {
setByteMultiSegmen... | 3.26 |
flink_SegmentsUtil_bitGet_rdh | /**
* read bit from segments.
*
* @param segments
* target segments.
* @param baseOffset
* bits base offset.
* @param index
* bit index from base offset.
*/public static boolean bitGet(MemorySegment[] segments, int baseOffset, int index) {
int offset
= baseOffset + byteIndex(index);
byte cur... | 3.26 |
flink_SegmentsUtil_setFloat_rdh | /**
* set float from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static void setFloat(MemorySegment[] segments, int offset, float value) {
if (inFirstSegment(segments, offset, 4)) {
segments[0].putFloat(offset, value);
} else {setFloatMultiSegments(segments,... | 3.26 |
flink_SegmentsUtil_hash_rdh | /**
* hash segments to int.
*
* @param segments
* Source segments.
* @param offset
* Source segments offset.
* @param numBytes
* the number bytes to hash.
*/
public static i... | 3.26 |
flink_SegmentsUtil_getLong_rdh | /**
* get long from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static long getLong(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 8)) {
return segments[0].getLong(offset);
} else {
return getLongMultiSegm... | 3.26 |
flink_SegmentsUtil_m0_rdh | /**
* Equals two memory segments regions.
*
* @param segments1
* Segments 1
* @param offset1
* Offset of segments1 to start equaling
* @param segments2
* Segments 2
* @param offset2
* Offset of segments2 to start equaling
* @param len
* Length of the equaled memory region
* @return true if equal,... | 3.26 |
flink_SegmentsUtil_getBoolean_rdh | /**
* get boolean from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static boolean getBoolean(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 1)) {
return segments[0].getBoolean(offset);
} else {
return getBool... | 3.26 |
flink_SegmentsUtil_getDouble_rdh | /**
* get double from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/public static double getDouble(MemorySegment[] segments, int offset) {
if (inFirstSegment(segments, offset, 8)) {
return segments[0].getDouble(offset);
} else {
return getDoubleMultiSegments(segme... | 3.26 |
flink_SegmentsUtil_setShort_rdh | /**
* set short from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static void setShort(MemorySegment[] segments, int offset, short value) {
if (inFirstSegment(segments, offset, 2)) {
segments[0].putShort(offset, value);
} else {
setSh... | 3.26 |
flink_SegmentsUtil_m1_rdh | /**
* read bit.
*
* @param segment
* target segment.
* @param baseOffset
* bits base offset.
* @param index
* bit index from base offset.
*/
public static boolean m1(MemorySegment segment, int baseOffset, int index) {
int offset = baseOffset + byteIndex(index);
byte current = segment.get(offset);... | 3.26 |
flink_SegmentsUtil_allocateReuseBytes_rdh | /**
* Allocate bytes that is only for temporary usage, it should not be stored in somewhere else.
* Use a {@link ThreadLocal} to reuse bytes to avoid overhead of byte[] new and gc.
*
* <p>If there are methods that can only accept a byte[], instead of a MemorySegment[]
* parameter, we can allocate a reuse bytes and... | 3.26 |
flink_SegmentsUtil_setLong_rdh | /**
* set long from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/public static void setLong(MemorySegment[] segments, int
offset, long value) {
if (inFirstSegment(segments, offset, 8)) {
segments[0].putLong(offset, value);
} else {
setLongMultiSeg... | 3.26 |
flink_SegmentsUtil_setInt_rdh | /**
* set int from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static void setInt(MemorySegment[] segments, int offset, int value) {
if (inFirstSegment(segments, offset, 4)) {
segments[0].putInt(offset, value);
} else {
setIntMultiSegme... | 3.26 |
flink_SegmentsUtil_inFirstSegment_rdh | /**
* Is it just in first MemorySegment, we use quick way to do something.
*/private static boolean inFirstSegment(MemorySegment[] segments, int offset, int numBytes) {
return (numBytes + offset) <= segments[0].size();
} | 3.26 |
flink_SegmentsUtil_bitUnSet_rdh | /**
* unset bit from segments.
*
* @param segments
* target segments.
* @param baseOffset
* bits base offset.
* @param index
* bit index from base offset.
*/
public static void bitUnSet(MemorySegment[]
segments, int baseOffset, int index) {if (segments.length == 1) {
MemorySegment segment = segme... | 3.26 |
flink_SegmentsUtil_setBoolean_rdh | /**
* set boolean from segments.
*
* @param segments
* target segments.
* @param offset
* value offset.
*/
public static void setBoolean(MemorySegment[] segments, int offset, boolean value) {
if (inFirstSegment(segments, offset, 1)) {
segments[0].putBoolean(offset, value);
} else {
s... | 3.26 |
flink_PartitionOperator_getCustomPartitioner_rdh | // --------------------------------------------------------------------------------------------
// Properties
// --------------------------------------------------------------------------------------------
/**
* Gets the custom partitioner from this partitioning.
*
* @return The custom partitioner.
*/
@Internalpubl... | 3.26 |
flink_PartitionOperator_translateToDataFlow_rdh | // --------------------------------------------------------------------------------------------
// Translation
// --------------------------------------------------------------------------------------------
protected SingleInputOperator<?, T, ?> translateToDataFlow(Operator<T> input) {
String name = "Partition at ... | 3.26 |
flink_PartitionOperator_withOrders_rdh | /**
* Sets the order of keys for range partitioning. NOTE: Only valid for {@link PartitionMethod#RANGE}.
*
* @param orders
* array of orders for each specified partition key
 * @return The PartitionOperator with properly set orders for the given keys
*/
@PublicEvolving
public PartitionOperator<T>
withOrders(Order..... | 3.26 |
flink_SkipListKeyComparator_compareNamespaceAndNode_rdh | /**
* Compares the namespace in the memory segment with the namespace in the node . Returns a
* negative integer, zero, or a positive integer as the first node is less than, equal to, or
* greater than the second.
*
* @param namespaceSegment
* memory segment to store the namespace.
* @param namespaceOffset
* ... | 3.26 |
flink_SkipListKeyComparator_compareTo_rdh | /**
* Compares for order. Returns a negative integer, zero, or a positive integer as the first node
* is less than, equal to, or greater than the second.
*
* @param left
* left skip list key's ByteBuffer
* @param leftOffset
* left skip list key's ByteBuffer's offset
* @param right
* right skip list key's... | 3.26 |
flink_RocksDBProperty_getConfigKey_rdh | /**
*
* @return key for enabling metric using {@link org.apache.flink.configuration.Configuration}.
*/
public String getConfigKey() {
return String.format(CONFIG_KEY_FORMAT, property);
} | 3.26 |
flink_RocksDBProperty_getRocksDBProperty_rdh | /**
*
* @return property string that can be used to query {@link RocksDB#getLongProperty(ColumnFamilyHandle, String)}.
*/
public String getRocksDBProperty() {
return String.format(ROCKS_DB_PROPERTY_FORMAT, property);
} | 3.26 |
flink_BinaryInputFormat_getCurrentState_rdh | // --------------------------------------------------------------------------------------------
// Checkpointing
// --------------------------------------------------------------------------------------------
@PublicEvolving
@Override
public Tuple2<Long, Long> getCurrentState() throws IOException {
if (this.blockBased... | 3.26 |
flink_BinaryInputFormat_createStatistics_rdh | /**
* Fill in the statistics. The last modification time and the total input size are prefilled.
*
* @param files
* The files that are associated with this block input format.
* @param stats
* The pre-filled statistics.
*/
protected SequentialStatistics createStatistics(List<FileStatus> files, FileBaseStatis... | 3.26 |
flink_ReusingBuildFirstReOpenableHashJoinIterator_reopenProbe_rdh | /**
* Set new input for probe side
*
* @throws IOException
*/
public void reopenProbe(MutableObjectIterator<V2> probeInput) throws IOException {
reopenHashTable.reopenProbe(probeInput);
} | 3.26 |
flink_TimeWindow_getEnd_rdh | /**
* Gets the end timestamp of this window. The end timestamp is exclusive, meaning it is the
* first timestamp that does not belong to this window any more.
*
* @return The exclusive end timestamp of this window.
*/
public long getEnd() {
return f0;
} | 3.26 |
flink_TimeWindow_snapshotConfiguration_rdh | // ------------------------------------------------------------------------
@Override
public TypeSerializerSnapshot<TimeWindow> snapshotConfiguration() {
return new TimeWindow.Serializer.TimeWindowSerializerSnapshot();
} | 3.26 |
flink_TimeWindow_modInverse_rdh | /**
* Compute the inverse of (odd) x mod 2^32.
*/
private int modInverse(int x) {
// Cube gives inverse mod 2^4, as x^4 == 1 (mod 2^4) for all odd x.
int v1 = (x * x) * x;
// Newton iteration doubles correct bits at each step.
v1 *= 2 - (x * v1);
v1 *= 2 - (x * v1);
v1 *= 2 - (x * v1);
r... | 3.26 |
flink_TimeWindow_intersects_rdh | /**
* Returns {@code true} if this window intersects the given window.
*/
public boolean intersects(TimeWindow other) {
return (this.start <= other.f0) && (this.f0 >= other.start);
} | 3.26 |
flink_TimeWindow_maxTimestamp_rdh | /**
* Gets the largest timestamp that still belongs to this window.
*
* <p>This timestamp is identical to {@code getEnd() - 1}.
*
* @return The largest timestamp that still belongs to this window.
* @see #getEnd()
*/
@Override
public long maxTimestamp() {
return f0 - 1;
} | 3.26 |
flink_TimeWindow_cover_rdh | /**
 * Returns the minimal window that covers both this window and the given window.
*/
public TimeWindow cover(TimeWindow other) {
return
new TimeWindow(Math.min(start, other.start), Math.max(f0, other.f0));
} | 3.26 |
flink_TimeWindow_m0_rdh | /**
* Gets the starting timestamp of the window. This is the first timestamp that belongs to this
* window.
*
* @return The starting timestamp of this window.
*/public long m0() {
return start;
} | 3.26 |
flink_OperatorTransformation_bootstrapWith_rdh | /**
* Create a new {@link OneInputStateTransformation} from a {@link DataStream}.
*
* @param stream
* A data stream of elements.
* @param <T>
* The type of the input.
* @return A {@link OneInputStateTransformation}.
*/
public static <T> OneInputStateTransformation<T> bootstrapWith(DataStream<T> stream) {
... | 3.26 |
flink_CumulativeWindowAssigner_of_rdh | // ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
/**
* Creates a new {@link CumulativeWindowAssigner} that assigns elements to cumulative time
* windows based on the element timestamp.
*
* @param maxS... | 3.26 |
flink_ReflectionUtil_getFullTemplateType_rdh | /**
* Extract the full type information from the given type.
*
* @param type
* to be analyzed
* @return Full type information describing the given type
*/
public static FullTypeInfo getFullTemplateType(Type type) {
if (type instanceof ParameterizedType) {
ParameterizedType parameterizedType = ((Paramet... | 3.26 |
flink_MessageParameters_resolveUrl_rdh | /**
* Resolves the given URL (e.g "jobs/:jobid") using the given path/query parameters.
*
* <p>This method will fail with an {@link IllegalStateException} if any mandatory parameter was
* not resolved.
*
* <p>Unresolved optional parameters will be ignored.
*
* @param genericUrl
* URL to resolve
* @param par... | 3.26 |
flink_MessageParameters_isResolved_rdh | /**
* Returns whether all mandatory parameters have been resolved.
*
* @return true, if all mandatory parameters have been resolved, false otherwise
*/
public final boolean isResolved() {
return getPathParameters().stream().filter(MessageParameter::isMandatory).allMatch(MessageParameter::isResolved) && getQue... | 3.26 |
flink_StreamTableEnvironment_create_rdh | /**
* Creates a table environment that is the entry point and central context for creating Table
* and SQL API programs that integrate with the Java-specific {@link DataStream} API.
*
* <p>It is unified for bounded and unbounded data processing.
*
* <p>A stream table environment is responsible for:
*
* <ul>
* ... | 3.26 |
flink_SqlNodeConverters_convertSqlNode_rdh | /**
* Convert the given validated SqlNode into Operation if there is a registered converter for the
* node.
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public static Optional<Operation> convertSqlNode(SqlNode validatedSqlNode, ConvertContext context) {
... | 3.26 |
flink_FileChannelMemoryMappedBoundedData_createWithRegionSize_rdh | /**
* Creates new FileChannelMemoryMappedBoundedData, creating a memory mapped file at the given
* path. Each mapped region (= ByteBuffer) will be of the given size.
*/
public static FileChannelMemoryMappedBoundedData createWithRegionSize(Path memMappedFilePath, int regionSize) throws IOException {
checkNotNul... | 3.26 |
flink_FileChannelMemoryMappedBoundedData_finishWrite_rdh | /**
* Finishes the current region and prevents further writes. After calling this method, further
* calls to {@link #writeBuffer(Buffer)} will fail.
*/
@Override
public void finishWrite() throws IOException {
m0();
fileChannel.close();
} | 3.26 |
flink_FileChannelMemoryMappedBoundedData_create_rdh | // ------------------------------------------------------------------------
// Factories
// ------------------------------------------------------------------------
/**
* Creates new FileChannelMemoryMappedBoundedData, creating a memory mapped file at the given
* path.
*/
public static FileChannelMemoryMappedBounded... | 3.26 |
flink_FileChannelMemoryMappedBoundedData_close_rdh | /**
* Closes the file and unmaps all memory mapped regions. After calling this method, access to
* any ByteBuffer obtained from this instance will cause a segmentation fault.
*/
public void close() throws IOException {
IOUtils.closeQuietly(fileChannel);
for (ByteBuffer bb : memoryMappedRegions) {
Pla... | 3.26 |
flink_SqlLikeUtils_sqlToRegexLike_rdh | /**
* Translates a SQL LIKE pattern to Java regex pattern.
*/
static String sqlToRegexLike(String sqlPattern, char escapeChar) {
int i;
final int v9 = sqlPattern.length();
final StringBuilder javaPattern = new StringBuilder(v9 + v9);
for (i = 0; i < v9; i++) {
char c = sqlPattern.charAt(i);
... | 3.26 |
flink_SqlLikeUtils_like_rdh | /**
* SQL {@code LIKE} function with escape.
*/
public static boolean like(String s, String pattern, String escape) {
final String regex = sqlToRegexLike(pattern, escape);
return Pattern.matches(regex, s);
} | 3.26 |
flink_SqlLikeUtils_m0_rdh | /**
* Translates a SQL SIMILAR pattern to Java regex pattern, with optional escape string.
*/public static String m0(String sqlPattern, CharSequence escapeStr) {
final char escapeChar;
if (escapeStr != null) {
if (escapeStr.length() != 1) {
throw invalidEscapeCharacter(escapeStr.toString(... | 3.26 |
flink_SqlLikeUtils_sqlToRegexSimilar_rdh | /**
* Translates SQL SIMILAR pattern to Java regex pattern.
*/
public static String sqlToRegexSimilar(String
sqlPattern, char escapeChar) {
similarEscapeRuleChecking(sqlPattern, escapeChar);
boolean insideCharacterEnumeration = false;
final StringBuilder javaPattern = new StringBuilder(sqlPattern.length(... | 3.26 |
flink_SqlLikeUtils_similar_rdh | /**
* SQL {@code SIMILAR} function with escape.
*/
public static boolean similar(String s, String pattern, String escape) {final String regex = m0(pattern, escape);
return Pattern.matches(regex, s);
} | 3.26 |
flink_SqlLikeUtils_ilike_rdh | /**
* SQL {@code ILIKE} function with escape.
*/
public static boolean ilike(String s, String patternStr, String escape) {
final String regex = sqlToRegexLike(patternStr, escape);
Pattern pattern = Pattern.compile(regex, Pattern.CASE_INSENSITIVE);
Matcher matcher = pattern.matcher(s);
return matcher.m... | 3.26 |
flink_FixedLengthRecordSorter_isEmpty_rdh | /**
* Checks whether the buffer is empty.
*
* @return True, if no record is contained, false otherwise.
*/
@Override
public boolean isEmpty() {
return this.numRecords ==
0;
} | 3.26 |
flink_FixedLengthRecordSorter_writeToOutput_rdh | /**
* Writes a subset of the records in this buffer in their logical order to the given output.
*
* @param output
* The output view to write the records to.
* @param start
* The logical start position of the subset.
* @param num
* The number of elements to write.
* @throws IOException
* Thrown, if an ... | 3.26 |
flink_FixedLengthRecordSorter_getRecord_rdh | // -------------------------------------------------------------------------
// Retrieving and Writing
// -------------------------------------------------------------------------
@Override
public T getRecord(int logicalPosition) throws IOException {
return getRecord(f0.createInstance(), logicalPosition);
} | 3.26 |
flink_FixedLengthRecordSorter_compare_rdh | // -------------------------------------------------------------------------
// Sorting
// -------------------------------------------------------------------------
@Override
public int compare(int
i, int j) {
final int segmentNumberI = i / this.recordsPerSegment;
final int segmentOffsetI =
(i % this.reco... | 3.26 |
flink_FixedLengthRecordSorter_getIterator_rdh | /**
* Gets an iterator over all records in this buffer in their logical order.
*
* @return An iterator returning the records in their logical order.
*/
@Override
public final MutableObjectIterator<T> getIterator() {
final SingleSegmentInputView startIn
= new SingleSegmentInputView(this.recordsPerSegment * t... | 3.26 |
flink_FixedLengthRecordSorter_write_rdh | /**
* Writes a given record to this sort buffer. The written record will be appended and take the
* last logical position.
*
* @param record
* The record to be written.
* @return True, if the record was successfully written, false, if the sort buffer was full.
* @throws IOException
* Thrown, if an error occ... | 3.26 |
flink_FixedLengthRecordSorter_memoryAvailable_rdh | // ------------------------------------------------------------------------
// Access Utilities
// ------------------------------------------------------------------------
private boolean memoryAvailable() {
return !this.fr... | 3.26 |
flink_SlotPoolService_castInto_rdh | /**
* Tries to cast this slot pool service into the given clazz.
*
* @param clazz
* to cast the slot pool service into
* @param <T>
* type of clazz
* @return {@link Optional#of} the target type if it can be cast; otherwise {@link Optional#empty()}
*/
default <T> Optional<T> castInto(Class<T> clazz) {
if... | 3.26 |
flink_SlotPoolService_notifyNotEnoughResourcesAvailable_rdh | /**
* Notifies that not enough resources are available to fulfill the resource requirements.
*
* @param acquiredResources
* the resources that have been acquired
*/
default void notifyNotEnoughResourcesAvailable(Collection<ResourceRequirement> acquiredResources) {
} | 3.26 |
flink_Trigger_canMerge_rdh | /**
* Returns true if this trigger supports merging of trigger state and can therefore be used with
* a {@link org.apache.flink.streaming.api.windowing.assigners.MergingWindowAssigner}.
*
* <p>If this returns {@code true} you must properly implement {@link #onMerge(Window,
* OnMergeContext)... | 3.26 |
flink_NonReusingKeyGroupedIterator_nextKey_rdh | /**
* Moves the iterator to the next key. This method may skip any values that have not yet been
* returned by the iterator created by the {@link #getValues()} method. Hence, if called
* multiple times it "removes" key groups.
*
* @return true, if the input iterator has an other group of records with the same key.... | 3.26 |
flink_JobMasterId_generate_rdh | /**
* Generates a new random JobMasterId.
*/
public static JobMasterId generate() {
return new
JobMasterId();
} | 3.26 |
flink_JobMasterId_toUUID_rdh | /**
* Creates a UUID with the bits from this JobMasterId.
*/
public UUID toUUID() {
return new UUID(getUpperPart(), getLowerPart());
} | 3.26 |
flink_JobMasterId_fromUuidOrNull_rdh | /**
* If the given uuid is null, this returns null, otherwise a JobMasterId that corresponds to the
* UUID, via {@link #JobMasterId(UUID)}.
*/
public static JobMasterId fromUuidOrNull(@Nullable
UUID uuid)
{
return uuid == null ? null : new JobMasterId(uuid);
} | 3.26 |
flink_TableConfigValidation_validateTimeZone_rdh | /**
* Validates user configured time zone.
*/
public static void validateTimeZone(String zone) {
boolean isValid;
try {
// We enforce a zone string that is compatible with both java.util.TimeZone and
// java.time.ZoneId to avoid bugs.
// In general, advertising either TZDB ID, GMT+xx:x... | 3.26 |
flink_ScopeFormats_fromConfig_rdh | // ------------------------------------------------------------------------
// Parsing from Config
// ------------------------------------------------------------------------
/**
* Creates the scope formats as defined in the given configuration.
*
* @param config
* The configuration that defines the formats
* @r... | 3.26 |
flink_ScopeFormats_getJobManagerFormat_rdh | // ------------------------------------------------------------------------
// Accessors
// ------------------------------------------------------------------------
public JobManagerScopeFormat getJobManagerFormat() {
return this.jobManagerFormat;
} | 3.26 |
flink_ZooKeeperStateHandleStore_m0_rdh | /**
* Releases the lock from the node under the given ZooKeeper path. If no lock exists, then
* nothing happens.
*
* @param pathInZooKeeper
* Path describing the ZooKeeper node
* @throws Exception
* if the delete operation of the lock node fails
*/
@Override
public void m0(String pathInZooKeeper) throws Exc... | 3.26 |
flink_ZooKeeperStateHandleStore_clearEntries_rdh | /**
* Recursively deletes all children.
*
* @throws Exception
* ZK errors
*/
@Override
public void clearEntries() throws Exception {
final String path = "/" + client.getNamespace();
LOG.info("Removing {} from ZooKeeper", path);
ZKPaths.deleteChildren(client.getZookeeperClient().getZooKeeper(), path, true);
... | 3.26 |
flink_ZooKeeperStateHandleStore_getAndLock_rdh | /**
* Gets the {@link RetrievableStateHandle} stored in the given ZooKeeper node and locks it. A
* locked node cannot be removed by another {@link ZooKeeperStateHandleStore} instance as long
* as this instance remains connected to ZooKeeper.
*
* @param pathInZooKeeper
* Path to the ZooKeeper node which contains... | 3.26 |
flink_ZooKeeperStateHandleStore_normalizePath_rdh | /**
* Makes sure that every path starts with a "/".
*
* @param path
* Path to normalize
* @return Normalized path such that it starts with a "/"
*/
private static String normalizePath(String path) {
if (path.startsWith("/"))
{
return path;
} else {
return '/' + path;
}
} | 3.26 |
flink_ZooKeeperStateHandleStore_setStateHandle_rdh | // this method is provided for the sole purpose of easier testing
@VisibleForTesting
protected void setStateHandle(String path, byte[] serializedStateHandle, int expectedVersion) throws Exception {
// Replace state handle in ZooKeeper. We use idempotent set here to avoid a scenario, where
// we retry an update... | 3.26 |
flink_ZooKeeperStateHandleStore_getRootLockPath_rdh | /**
* Returns the sub-path for lock nodes of the corresponding node (referred to through the passed
 * {@code rootPath}). The returned sub-path collects the lock nodes for the {@code rootPath}'s
* node. The {@code rootPath} is marked for deletion if the sub-path for lock nodes is deleted.
*/
@VisibleForTesting
static ... | 3.26 |
flink_ZooKeeperStateHandleStore_addAndLock_rdh | /**
* Creates a state handle, stores it in ZooKeeper and locks it. A locked node cannot be removed
* by another {@link ZooKeeperStateHandleStore} instance as long as this instance remains
* connected to ZooKeeper.
*
* <p><strong>Important</strong>: This will <em>not</em> store the actual state in ZooKeeper,
* but... | 3.26 |
flink_ZooKeeperStateHandleStore_getAllAndLock_rdh | /**
* Gets all available state handles from ZooKeeper and locks the respective state nodes.
*
* <p>If there is a concurrent modification, the operation is retried until it succeeds.
*
* @return All state handles from ZooKeeper.
* @throws Exception
* If a ZooKeeper or state handle operation fails
*/
@Override
... | 3.26 |
flink_ZooKeeperStateHandleStore_get_rdh | // ---------------------------------------------------------------------------------------------------------
// Private methods
// ---------------------------------------------------------------------------------------------------------
/**
* Gets a state handle from ZooKeeper and optionally locks it.
*
* @param pat... | 3.26 |
flink_ZooKeeperStateHandleStore_releaseAll_rdh | /**
* Releases all lock nodes of this ZooKeeperStateHandleStore.
*
* @throws Exception
* if the delete operation of a lock file fails
*/
@Override
public void releaseAll() throws Exception {
Collection<String> children = getAllHandles();
Exception exception = null;
for (String child : children) {
... | 3.26 |
flink_ZooKeeperStateHandleStore_getInstanceLockPath_rdh | /**
* Returns the path for the lock node relative to the given path.
*
* @param rootPath
* Root path under which the lock node shall be created
* @return Path for the lock node
*/
@VisibleForTesting
String getInstanceLockPath(String rootPath) {
return (getRootLockPath(rootPath) + '/') + lockNode;
} | 3.26 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.