code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
/**
 * Deepest quicksort recursion allowed before falling back to heap sort.
 *
 * <p>Computes {@code 4 * ceil(log2(x))}: the left shift by two multiplies the
 * ceiling of the base-2 logarithm by four. For {@code x == 1} the result is 0.
 *
 * @param x number of records to sort; must be positive
 * @return the maximum allowed recursion depth
 * @throws IllegalArgumentException if {@code x <= 0}
 */
protected static int getMaxDepth(int x) {
    if (x <= 0) {
        throw new IllegalArgumentException("Undefined for " + x);
    }
    final int ceilLog2 = 32 - Integer.numberOfLeadingZeros(x - 1);
    return ceilLog2 * 4;
}
Deepest recursion before giving up and doing a heapsort. Returns 4 * ceil(log2(n)) (the shift by two multiplies ceil(log2(n)) by four).
getMaxDepth
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/QuickSort.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/QuickSort.java
Apache-2.0
/**
 * Sorts the record range {@code [p, r)} of a paged sortable with an introsort-style
 * quicksort: small ranges (fewer than 13 records) are finished with insertion sort,
 * and when the depth budget is exhausted the range is handed to heap sort.
 *
 * <p>Every record position is tracked as a triple: the logical index, the page
 * number ({@code *N}) and the byte offset within that page ({@code *O}); all
 * increments/decrements therefore come in page-aware pairs that wrap at
 * {@code maxOffset} / 0. The partition step is a three-way partition: records
 * equal to the pivot are parked at both ends ({@code ll}/{@code rr}) and swapped
 * into the middle afterwards.
 *
 * @param s paged sortable providing compare/swap by (page, offset)
 * @param recordsPerSegment number of records per memory segment
 * @param recordSize number of bytes per record
 * @param maxOffset offset of the last record in a memory segment
 * @param p index of the first record in the range
 * @param pN page number of the first record
 * @param pO page offset of the first record
 * @param r index of the last-plus-one record in the range
 * @param rN page number of the last-plus-one record
 * @param rO page offset of the last-plus-one record
 * @param depth remaining recursion budget before switching to heap sort
 */
private static void sortInternal(
        final IndexedSortable s,
        int recordsPerSegment,
        int recordSize,
        int maxOffset,
        int p,
        int pN,
        int pO,
        int r,
        int rN,
        int rO,
        int depth) {
    while (true) {
        if (r - p < 13) {
            // switch to insertion sort
            int i = p + 1, iN, iO;
            if (pO == maxOffset) {
                iN = pN + 1;
                iO = 0;
            } else {
                iN = pN;
                iO = pO + recordSize;
            }
            while (i < r) {
                int j = i, jN = iN, jO = iO;
                int jd = j - 1, jdN, jdO;
                if (jO == 0) {
                    jdN = jN - 1;
                    jdO = maxOffset;
                } else {
                    jdN = jN;
                    jdO = jO - recordSize;
                }
                // bubble record i leftwards until its predecessor is not larger
                while (j > p && s.compare(jdN, jdO, jN, jO) > 0) {
                    s.swap(jN, jO, jdN, jdO);
                    j = jd;
                    jN = jdN;
                    jO = jdO;
                    jd--;
                    if (jdO == 0) {
                        jdN--;
                        jdO = maxOffset;
                    } else {
                        jdO -= recordSize;
                    }
                }
                i++;
                if (iO == maxOffset) {
                    iN++;
                    iO = 0;
                } else {
                    iO += recordSize;
                }
            }
            return;
        }
        if (--depth < 0) {
            // switch to heap sort
            alt.sort(s, p, r);
            return;
        }
        // position of the last record in the range (r minus one)
        int rdN, rdO;
        if (rO == 0) {
            rdN = rN - 1;
            rdO = maxOffset;
        } else {
            rdN = rN;
            rdO = rO - recordSize;
        }
        int m = (p + r) >>> 1,
                mN = m / recordsPerSegment,
                mO = (m % recordsPerSegment) * recordSize;
        // select, move pivot into first position
        fix(s, mN, mO, pN, pO);
        fix(s, mN, mO, rdN, rdO);
        fix(s, pN, pO, rdN, rdO);
        // Divide
        int i = p, iN = pN, iO = pO;
        int j = r, jN = rN, jO = rO;
        int ll = p, llN = pN, llO = pO;
        int rr = r, rrN = rN, rrO = rO;
        int cr;
        while (true) {
            i++;
            if (iO == maxOffset) {
                iN++;
                iO = 0;
            } else {
                iO += recordSize;
            }
            // advance i past records <= pivot, parking equal records at the left end
            while (i < j) {
                if ((cr = s.compare(iN, iO, pN, pO)) > 0) {
                    break;
                }
                if (0 == cr) {
                    ll++;
                    if (llO == maxOffset) {
                        llN++;
                        llO = 0;
                    } else {
                        llO += recordSize;
                    }
                    if (ll != i) {
                        s.swap(llN, llO, iN, iO);
                    }
                }
                i++;
                if (iO == maxOffset) {
                    iN++;
                    iO = 0;
                } else {
                    iO += recordSize;
                }
            }
            j--;
            if (jO == 0) {
                jN--;
                jO = maxOffset;
            } else {
                jO -= recordSize;
            }
            // retreat j past records >= pivot, parking equal records at the right end
            while (j > i) {
                if ((cr = s.compare(pN, pO, jN, jO)) > 0) {
                    break;
                }
                if (0 == cr) {
                    rr--;
                    if (rrO == 0) {
                        rrN--;
                        rrO = maxOffset;
                    } else {
                        rrO -= recordSize;
                    }
                    if (rr != j) {
                        s.swap(rrN, rrO, jN, jO);
                    }
                }
                j--;
                if (jO == 0) {
                    jN--;
                    jO = maxOffset;
                } else {
                    jO -= recordSize;
                }
            }
            if (i < j) {
                s.swap(iN, iO, jN, jO);
            } else {
                break;
            }
        }
        j = i;
        jN = iN;
        jO = iO;
        // swap pivot- and all eq values- into position
        while (ll >= p) {
            i--;
            if (iO == 0) {
                iN--;
                iO = maxOffset;
            } else {
                iO -= recordSize;
            }
            s.swap(llN, llO, iN, iO);
            ll--;
            if (llO == 0) {
                llN--;
                llO = maxOffset;
            } else {
                llO -= recordSize;
            }
        }
        while (rr < r) {
            s.swap(rrN, rrO, jN, jO);
            rr++;
            if (rrO == maxOffset) {
                rrN++;
                rrO = 0;
            } else {
                rrO += recordSize;
            }
            j++;
            if (jO == maxOffset) {
                jN++;
                jO = 0;
            } else {
                jO += recordSize;
            }
        }
        // Conquer
        // Recurse on smaller interval first to keep stack shallow
        assert i != j;
        if (i - p < r - j) {
            sortInternal(
                    s, recordsPerSegment, recordSize, maxOffset, p, pN, pO, i, iN, iO, depth);
            p = j;
            pN = jN;
            pO = jO;
        } else {
            sortInternal(
                    s, recordsPerSegment, recordSize, maxOffset, j, jN, jO, r, rN, rO, depth);
            r = i;
            rN = iN;
            rO = iO;
        }
    }
}
Sort the given range of items using quick sort. If the recursion depth falls below {@link #getMaxDepth}, then switch to {@link HeapSort}. @param s paged sortable @param recordsPerSegment number of records per memory segment @param recordSize number of bytes per record @param maxOffset offset of a last record in a memory segment @param p index of first record in range @param pN page number of first record in range @param pO page offset of first record in range @param r index of last-plus-one'th record in range @param rN page number of last-plus-one'th record in range @param rO page offset of last-plus-one'th record in range @param depth recursion depth @see #sort(IndexedSortable, int, int)
sortInternal
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/QuickSort.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/QuickSort.java
Apache-2.0
/**
 * Main work loop of the reading thread: pulls records from the input iterator
 * and hands each one to the sorter gateway until the input is exhausted or the
 * thread is asked to stop, then signals the end of the input.
 *
 * @throws IOException if reading or forwarding a record fails
 * @throws InterruptedException if the thread is interrupted while handing a
 *     record to the gateway
 */
@Override
public void go() throws IOException, InterruptedException {
    final MutableObjectIterator<E> input = this.reader;
    for (E record = input.next(readTarget);
            isRunning() && record != null;
            record = input.next(record)) {
        sorterGateway.writeRecord(record);
    }
    sorterGateway.finishReading();
}
The main work loop of the reading thread: forwards all records from the reader to the sorter gateway while the thread is running, then signals that reading has finished.
go
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/ReadingThread.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/ReadingThread.java
Apache-2.0
/**
 * Hands one record to the current in-memory sort buffer, rolling over to the
 * next buffer (or to the large-record path) when the record does not fit.
 *
 * <p>If no buffer is in progress, a fresh one is taken from the READ stage and
 * is required to be empty. When a write fails on a completely empty buffer the
 * record cannot fit into any buffer and is routed to {@code writeLarge};
 * otherwise the full buffer is forwarded to the SORT stage and the write is
 * retried on a new buffer.
 *
 * @param record the record to buffer for sorting
 * @throws IOException if a freshly taken buffer is unexpectedly non-empty, or
 *     the large-record path fails
 * @throws InterruptedException if interrupted while waiting for a buffer
 */
public void writeRecord(E record) throws IOException, InterruptedException {
    if (currentBuffer == null) {
        this.currentBuffer = this.dispatcher.take(SortStage.READ);
        if (!currentBuffer.getBuffer().isEmpty()) {
            throw new IOException("New buffer is not empty.");
        }
    }
    InMemorySorter<E> sorter = currentBuffer.getBuffer();
    long occupancyPreWrite = sorter.getOccupancy();
    if (!sorter.write(record)) {
        // write failed: estimate the record size from the remaining capacity
        long recordSize = sorter.getCapacity() - occupancyPreWrite;
        signalSpillingIfNecessary(recordSize);
        boolean isLarge = occupancyPreWrite == 0;
        if (isLarge) {
            // did not fit in a fresh buffer, must be large...
            writeLarge(record, sorter);
            this.currentBuffer.getBuffer().reset();
        } else {
            // hand off the full buffer and retry with a fresh one
            this.dispatcher.send(SortStage.SORT, currentBuffer);
            this.currentBuffer = null;
            writeRecord(record);
        }
    } else {
        long recordSize = sorter.getOccupancy() - occupancyPreWrite;
        signalSpillingIfNecessary(recordSize);
    }
}
Writes the given record for sorting.
writeRecord
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/SorterInputGateway.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/SorterInputGateway.java
Apache-2.0
/**
 * Closes this manager: closes and deletes all still-open channel
 * readers/writers, and deletes the files of channels registered for removal at
 * shutdown. Idempotent; all cleanup errors are swallowed because this is
 * best-effort shutdown work.
 */
@Override
public synchronized void close() {
    if (this.closed) {
        return;
    }
    this.closed = true;
    // close and delete every open channel; remove each entry before acting on
    // it so a failure cannot leave it in the list
    for (Iterator<FileIOChannel> channels = this.openChannels.iterator(); channels.hasNext(); ) {
        try {
            final FileIOChannel channel = channels.next();
            channels.remove();
            channel.closeAndDelete();
        } catch (Throwable ignored) {
        }
    }
    // delete the files behind channels scheduled for shutdown deletion
    for (Iterator<FileIOChannel.ID> channels = this.channelsToDeleteAtShutdown.iterator();
            channels.hasNext(); ) {
        try {
            final FileIOChannel.ID channel = channels.next();
            channels.remove();
            final File f = new File(channel.getPath());
            if (f.exists()) {
                f.delete();
            }
        } catch (Throwable ignored) {
        }
    }
}
Closes this manager: closes and deletes all open channel readers/writers and removes the files of all channels registered for deletion at shutdown. Idempotent; cleanup errors are ignored.
close
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/SpillChannelManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/SpillChannelManager.java
Apache-2.0
/**
 * Drains every sort buffer still queued in the READ stage and disposes it.
 *
 * @param releaseMemory if {@code true}, the memory segments backing each
 *     drained buffer are also returned to the memory manager
 */
private void disposeSortBuffers(boolean releaseMemory) {
    for (CircularElement<E> elem = this.dispatcher.poll(SortStage.READ);
            elem != null;
            elem = this.dispatcher.poll(SortStage.READ)) {
        elem.getBuffer().dispose();
        if (releaseMemory) {
            this.memManager.release(elem.getMemory());
        }
    }
}
Releases the memory that is registered for in-memory sorted run generation.
disposeSortBuffers
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/SpillingThread.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/SpillingThread.java
Apache-2.0
/**
 * Builds an iterator over the merged result of all given channels, optionally
 * followed by a stream of large records.
 *
 * <p>For each channel a block reader is created, registered for shutdown
 * cleanup (replacing the channel's file-deletion registration), wrapped into a
 * reader view and exposed as a record iterator. All iterators are then combined
 * into a single {@code MergeIterator}.
 *
 * @param channelIDs the channels to merge; must align index-wise with
 *     {@code inputSegments}
 * @param inputSegments one list of read buffers per channel
 * @param readerList receives the opened readers so the caller can close them
 * @param largeRecords optional extra iterator of large records, or {@code null}
 * @return an iterator over the merged records of all inputs
 * @throws IOException if creating a reader fails
 */
private MergeIterator<E> getMergingIterator(
        final List<ChannelWithBlockCount> channelIDs,
        final List<List<MemorySegment>> inputSegments,
        List<FileIOChannel> readerList,
        MutableObjectIterator<E> largeRecords)
        throws IOException {
    // create one iterator per channel id
    LOG.debug("Performing merge of {} sorted streams.", channelIDs.size());
    final List<MutableObjectIterator<E>> iterators = new ArrayList<>(channelIDs.size() + 1);
    for (int i = 0; i < channelIDs.size(); i++) {
        final ChannelWithBlockCount channel = channelIDs.get(i);
        final List<MemorySegment> segsForChannel = inputSegments.get(i);
        // create a reader. if there are multiple segments for the reader, issue multiple
        // together per I/O request
        final BlockChannelReader<MemorySegment> reader =
                this.ioManager.createBlockChannelReader(channel.getChannel());
        readerList.add(reader);
        spillChannelManager.registerOpenChannelToBeRemovedAtShutdown(reader);
        spillChannelManager.unregisterChannelToBeRemovedAtShutdown(channel.getChannel());
        // wrap channel reader as a view, to get block spanning record deserialization
        final ChannelReaderInputView inView =
                new ChannelReaderInputView(
                        reader, segsForChannel, channel.getBlockCount(), false);
        iterators.add(new ChannelReaderInputViewIterator<>(inView, null, this.serializer));
    }
    if (largeRecords != null) {
        iterators.add(largeRecords);
    }
    return new MergeIterator<>(iterators, this.comparator);
}
Returns an iterator that iterates over the merged result from all given channels. @param channelIDs The channels that are to be merged and returned. @param inputSegments The buffers to be used for reading. The list contains for each channel one list of input segments. The size of the <code>inputSegments</code> list must be equal to that of the <code>channelIDs</code> list. @return An iterator over the merged records of the input channels. @throws IOException Thrown, if the readers encounter an I/O problem.
getMergingIterator
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/SpillingThread.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/SpillingThread.java
Apache-2.0
/**
 * Performs one (partial) merge round, reducing the given sorted runs to at most
 * {@code maxFanIn^scale} runs so that all remaining rounds can be full
 * {@code maxFanIn}-way merges.
 *
 * @param channelIDs the sorted runs to reduce
 * @param allReadBuffers all read buffers available; they are partitioned among
 *     the channels merged in one step
 * @param writeBuffers the buffers used by the merge writers
 * @return the IDs of the runs remaining after this round (unmerged originals
 *     first, then the newly merged runs)
 * @throws IOException if a reader or writer fails
 */
private List<ChannelWithBlockCount> mergeChannelList(
        final List<ChannelWithBlockCount> channelIDs,
        final List<MemorySegment> allReadBuffers,
        final List<MemorySegment> writeBuffers)
        throws IOException {
    // A channel list with length maxFanIn<sup>i</sup> can be merged to maxFanIn files in i-1
    // rounds where every merge
    // is a full merge with maxFanIn input channels. A partial round includes merges with fewer
    // than maxFanIn
    // inputs. It is most efficient to perform the partial round first.
    final double scale = Math.ceil(Math.log(channelIDs.size()) / Math.log(this.maxFanIn)) - 1;
    final int numStart = channelIDs.size();
    final int numEnd = (int) Math.pow(this.maxFanIn, scale);
    final int numMerges = (int) Math.ceil((numStart - numEnd) / (double) (this.maxFanIn - 1));
    final int numNotMerged = numEnd - numMerges;
    final int numToMerge = numStart - numNotMerged;
    // unmerged channel IDs are copied directly to the result list
    final List<ChannelWithBlockCount> mergedChannelIDs = new ArrayList<>(numEnd);
    mergedChannelIDs.addAll(channelIDs.subList(0, numNotMerged));
    final int channelsToMergePerStep = (int) Math.ceil(numToMerge / (double) numMerges);
    // allocate the memory for the merging step
    final List<List<MemorySegment>> readBuffers = new ArrayList<>(channelsToMergePerStep);
    getSegmentsForReaders(readBuffers, allReadBuffers, channelsToMergePerStep);
    final List<ChannelWithBlockCount> channelsToMergeThisStep =
            new ArrayList<>(channelsToMergePerStep);
    int channelNum = numNotMerged;
    // merge the remaining channels in groups of channelsToMergePerStep,
    // stopping early if the thread is asked to shut down
    while (isRunning() && channelNum < channelIDs.size()) {
        channelsToMergeThisStep.clear();
        for (int i = 0;
                i < channelsToMergePerStep && channelNum < channelIDs.size();
                i++, channelNum++) {
            channelsToMergeThisStep.add(channelIDs.get(channelNum));
        }
        mergedChannelIDs.add(mergeChannels(channelsToMergeThisStep, readBuffers, writeBuffers));
    }
    return mergedChannelIDs;
}
Merges the given sorted runs to a smaller number of sorted runs. @param channelIDs The IDs of the sorted runs that need to be merged. @param allReadBuffers The buffers to be distributed among the readers of the channels merged in one step. @param writeBuffers The buffers to be used by the writers. @return A list of the IDs of the merged channels. @throws IOException Thrown, if the readers or writers encountered an I/O problem.
mergeChannelList
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/SpillingThread.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/SpillingThread.java
Apache-2.0
/**
 * Merges the sorted runs behind the given channel IDs into a single new sorted
 * run, using the provided read and write buffers, and cleans up the inputs.
 *
 * @param channelIDs the channels of the runs to merge
 * @param readBuffers one list of read buffers per input channel
 * @param writeBuffers the buffers for the writer of the merged run
 * @return the ID and block count of the merged run's channel
 * @throws IOException if reading, merging, or writing fails
 */
private ChannelWithBlockCount mergeChannels(
        List<ChannelWithBlockCount> channelIDs,
        List<List<MemorySegment>> readBuffers,
        List<MemorySegment> writeBuffers)
        throws IOException {
    // the list with the readers, to be closed at shutdown
    final List<FileIOChannel> channelAccesses = new ArrayList<>(channelIDs.size());
    // the list with the target iterators
    final MergeIterator<E> mergeIterator =
            getMergingIterator(channelIDs, readBuffers, channelAccesses, null);
    // create a new channel writer
    final FileIOChannel.ID mergedChannelID = this.ioManager.createChannel();
    spillChannelManager.registerChannelToBeRemovedAtShutdown(mergedChannelID);
    final BlockChannelWriter<MemorySegment> writer =
            this.ioManager.createBlockChannelWriter(mergedChannelID);
    spillChannelManager.registerOpenChannelToBeRemovedAtShutdown(writer);
    final ChannelWriterOutputView output =
            new ChannelWriterOutputView(writer, writeBuffers, this.memManager.getPageSize());
    openSpillingBehaviour();
    spillingBehaviour.mergeRecords(mergeIterator, output);
    output.close();
    final int numBlocksWritten = output.getBlockCount();
    // register merged result to be removed at shutdown
    spillChannelManager.unregisterOpenChannelToBeRemovedAtShutdown(writer);
    // remove the merged channel readers from the clear-at-shutdown list
    for (FileIOChannel access : channelAccesses) {
        access.closeAndDelete();
        spillChannelManager.unregisterOpenChannelToBeRemovedAtShutdown(access);
    }
    return new ChannelWithBlockCount(mergedChannelID, numBlocksWritten);
}
Merges the sorted runs described by the given Channel IDs into a single sorted run. The merging process uses the given read and write buffers. @param channelIDs The IDs of the runs' channels. @param readBuffers The buffers for the readers that read the sorted runs. @param writeBuffers The buffers for the writer that writes the merged channel. @return The ID and number of blocks of the channel that describes the merged run.
mergeChannels
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/SpillingThread.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/SpillingThread.java
Apache-2.0
private void getSegmentsForReaders( List<List<MemorySegment>> target, List<MemorySegment> memory, int numChannels) { // determine the memory to use per channel and the number of buffers final int numBuffers = memory.size(); final int buffersPerChannelLowerBound = numBuffers / numChannels; final int numChannelsWithOneMore = numBuffers % numChannels; final Iterator<MemorySegment> segments = memory.iterator(); // collect memory for the channels that get one segment more for (int i = 0; i < numChannelsWithOneMore; i++) { final ArrayList<MemorySegment> segs = new ArrayList<>(buffersPerChannelLowerBound + 1); target.add(segs); for (int k = buffersPerChannelLowerBound; k >= 0; k--) { segs.add(segments.next()); } } // collect memory for the remaining channels for (int i = numChannelsWithOneMore; i < numChannels; i++) { final ArrayList<MemorySegment> segs = new ArrayList<>(buffersPerChannelLowerBound); target.add(segs); for (int k = buffersPerChannelLowerBound; k > 0; k--) { segs.add(segments.next()); } } }
Divides the given collection of memory buffers among {@code numChannels} sublists. @param target The list into which the lists with buffers for the channels are put. @param memory A list containing the memory buffers to be distributed. The buffers are not removed from this list. @param numChannels The number of channels for which to allocate buffers. Must not be zero.
getSegmentsForReaders
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/SpillingThread.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/SpillingThread.java
Apache-2.0
/**
 * Tags every record of this partition with the index of the range it falls
 * into, emitting {@code (rangeIndex, record)} pairs. Expects exactly one set
 * of range boundaries under the "RangeBoundaries" broadcast variable.
 *
 * @param values the records of this partition
 * @param out collector receiving the (range index, record) tuples
 * @throws Exception if the broadcast variable is missing or not a single value
 */
@Override
public void mapPartition(Iterable<IN> values, Collector<Tuple2<Integer, IN>> out)
        throws Exception {
    final List<Object> broadcast =
            getRuntimeContext().getBroadcastVariable("RangeBoundaries");
    if (broadcast == null || broadcast.size() != 1) {
        throw new RuntimeException(
                "AssignRangePartition require a single RangeBoundaries as broadcast input.");
    }
    final Object[][] boundaries = (Object[][]) broadcast.get(0);
    final RangeBoundaries ranges =
            new CommonRangeBoundaries(typeComparator.createComparator(), boundaries);

    // reuse a single tuple instance across all emitted records
    final Tuple2<Integer, IN> tagged = new Tuple2<>();
    for (IN record : values) {
        tagged.f0 = ranges.getRangeIndex(record);
        tagged.f1 = record;
        out.collect(tagged);
    }
}
This mapPartition function requires a DataSet with RangeBoundaries as broadcast input; it generates Tuple2 instances that include the range index and the record itself as output. @param <IN> The original data type.
mapPartition
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/udf/AssignRangeIndex.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/udf/AssignRangeIndex.java
Apache-2.0
/**
 * Sets the bit at the given position to 1.
 *
 * @param index bit position; must be in {@code [0, bitLength)}
 */
public void set(int index) {
    Preconditions.checkArgument(index < bitLength && index >= 0);
    // locate the byte holding this bit, then OR its mask into that byte
    final int bytePos = index >>> 3;
    final byte updated =
            (byte) (memorySegment.get(offset + bytePos) | (1 << (index & BYTE_INDEX_MASK)));
    memorySegment.put(offset + bytePos, updated);
}
Sets the bit at specified index. @param index - position
set
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BitSet.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BitSet.java
Apache-2.0
/**
 * Reads the bit at the given position.
 *
 * @param index bit position; must be in {@code [0, bitLength)}
 * @return {@code true} iff the bit at {@code index} is set
 */
public boolean get(int index) {
    Preconditions.checkArgument(index < bitLength && index >= 0);
    final int bytePos = index >>> 3;
    final int mask = 1 << (index & BYTE_INDEX_MASK);
    return (memorySegment.get(offset + bytePos) & mask) != 0;
}
Returns true if the bit is set in the specified index. @param index - position @return - value at the bit position
get
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BitSet.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BitSet.java
Apache-2.0
/**
 * Serializes this bit set by exposing the byte array backing the memory
 * segment; only heap-backed segments are supported.
 *
 * <p>NOTE(review): this returns the segment's backing array itself (no copy)
 * and ignores {@code offset}/{@code bitLength} — callers presumably rely on
 * the segment covering exactly this bit set; verify against the callers.
 *
 * @return the heap byte array backing the memory segment
 */
public byte[] toBytes() { checkState(!memorySegment.isOffHeap(), "Only support use heap memory for serialization"); return memorySegment.getArray(); }
Serializing {@link MemorySegment} to bytes, note that only heap memory is currently supported.
toBytes
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BitSet.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BitSet.java
Apache-2.0
/**
 * Re-points the backing bit set at the given memory segment and offset, e.g.
 * when rebuilding the BloomFilter from a serialized representation.
 *
 * @param memorySegment the segment holding the filter's bits
 * @param offset the byte offset of the bits within the segment
 */
public void setBitsLocation(MemorySegment memorySegment, int offset) { this.bitSet.setMemorySegment(memorySegment, offset); }
Re-points the filter's bit set at the given memory segment location, supporting the rebuilding of the BloomFilter from a serialized representation.
setBitsLocation
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BloomFilter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BloomFilter.java
Apache-2.0
/**
 * Computes the optimal number of Bloom-filter bits for the expected number of
 * entries and target false positive probability, using
 * {@code m = -n * ln(p) / (ln 2)^2}.
 *
 * @param inputEntries expected number of inserted entries
 * @param fpp desired false positive probability, in (0, 1)
 * @return the optimal bit count, truncated to an int
 */
public static int optimalNumOfBits(long inputEntries, double fpp) {
    final double ln2 = Math.log(2);
    return (int) (-inputEntries * Math.log(fpp) / (ln2 * ln2));
}
Compute optimal bits number with given input entries and expected false positive probability. @param inputEntries @param fpp @return optimal bits number
optimalNumOfBits
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BloomFilter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BloomFilter.java
Apache-2.0
/**
 * Estimates the false positive probability of a filter of {@code bitSize} bits
 * after inserting {@code inputEntries} elements, assuming the optimal number
 * of hash functions. This is only the mathematical expectation; the observed
 * rate may be higher.
 *
 * @param inputEntries the number of inserted entries
 * @param bitSize the number of bits in the filter
 * @return the estimated false positive probability
 */
public static double estimateFalsePositiveProbability(long inputEntries, int bitSize) {
    final int hashCount = optimalNumOfHashFunctions(inputEntries, bitSize);
    // probability that a single bit is still zero: e^(-k * n / m)
    final double bitStillZero = Math.pow(Math.E, -(double) hashCount * inputEntries / bitSize);
    // all k probed bits must be set for a false positive
    return Math.pow(1 - bitStillZero, hashCount);
}
Compute the false positive probability based on given input entries and bits size. Note: this is only the mathematically expected value; you should not assume the actual fpp in a real case will certainly stay below the returned value. @param inputEntries @param bitSize @return the estimated false positive probability
estimateFalsePositiveProbability
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BloomFilter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BloomFilter.java
Apache-2.0
/**
 * Serializes the given filter into a byte array laid out as: a 4-byte
 * hash-function count, a 4-byte bit-set length, then the bit-set bytes. Only
 * heap-backed filters are supported (see {@code BitSet#toBytes()}).
 *
 * @param filter the filter to serialize
 * @return the serialized filter bytes
 */
public static byte[] toBytes(BloomFilter filter) {
    byte[] data = filter.bitSet.toBytes();
    int byteSize = data.length;
    byte[] bytes = new byte[8 + byteSize];
    // header: numHashFunctions at offset 0, bit-set size at offset 4
    UNSAFE.putInt(bytes, BYTE_ARRAY_BASE_OFFSET, filter.numHashFunctions);
    UNSAFE.putInt(bytes, BYTE_ARRAY_BASE_OFFSET + 4, byteSize);
    UNSAFE.copyMemory(
            data, BYTE_ARRAY_BASE_OFFSET, bytes, BYTE_ARRAY_BASE_OFFSET + 8, byteSize);
    return bytes;
}
Serializing to bytes, note that only heap memory is currently supported.
toBytes
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BloomFilter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BloomFilter.java
Apache-2.0
/**
 * Merges (bitwise OR) the serialized filter bf2 into bf1 in place and returns
 * bf1's byte array. Both serialized filters must have equal length and the
 * same hash-function count.
 *
 * <p>NOTE(review): the hash-function validation compares only a single byte of
 * the 4-byte numHashFunctions header field — adequate only while the count
 * fits in one byte at that position; confirm against the serialization format
 * written by {@code toBytes}.
 *
 * @param bf1Bytes serialized filter that receives the merge result
 * @param bf1Start start offset of bf1 within its array
 * @param bf1Length length of the serialized bf1
 * @param bf2Bytes serialized filter merged into bf1
 * @param bf2Start start offset of bf2 within its array
 * @param bf2Length length of the serialized bf2; must equal {@code bf1Length}
 * @return {@code bf1Bytes}, with its bit-set payload OR-ed with bf2's
 */
private static byte[] mergeSerializedBloomFilters(
        byte[] bf1Bytes,
        int bf1Start,
        int bf1Length,
        byte[] bf2Bytes,
        int bf2Start,
        int bf2Length) {
    if (bf1Length != bf2Length) {
        throw new IllegalArgumentException(
                String.format(
                        "bf1Length %s does not match bf2Length %s when merging",
                        bf1Length, bf2Length));
    }
    // Validation on hash functions
    if (UNSAFE.getByte(bf1Bytes, BYTE_ARRAY_BASE_OFFSET + bf1Start)
            != UNSAFE.getByte(bf2Bytes, BYTE_ARRAY_BASE_OFFSET + bf2Start)) {
        throw new IllegalArgumentException(
                "bf1 numHashFunctions does not match bf2 when merging");
    }
    // OR the bit-set payload (after the 8-byte header) byte by byte into bf1
    for (int idx = 8 + BYTE_ARRAY_BASE_OFFSET;
            idx < bf1Length + BYTE_ARRAY_BASE_OFFSET;
            idx += 1) {
        byte l1 = UNSAFE.getByte(bf1Bytes, bf1Start + idx);
        byte l2 = UNSAFE.getByte(bf2Bytes, bf2Start + idx);
        UNSAFE.putByte(bf1Bytes, bf1Start + idx, (byte) (l1 | l2));
    }
    return bf1Bytes;
}
Merge the bf2 bytes to bf1. After merge completes, the contents of bf1 will be changed.
mergeSerializedBloomFilters
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BloomFilter.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/util/BloomFilter.java
Apache-2.0
/**
 * Creates an {@link IntegerResourceVersion} wrapping the given value.
 *
 * @param value the version value; must be non-negative
 * @return an {@code IntegerResourceVersion} with the given value
 * @throws IllegalArgumentException if {@code value} is negative
 */
public static IntegerResourceVersion valueOf(int value) { Preconditions.checkArgument(value >= 0); return new IntegerResourceVersion(value); }
Create a {@link IntegerResourceVersion} with given integer value. @param value resource version integer value. The value should not be negative. @return {@link IntegerResourceVersion} with given value.
valueOf
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/persistence/IntegerResourceVersion.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/persistence/IntegerResourceVersion.java
Apache-2.0
/** Returns the serializer for the key the state is associated to. */
public TypeSerializer<K> getKeySerializer() { return keySerializer; }
@return The serializer for the key the state is associated to.
getKeySerializer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateInfo.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateInfo.java
Apache-2.0
/** Returns the serializer for the namespace the state is associated to. */
public TypeSerializer<N> getNamespaceSerializer() { return namespaceSerializer; }
@return The serializer for the namespace the state is associated to.
getNamespaceSerializer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateInfo.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateInfo.java
Apache-2.0
/** Returns the serializer for the values kept in the state. */
public TypeSerializer<V> getStateValueSerializer() { return stateValueSerializer; }
@return The serializer for the values kept in the state.
getStateValueSerializer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateInfo.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateInfo.java
Apache-2.0
/** Returns the JobVertexID the KvState instances belong to. */
public JobVertexID getJobVertexId() { return jobVertexId; }
Returns the JobVertexID the KvState instances belong to. @return JobVertexID the KvState instances belong to
getJobVertexId
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocation.java
Apache-2.0
/** Returns the number of key groups of the operator the KvState instances belong to. */
public int getNumKeyGroups() { return numKeyGroups; }
Returns the number of key groups of the operator the KvState instances belong to. @return Number of key groups of the operator the KvState instances belong to
getNumKeyGroups
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocation.java
Apache-2.0
/** Returns the name under which the KvState instances have been registered. */
public String getRegistrationName() { return registrationName; }
Returns the name under which the KvState instances have been registered. @return Name under which the KvState instances have been registered.
getRegistrationName
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocation.java
Apache-2.0
/** Returns the current number of key groups with a registered KvState instance. */
public int getNumRegisteredKeyGroups() { return numRegisteredKeyGroups; }
Returns the current number of registered key groups. @return Number of registered key groups.
getNumRegisteredKeyGroups
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocation.java
Apache-2.0
/**
 * Looks up the KvStateID registered for the given key group.
 *
 * @param keyGroupIndex the key group index to look up
 * @return the registered KvStateID, or {@code null} if none is registered yet
 * @throws IndexOutOfBoundsException if the index is negative or not less than
 *     the number of key groups
 */
public KvStateID getKvStateID(int keyGroupIndex) {
    final boolean inRange = keyGroupIndex >= 0 && keyGroupIndex < numKeyGroups;
    if (!inRange) {
        throw new IndexOutOfBoundsException("Key group index");
    }
    return kvStateIds[keyGroupIndex];
}
Returns the registered KvStateID for the key group index or <code>null</code> if none is registered yet. @param keyGroupIndex Key group index to get ID for. @return KvStateID for the key group index or <code>null</code> if none is registered yet @throws IndexOutOfBoundsException If key group index < 0 or >= Number of key groups
getKvStateID
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocation.java
Apache-2.0
/**
 * Looks up the server address registered for the given key group.
 *
 * @param keyGroupIndex the key group index to look up
 * @return the registered server address, or {@code null} if none is registered
 *     yet
 * @throws IndexOutOfBoundsException if the index is negative or not less than
 *     the number of key groups
 */
public InetSocketAddress getKvStateServerAddress(int keyGroupIndex) {
    final boolean inRange = keyGroupIndex >= 0 && keyGroupIndex < numKeyGroups;
    if (!inRange) {
        throw new IndexOutOfBoundsException("Key group index");
    }
    return kvStateAddresses[keyGroupIndex];
}
Returns the registered server address for the key group index or <code>null</code> if none is registered yet. @param keyGroupIndex Key group index to get server address for. @return the server address for the key group index or <code>null</code> if none is registered yet @throws IndexOutOfBoundsException If key group index < 0 or >= Number of key groups
getKvStateServerAddress
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocation.java
Apache-2.0
/**
 * Registers a KvState instance for every key group in the given range,
 * recording its ID and server address and updating the count of registered key
 * groups.
 *
 * @param keyGroupRange key group range to register
 * @param kvStateId ID of the KvState instance for that range
 * @param kvStateAddress server address of the KvState instance for that range
 * @throws IndexOutOfBoundsException if the range start is negative or the
 *     range end is not less than the number of key groups
 */
public void registerKvState(
        KeyGroupRange keyGroupRange, KvStateID kvStateId, InetSocketAddress kvStateAddress) {
    if (keyGroupRange.getStartKeyGroup() < 0
            || keyGroupRange.getEndKeyGroup() >= numKeyGroups) {
        throw new IndexOutOfBoundsException("Key group index");
    }
    for (int kgIdx = keyGroupRange.getStartKeyGroup();
            kgIdx <= keyGroupRange.getEndKeyGroup();
            ++kgIdx) {
        // a key group counts as newly registered only if it had neither an ID
        // nor an address before
        if (kvStateIds[kgIdx] == null && kvStateAddresses[kgIdx] == null) {
            numRegisteredKeyGroups++;
        }
        kvStateIds[kgIdx] = kvStateId;
        kvStateAddresses[kgIdx] = kvStateAddress;
    }
}
Registers a KvState instance for the given key group index. @param keyGroupRange Key group range to register @param kvStateId ID of the KvState instance at the key group index. @param kvStateAddress Server address of the KvState instance at the key group index. @throws IndexOutOfBoundsException If key group range start < 0 or key group range end >= Number of key groups
registerKvState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocation.java
Apache-2.0
/**
 * Returns the {@link KvStateLocation} for the registered KvState instance, or
 * {@code null} if no location information is available.
 *
 * @param registrationName name under which the KvState instance is registered
 * @return the location information, or {@code null}
 */
public KvStateLocation getKvStateLocation(String registrationName) { return lookupTable.get(registrationName); }
Returns the {@link KvStateLocation} for the registered KvState instance or <code>null</code> if no location information is available. @param registrationName Name under which the KvState instance is registered. @return Location information or <code>null</code>.
getKvStateLocation
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocationRegistry.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocationRegistry.java
Apache-2.0
/**
 * Notifies the registry about a registered KvState instance.
 *
 * <p>On the first registration under a name, a {@link KvStateLocation} sized by the vertex's
 * max parallelism is created. If the name is already taken by a different job vertex, the
 * registering vertex is failed (restart suppressed) and the clash is rethrown.
 *
 * @param jobVertexId JobVertexID the KvState instance belongs to
 * @param keyGroupRange key group range the KvState instance belongs to
 * @param registrationName name under which the KvState has been registered
 * @param kvStateId ID of the registered KvState instance
 * @param kvStateServerAddress server address where to find the KvState instance
 * @throws IllegalArgumentException if the JobVertexID does not belong to the job
 * @throws IllegalStateException if the name has been registered by another operator
 * @throws IndexOutOfBoundsException if the key group index is out of bounds
 */
public void notifyKvStateRegistered(
        JobVertexID jobVertexId,
        KeyGroupRange keyGroupRange,
        String registrationName,
        KvStateID kvStateId,
        InetSocketAddress kvStateServerAddress) {
    KvStateLocation location = lookupTable.get(registrationName);
    if (location == null) {
        // First registration for this operator, create the location info
        ExecutionJobVertex vertex = jobVertices.get(jobVertexId);
        if (vertex != null) {
            // Max parallelism bounds how many key groups the location has to track.
            int parallelism = vertex.getMaxParallelism();
            location = new KvStateLocation(jobId, jobVertexId, parallelism, registrationName);
            lookupTable.put(registrationName, location);
        } else {
            throw new IllegalArgumentException("Unknown JobVertexID " + jobVertexId);
        }
    }
    // Duplicated name if vertex IDs don't match
    if (!location.getJobVertexId().equals(jobVertexId)) {
        IllegalStateException duplicate =
                new IllegalStateException(
                        "Registration name clash. KvState with name '"
                                + registrationName
                                + "' has already been registered by another operator ("
                                + location.getJobVertexId()
                                + ").");
        ExecutionJobVertex vertex = jobVertices.get(jobVertexId);
        if (vertex != null) {
            // Fail the clashing vertex first so the job does not keep running with a
            // duplicated queryable state name; restarts are suppressed on purpose.
            vertex.fail(new SuppressRestartsException(duplicate));
        }
        throw duplicate;
    }
    location.registerKvState(keyGroupRange, kvStateId, kvStateServerAddress);
}
Notifies the registry about a registered KvState instance. @param jobVertexId JobVertexID the KvState instance belongs to @param keyGroupRange Key group range the KvState instance belongs to @param registrationName Name under which the KvState has been registered @param kvStateId ID of the registered KvState instance @param kvStateServerAddress Server address where to find the KvState instance @throws IllegalArgumentException If JobVertexID does not belong to job @throws IllegalArgumentException If state has been registered with same name by another operator. @throws IndexOutOfBoundsException If key group index is out of bounds.
notifyKvStateRegistered
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocationRegistry.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocationRegistry.java
Apache-2.0
public void notifyKvStateUnregistered( JobVertexID jobVertexId, KeyGroupRange keyGroupRange, String registrationName) { KvStateLocation location = lookupTable.get(registrationName); if (location != null) { // Duplicate name if vertex IDs don't match if (!location.getJobVertexId().equals(jobVertexId)) { throw new IllegalArgumentException( "Another operator (" + location.getJobVertexId() + ") registered the KvState " + "under '" + registrationName + "'."); } location.unregisterKvState(keyGroupRange); if (location.getNumRegisteredKeyGroups() == 0) { lookupTable.remove(registrationName); } } else { throw new IllegalArgumentException( "Unknown registration name '" + registrationName + "'. " + "Probably registration/unregistration race."); } }
Notifies the registry about an unregistered KvState instance. @param jobVertexId JobVertexID the KvState instance belongs to @param keyGroupRange Key group range the KvState instance belongs to @param registrationName Name under which the KvState has been registered @throws IllegalArgumentException If another operator registered the state instance @throws IllegalArgumentException If the registration name is not known
notifyKvStateUnregistered
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocationRegistry.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateLocationRegistry.java
Apache-2.0
/**
 * Registers a listener for the given job with the registry.
 *
 * @param jobId identifies the job for which to register a {@link KvStateRegistryListener}
 * @param listener the registry listener
 * @throws IllegalStateException if a listener is already registered for the job
 */
public void registerListener(JobID jobId, KvStateRegistryListener listener) {
    // putIfAbsent makes registration atomic; a non-null result means someone was first.
    final KvStateRegistryListener existing = listeners.putIfAbsent(jobId, listener);
    if (existing != null) {
        throw new IllegalStateException("Listener already registered under " + jobId + '.');
    }
}
Registers a listener with the registry. @param jobId identifying the job for which to register a {@link KvStateRegistryListener} @param listener The registry listener. @throws IllegalStateException If there is a registered listener
registerListener
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateRegistry.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateRegistry.java
Apache-2.0
/**
 * Registers the KvState instance and returns the freshly assigned ID.
 *
 * <p>A registered {@link KvStateRegistryListener} for the job, if any, is notified of the
 * registration.
 *
 * @param jobId JobId the KvState instance belongs to
 * @param jobVertexId JobVertexID the KvState instance belongs to
 * @param keyGroupRange key group range the KvState instance belongs to
 * @param registrationName name under which the KvState is registered
 * @param kvState KvState instance to be registered
 * @param userClassLoader class loader associated with the KvState instance
 * @return the assigned KvStateID
 * @throws IllegalStateException if the freshly generated ID collides with an existing entry
 */
public KvStateID registerKvState(
        JobID jobId,
        JobVertexID jobVertexId,
        KeyGroupRange keyGroupRange,
        String registrationName,
        InternalKvState<?, ?, ?> kvState,
        ClassLoader userClassLoader) {

    final KvStateID kvStateId = new KvStateID();

    // A fresh random ID colliding with an existing entry should be impossible.
    if (registeredKvStates.putIfAbsent(kvStateId, new KvStateEntry<>(kvState, userClassLoader))
            != null) {
        throw new IllegalStateException(
                "State \""
                        + registrationName
                        + " \"(id="
                        + kvStateId
                        + ") appears registered although it should not.");
    }

    final KvStateRegistryListener listener = getKvStateRegistryListener(jobId);
    if (listener != null) {
        listener.notifyKvStateRegistered(
                jobId, jobVertexId, keyGroupRange, registrationName, kvStateId);
    }

    return kvStateId;
}
Registers the KvState instance and returns the assigned ID. @param jobId JobId the KvState instance belongs to @param jobVertexId JobVertexID the KvState instance belongs to @param keyGroupRange Key group range the KvState instance belongs to @param registrationName Name under which the KvState is registered @param kvState KvState instance to be registered @param userClassLoader ClassLoader associated with the KvState instance @return Assigned KvStateID
registerKvState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateRegistry.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateRegistry.java
Apache-2.0
/**
 * Unregisters the KvState instance identified by the given KvStateID.
 *
 * <p>Unknown IDs are ignored. On success the entry is cleared and a registered listener for
 * the job, if any, is notified.
 *
 * @param jobId JobId the KvState instance belongs to
 * @param jobVertexId JobVertexID the KvState instance belongs to
 * @param keyGroupRange key group range the KvState instance belongs to
 * @param registrationName name under which the KvState has been registered
 * @param kvStateId KvStateID identifying the KvState instance
 */
public void unregisterKvState(
        JobID jobId,
        JobVertexID jobVertexId,
        KeyGroupRange keyGroupRange,
        String registrationName,
        KvStateID kvStateId) {

    final KvStateEntry<?, ?, ?> entry = registeredKvStates.remove(kvStateId);
    if (entry == null) {
        // Nothing registered under this ID; silently ignore.
        return;
    }

    entry.clear();

    final KvStateRegistryListener listener = getKvStateRegistryListener(jobId);
    if (listener != null) {
        listener.notifyKvStateUnregistered(jobId, jobVertexId, keyGroupRange, registrationName);
    }
}
Unregisters the KvState instance identified by the given KvStateID. @param jobId JobId the KvState instance belongs to @param jobVertexId JobVertexID the KvState instance belongs to @param keyGroupRange Key group range the KvState instance belongs to @param registrationName Name under which the KvState has been registered @param kvStateId KvStateID to identify the KvState instance
unregisterKvState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateRegistry.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateRegistry.java
Apache-2.0
/**
 * Returns the {@link KvStateEntry} registered under the given KvStateID.
 *
 * @param kvStateId KvStateID identifying the KvState instance
 * @return the matching entry, or {@code null} if none is registered under the ID
 */
public KvStateEntry<?, ?, ?> getKvState(KvStateID kvStateId) {
    return this.registeredKvStates.get(kvStateId);
}
Returns the {@link KvStateEntry} containing the requested instance as identified by the given KvStateID, along with its {@link KvStateInfo} or <code>null</code> if none is registered. @param kvStateId KvStateID to identify the KvState instance @return The {@link KvStateEntry} instance identified by the KvStateID or <code>null</code> if there is none
getKvState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateRegistry.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/KvStateRegistry.java
Apache-2.0
/**
 * Registers the KvState instance at the global {@link KvStateRegistry} and remembers the
 * registration locally so it can be undone later.
 *
 * @param keyGroupRange key group range the KvState instance belongs to
 * @param registrationName the registration name (not necessarily the state descriptor name)
 * @param kvState the KvState instance to register
 * @param userClassLoader class loader associated with the KvState instance
 */
public void registerKvState(
        KeyGroupRange keyGroupRange,
        String registrationName,
        InternalKvState<?, ?, ?> kvState,
        ClassLoader userClassLoader) {

    final KvStateID assignedId =
            registry.registerKvState(
                    jobId,
                    jobVertexId,
                    keyGroupRange,
                    registrationName,
                    kvState,
                    userClassLoader);

    // Track the registration locally for later bulk unregistration.
    registeredKvStates.add(new KvStateInfo(keyGroupRange, registrationName, assignedId));
}
Registers the KvState instance at the KvStateRegistry. @param keyGroupRange Key group range the KvState instance belongs to @param registrationName The registration name (not necessarily the same as the KvState name defined in the state descriptor used to create the KvState instance) @param kvState The
registerKvState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/TaskKvStateRegistry.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/TaskKvStateRegistry.java
Apache-2.0
/**
 * Unregisters all KvState instances previously registered through this task registry from
 * the global {@link KvStateRegistry}.
 *
 * <p>After the loop the local bookkeeping list is cleared; without the clear, a second call
 * would attempt to unregister the same (already removed) instances again.
 */
public void unregisterAll() {
    for (KvStateInfo kvState : registeredKvStates) {
        registry.unregisterKvState(
                jobId,
                jobVertexId,
                kvState.keyGroupRange,
                kvState.registrationName,
                kvState.kvStateId);
    }
    // Drop local state so the method is idempotent.
    registeredKvStates.clear();
}
Unregisters all registered KvState instances from the KvStateRegistry.
unregisterAll
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/query/TaskKvStateRegistry.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/query/TaskKvStateRegistry.java
Apache-2.0
/**
 * Starts the RPC connection by creating a new pending registration and kicking it off.
 *
 * <p>A CAS on the pending-registration field guards against concurrent {@code start()}
 * calls: only the winner's registration actually runs, the loser's is cancelled.
 *
 * @throws IllegalStateException if the connection is already closed or already started
 */
public void start() {
    checkState(!closed, "The RPC connection is already closed");
    checkState(
            !isConnected() && pendingRegistration == null,
            "The RPC connection is already started");

    final RetryingRegistration<F, G, S, R> newRegistration = createNewRegistration();

    // Only start the registration if we atomically installed it as the pending one.
    if (REGISTRATION_UPDATER.compareAndSet(this, null, newRegistration)) {
        newRegistration.startRegistration();
    } else {
        // concurrent start operation
        newRegistration.cancel();
    }
}
Starts the RPC connection by creating and kicking off a new pending registration. @throws IllegalStateException If the connection is already closed or already started
start
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/registration/RegisteredRpcConnection.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/registration/RegisteredRpcConnection.java
Apache-2.0
/**
 * Tries to reconnect to the target by cancelling the pending registration and starting a new
 * one.
 *
 * <p>The new registration is installed via CAS against the previously observed pending
 * registration; a failed CAS means a concurrent modification won. After installing, a
 * double-check of {@code closed} catches a concurrent close.
 *
 * @return {@code false} if the connection has been closed or a concurrent modification
 *     happened; otherwise {@code true}
 * @throws IllegalStateException if the connection target is unknown (never connected)
 */
public boolean tryReconnect() {
    checkState(isConnected(), "Cannot reconnect to an unknown destination.");

    if (closed) {
        return false;
    } else {
        final RetryingRegistration<F, G, S, R> currentPendingRegistration = pendingRegistration;

        if (currentPendingRegistration != null) {
            currentPendingRegistration.cancel();
        }

        final RetryingRegistration<F, G, S, R> newRegistration = createNewRegistration();

        // Swap in the new registration only if nobody else modified the field meanwhile.
        if (REGISTRATION_UPDATER.compareAndSet(
                this, currentPendingRegistration, newRegistration)) {
            newRegistration.startRegistration();
        } else {
            // concurrent modification
            newRegistration.cancel();
            return false;
        }

        // double check for concurrent close operations
        if (closed) {
            newRegistration.cancel();
            return false;
        } else {
            return true;
        }
    }
}
Tries to reconnect to the {@link #targetAddress} by cancelling the pending registration and starting a new pending registration. @return {@code false} if the connection has been closed or a concurrent modification has happened; otherwise {@code true}
tryReconnect
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/registration/RegisteredRpcConnection.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/registration/RegisteredRpcConnection.java
Apache-2.0
/** Returns a fixed human-readable description of a successful registration. */
@Override
public String toString() {
    return "Registration Successful";
}
Base class for a successful registration. Concrete registration implementations will typically extend this class to attach more information.
toString
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/registration/RegistrationResponse.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/registration/RegistrationResponse.java
Apache-2.0
/** Returns a fixed human-readable description of a rejected registration. */
@Override
public String toString() {
    return "Registration Rejected";
}
A rejected (declined) registration. <p>A rejection indicates a permanent problem which prevents the registration between the target and the caller which cannot be solved by retrying the connection. Consequently, the {@link RetryingRegistration} will stop when it receives a {@link Rejection} response from the target. Moreover, a target should respond with {@link Rejection} if it realizes that it cannot work with the caller.
toString
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/registration/RegistrationResponse.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/registration/RegistrationResponse.java
Apache-2.0
/**
 * Returns the future that completes with the registration result once the registration
 * succeeds, is rejected, or fails terminally.
 *
 * @return the completion future of this registration
 */
public CompletableFuture<RetryingRegistrationResult<G, S, R>> getFuture() {
    return this.completionFuture;
}
This utility class implements the basis of registering one component at another component, for example registering the TaskExecutor at the ResourceManager. This {@code RetryingRegistration} implements both the initial address resolution and the retries-with-backoff strategy. <p>The registration gives access to a future that is completed upon successful registration. The registration can be canceled, for example when the target where it tries to register at loses leader status. @param <F> The type of the fencing token @param <G> The type of the gateway to connect to. @param <S> The type of the successful registration responses. @param <R> The type of the registration rejection responses.
getFuture
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/registration/RetryingRegistration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/registration/RetryingRegistration.java
Apache-2.0
/**
 * Checks whether this registration has been canceled.
 *
 * @return {@code true} if the registration was canceled, {@code false} otherwise
 */
public boolean isCanceled() {
    return this.canceled;
}
Checks if the registration was canceled. @return True if the registration was canceled, false otherwise.
isCanceled
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/registration/RetryingRegistration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/registration/RetryingRegistration.java
Apache-2.0
/**
 * Resolves the target address to a callable gateway and starts the registration attempts.
 *
 * <p>Address resolution happens asynchronously; on success the first registration attempt is
 * triggered on the RPC service's scheduled executor, on failure a retry is scheduled after
 * the configured error delay (unless this registration was canceled). Any synchronous
 * failure completes the completion future exceptionally and cancels the registration.
 */
@SuppressWarnings("unchecked")
public void startRegistration() {
    if (canceled) {
        // we already got canceled
        return;
    }

    try {
        // trigger resolution of the target address to a callable gateway
        final CompletableFuture<G> rpcGatewayFuture;

        // Fenced gateways need the fencing token at connect time; the cast is safe because
        // targetType is checked to be a FencedRpcGateway subtype first.
        if (FencedRpcGateway.class.isAssignableFrom(targetType)) {
            rpcGatewayFuture =
                    (CompletableFuture<G>)
                            rpcService.connect(
                                    targetAddress,
                                    fencingToken,
                                    targetType.asSubclass(FencedRpcGateway.class));
        } else {
            rpcGatewayFuture = rpcService.connect(targetAddress, targetType);
        }

        // upon success, start the registration attempts
        CompletableFuture<Void> rpcGatewayAcceptFuture =
                rpcGatewayFuture.thenAcceptAsync(
                        (G rpcGateway) -> {
                            log.info("Resolved {} address, beginning registration", targetName);
                            register(
                                    rpcGateway,
                                    1,
                                    retryingRegistrationConfiguration
                                            .getInitialRegistrationTimeoutMillis());
                        },
                        rpcService.getScheduledExecutor());

        // upon failure, retry, unless this is cancelled
        rpcGatewayAcceptFuture.whenCompleteAsync(
                (Void v, Throwable failure) -> {
                    if (failure != null && !canceled) {
                        final Throwable strippedFailure =
                                ExceptionUtils.stripCompletionException(failure);
                        // Full stack trace only at debug level; info logs just the message.
                        if (log.isDebugEnabled()) {
                            log.debug(
                                    "Could not resolve {} address {}, retrying in {} ms.",
                                    targetName,
                                    targetAddress,
                                    retryingRegistrationConfiguration.getErrorDelayMillis(),
                                    strippedFailure);
                        } else {
                            log.info(
                                    "Could not resolve {} address {}, retrying in {} ms: {}",
                                    targetName,
                                    targetAddress,
                                    retryingRegistrationConfiguration.getErrorDelayMillis(),
                                    strippedFailure.getMessage());
                        }

                        startRegistrationLater(
                                retryingRegistrationConfiguration.getErrorDelayMillis());
                    }
                },
                rpcService.getScheduledExecutor());
    } catch (Throwable t) {
        completionFuture.completeExceptionally(t);
        cancel();
    }
}
This method resolves the target address to a callable gateway and starts the registration after that.
startRegistration
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/registration/RetryingRegistration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/registration/RetryingRegistration.java
Apache-2.0
/**
 * Returns the zero-valued worker resource spec, for scenarios where the concrete default
 * values do not matter.
 *
 * @param configuration cluster configuration (unused by this factory)
 * @return {@link WorkerResourceSpec#ZERO}
 */
@Override
public WorkerResourceSpec createDefaultWorkerResourceSpec(Configuration configuration) {
    return WorkerResourceSpec.ZERO;
}
Implementation of {@link WorkerResourceSpecFactory} that creates arbitrary {@link WorkerResourceSpec}. Used for scenarios where the values in the default {@link WorkerResourceSpec} does not matter.
createDefaultWorkerResourceSpec
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/ArbitraryWorkerResourceSpecFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/ArbitraryWorkerResourceSpecFactory.java
Apache-2.0
/**
 * Registers a new JobMaster.
 *
 * <p>If the same JobMaster (identified by its fencing token) is already registered, this is
 * a no-op re-registration. If a different JobMaster was registered for the job, the old
 * connection is closed first (resource requirements retained) before the new one replaces
 * it. In all cases heartbeat monitoring is (re-)established for the job manager.
 *
 * @param jobMasterGateway gateway to communicate with the registering JobMaster
 * @param jobId id of the job for which the JobMaster is responsible
 * @param jobManagerAddress address of the JobMaster
 * @param jobManagerResourceId ResourceID of the JobMaster
 * @return RegistrationResponse announcing success with the RM's fencing token and resource id
 */
private RegistrationResponse registerJobMasterInternal(
        final JobMasterGateway jobMasterGateway,
        JobID jobId,
        String jobManagerAddress,
        ResourceID jobManagerResourceId) {
    if (jobManagerRegistrations.containsKey(jobId)) {
        JobManagerRegistration oldJobManagerRegistration = jobManagerRegistrations.get(jobId);

        if (Objects.equals(
                oldJobManagerRegistration.getJobMasterId(),
                jobMasterGateway.getFencingToken())) {
            // same registration
            log.debug(
                    "Job manager {}@{} was already registered.",
                    jobMasterGateway.getFencingToken(),
                    jobManagerAddress);
        } else {
            // tell old job manager that he is no longer the job leader
            closeJobManagerConnection(
                    oldJobManagerRegistration.getJobID(),
                    ResourceRequirementHandling.RETAIN,
                    new Exception("New job leader for job " + jobId + " found."));

            JobManagerRegistration jobManagerRegistration =
                    new JobManagerRegistration(jobId, jobManagerResourceId, jobMasterGateway);
            jobManagerRegistrations.put(jobId, jobManagerRegistration);
            jmResourceIdRegistrations.put(jobManagerResourceId, jobManagerRegistration);
            blocklistHandler.registerBlocklistListener(jobMasterGateway);
        }
    } else {
        // new registration for the job
        JobManagerRegistration jobManagerRegistration =
                new JobManagerRegistration(jobId, jobManagerResourceId, jobMasterGateway);
        jobManagerRegistrations.put(jobId, jobManagerRegistration);
        jmResourceIdRegistrations.put(jobManagerResourceId, jobManagerRegistration);
        blocklistHandler.registerBlocklistListener(jobMasterGateway);
    }

    log.info(
            "Registered job manager {}@{} for job {}.",
            jobMasterGateway.getFencingToken(),
            jobManagerAddress,
            jobId);

    // Start heartbeating the job manager so a failure is detected and the connection closed.
    jobManagerHeartbeatManager.monitorTarget(
            jobManagerResourceId, new JobMasterHeartbeatSender(jobMasterGateway));

    return new JobMasterRegistrationSuccess(getFencingToken(), resourceId);
}
Registers a new JobMaster. @param jobMasterGateway to communicate with the registering JobMaster @param jobId of the job for which the JobMaster is responsible @param jobManagerAddress address of the JobMaster @param jobManagerResourceId ResourceID of the JobMaster @return RegistrationResponse
registerJobMasterInternal
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/ResourceManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/ResourceManager.java
Apache-2.0
protected void closeJobManagerConnection( JobID jobId, ResourceRequirementHandling resourceRequirementHandling, Exception cause) { JobManagerRegistration jobManagerRegistration = jobManagerRegistrations.remove(jobId); if (jobManagerRegistration != null) { final ResourceID jobManagerResourceId = jobManagerRegistration.getJobManagerResourceID(); final JobMasterGateway jobMasterGateway = jobManagerRegistration.getJobManagerGateway(); final JobMasterId jobMasterId = jobManagerRegistration.getJobMasterId(); log.info( "Disconnect job manager {}@{} for job {} from the resource manager.", jobMasterId, jobMasterGateway.getAddress(), jobId); jobManagerHeartbeatManager.unmonitorTarget(jobManagerResourceId); jmResourceIdRegistrations.remove(jobManagerResourceId); blocklistHandler.deregisterBlocklistListener(jobMasterGateway); if (resourceRequirementHandling == ResourceRequirementHandling.CLEAR) { slotManager.clearResourceRequirements(jobId); } // tell the job manager about the disconnect jobMasterGateway.disconnectResourceManager(getFencingToken(), cause); } else { log.debug("There was no registered job manager for job {}.", jobId); } }
This method should be called by the framework once it detects that a currently registered job manager has failed. @param jobId identifying the job whose leader shall be disconnected. @param resourceRequirementHandling indicating how existing resource requirements for the corresponding job should be handled @param cause The exception which cause the JobManager failed.
closeJobManagerConnection
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/ResourceManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/ResourceManager.java
Apache-2.0
/**
 * Stops the given worker, if the resource allocator supports active resource management;
 * otherwise this is a no-op.
 *
 * @param worker the worker to stop
 */
public void stopWorkerIfSupported(WorkerType worker) {
    if (!resourceAllocator.isSupported()) {
        return;
    }
    resourceAllocator.cleaningUpDisconnectedResource(worker.getResourceID());
}
Stops the given worker if supported. @param worker The worker.
stopWorkerIfSupported
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/ResourceManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/ResourceManager.java
Apache-2.0
/**
 * Whether the created resource manager supports multiple consecutive leader sessions.
 *
 * @return always {@code true} for this factory
 */
protected boolean supportMultiLeaderSession() {
    return true;
}
Indicates whether the resource manager created by this factory supports multiple consecutive leader sessions (i.e. whether it can continue after losing and re-gaining leadership).
supportMultiLeaderSession
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/ResourceManagerFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/ResourceManagerFactory.java
Apache-2.0
/**
 * Creates a ResourceManagerId that wraps the given UUID.
 *
 * @param uuid the UUID backing the id
 * @return a new ResourceManagerId for the UUID
 */
public static ResourceManagerId fromUuid(UUID uuid) {
    return new ResourceManagerId(uuid);
}
Creates a ResourceManagerId that corresponds to the given UUID.
fromUuid
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/ResourceManagerId.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/ResourceManagerId.java
Apache-2.0
/**
 * Null-tolerant variant of {@link #fromUuid(UUID)}: returns {@code null} for a {@code null}
 * UUID, otherwise a ResourceManagerId wrapping it.
 *
 * @param uuid the UUID backing the id, possibly {@code null}
 * @return the corresponding ResourceManagerId, or {@code null}
 */
public static ResourceManagerId fromUuidOrNull(@Nullable UUID uuid) {
    if (uuid == null) {
        return null;
    }
    return new ResourceManagerId(uuid);
}
If the given uuid is null, this returns null, otherwise a ResourceManagerId that corresponds to the UUID, via {@link #ResourceManagerId(UUID)}.
fromUuidOrNull
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/ResourceManagerId.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/ResourceManagerId.java
Apache-2.0
/**
 * Builds a {@link WorkerResourceSpec} from the configuration, overriding the CPU cores with
 * the given value.
 *
 * @param configuration configuration to derive the task executor process spec from
 * @param cpuResource CPU cores to use for the worker
 * @return the derived worker resource spec
 */
protected WorkerResourceSpec workerResourceSpecFromConfigAndCpu(
        Configuration configuration, CPUResource cpuResource) {
    return WorkerResourceSpec.fromTaskExecutorProcessSpec(
            TaskExecutorProcessUtils.newProcessSpecBuilder(configuration)
                    .withCpuCores(cpuResource)
                    .build());
}
Factory for creating deployment specific default {@link WorkerResourceSpec}.
workerResourceSpecFromConfigAndCpu
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/WorkerResourceSpecFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/WorkerResourceSpecFactory.java
Apache-2.0
/**
 * Returns a {@link PrioritizedResourceMatchingStrategy} that prefers the resource with the
 * least utilization, spreading slots evenly across workers.
 *
 * @return least-utilization-first resource matching strategy
 */
public static PrioritizedResourceMatchingStrategy leastUtilization() {
    // Ascending utilization order: the least utilized candidate comes first.
    return new PrioritizedResourceMatchingStrategy(
            Comparator.comparingDouble(candidate -> candidate.utilization));
}
Returns a {@link PrioritizedResourceMatchingStrategy} that prioritizes the resource with the least utilization, used to evenly distribute slots to workers. @return least utilization prioritized resource matching strategy.
leastUtilization
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/slotmanager/DefaultResourceAllocationStrategy.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/slotmanager/DefaultResourceAllocationStrategy.java
Apache-2.0
/**
 * Suspends the slot manager, clearing all of its internal state.
 *
 * <p>No-op if the manager was never started. Closes the metric group and the status syncer,
 * cancels the periodic reconciliation check, and resets all trackers and references so the
 * manager can later be started fresh.
 */
@Override
public void suspend() {
    if (!started) {
        return;
    }

    LOG.info("Suspending the slot manager.");

    slotManagerMetricGroup.close();

    // stop the timeout checks for the TaskManagers
    if (clusterReconciliationCheck != null) {
        clusterReconciliationCheck.cancel(false);
        clusterReconciliationCheck = null;
    }

    slotStatusSyncer.close();
    taskManagerTracker.clear();
    resourceTracker.clear();
    unfulfillableJobs.clear();

    // Drop leader-session-scoped references; they are re-set on the next start.
    resourceManagerId = null;
    resourceAllocator = null;
    resourceEventListener = null;
    started = false;
}
Suspends the component. This clears the internal state of the slot manager.
suspend
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/slotmanager/FineGrainedSlotManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/slotmanager/FineGrainedSlotManager.java
Apache-2.0
/**
 * Reports the current slot allocations for the task manager identified by the instance id.
 *
 * <p>Reports for unknown task managers are ignored. If the syncer signals that the report
 * changed tracked state, a (delayed) resource requirements check is triggered.
 *
 * @param instanceId identifies the task manager for which to report the slot status
 * @param slotReport status for all of the task manager's slots
 * @return {@code true} if the slot status has been updated successfully, otherwise
 *     {@code false}
 */
@Override
public boolean reportSlotStatus(InstanceID instanceId, SlotReport slotReport) {
    checkInit();
    LOG.debug("Received slot report from instance {}: {}.", instanceId, slotReport);

    if (!taskManagerTracker.getRegisteredTaskManager(instanceId).isPresent()) {
        LOG.debug(
                "Received slot report for unknown task manager with instance id {}. Ignoring this report.",
                instanceId);
        return false;
    }

    if (!slotStatusSyncer.reportSlotStatus(instanceId, slotReport)) {
        checkResourceRequirementsWithDelay();
    }
    return true;
}
Reports the current slot allocations for a task manager identified by the given instance id. @param instanceId identifying the task manager for which to report the slot status @param slotReport containing the status for all of its slots @return true if the slot status has been updated successfully, otherwise false
reportSlotStatus
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/slotmanager/FineGrainedSlotManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/slotmanager/FineGrainedSlotManager.java
Apache-2.0
/**
 * Frees the slot belonging to the given allocation.
 *
 * <p>If no allocated or pending slot is tracked for the allocation id, the message is
 * ignored. The slot id parameter is ignored by this implementation.
 *
 * @param slotId identifies the slot to free (ignored)
 * @param allocationId allocation with which the slot is presumably allocated
 */
@Override
public void freeSlot(SlotID slotId, AllocationID allocationId) {
    checkInit();
    LOG.debug("Freeing slot {}.", allocationId);

    if (!taskManagerTracker.getAllocatedOrPendingSlot(allocationId).isPresent()) {
        LOG.debug(
                "Trying to free a slot {} which has not been allocated. Ignoring this message.",
                allocationId);
        return;
    }

    slotStatusSyncer.freeSlot(allocationId);
    checkResourceRequirementsWithDelay();
}
Free the given slot from the given allocation. If the slot is still allocated by the given allocation id, then the slot will be freed. @param slotId identifying the slot to free, will be ignored @param allocationId with which the slot is presumably allocated
freeSlot
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/slotmanager/FineGrainedSlotManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/slotmanager/FineGrainedSlotManager.java
Apache-2.0
/**
 * Triggers a resource requirements check, optionally batched behind a configured delay.
 *
 * <p>Requirement checks can be expensive (depending on the {@code
 * ResourceAllocationStrategy}), so with a positive delay multiple triggers within the delay
 * window coalesce into a single check: a new check is only scheduled when no check is
 * currently pending. The check itself is executed on the main thread executor.
 */
private void checkResourceRequirementsWithDelay() {
    if (requirementsCheckDelay.toMillis() <= 0) {
        // No delay configured: check immediately.
        checkResourceRequirements();
    } else {
        // Only schedule if no check is already pending; later triggers piggyback on it.
        if (requirementsCheckFuture == null || requirementsCheckFuture.isDone()) {
            requirementsCheckFuture = new CompletableFuture<>();
            scheduledExecutor.schedule(
                    () ->
                            mainThreadExecutor.execute(
                                    () -> {
                                        checkResourceRequirements();
                                        Preconditions.checkNotNull(requirementsCheckFuture)
                                                .complete(null);
                                    }),
                    requirementsCheckDelay.toMillis(),
                    TimeUnit.MILLISECONDS);
        }
    }
}
Depending on the implementation of {@link ResourceAllocationStrategy}, checking resource requirements and potentially making a re-allocation can be heavy. In order to cover more changes with each check, thus reduce the frequency of unnecessary re-allocations, the checks are performed with a slight delay.
checkResourceRequirementsWithDelay
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/slotmanager/FineGrainedSlotManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/slotmanager/FineGrainedSlotManager.java
Apache-2.0
/**
 * Checks whether this slot's resource profile can fulfill the required profile.
 *
 * @param required required resources
 * @return {@code true} if this slot can fulfill the resource requirements
 */
default boolean isMatchingRequirement(ResourceProfile required) {
    final ResourceProfile offered = getResourceProfile();
    return offered.isMatching(required);
}
Returns true if the required {@link ResourceProfile} can be fulfilled by this slot. @param required resources @return true if the this slot can fulfill the resource requirements
isMatchingRequirement
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/slotmanager/TaskManagerSlotInformation.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/resourcemanager/slotmanager/TaskManagerSlotInformation.java
Apache-2.0
/**
 * Creates a {@link MiniDispatcherRestEndpoint} backed by the given services.
 *
 * @param configuration cluster configuration
 * @param dispatcherGatewayRetriever retriever for the dispatcher gateway
 * @param resourceManagerGatewayRetriever retriever for the resource manager gateway
 * @param transientBlobService service for transient blobs served by the endpoint
 * @param executor executor for asynchronous handler work
 * @param metricFetcher fetcher for metrics displayed by the endpoint
 * @param leaderElection leader election for the endpoint
 * @param fatalErrorHandler handler for fatal errors
 * @return the created REST endpoint
 * @throws Exception if the endpoint cannot be created
 */
@Override
public WebMonitorEndpoint<RestfulGateway> createRestEndpoint(
        Configuration configuration,
        LeaderGatewayRetriever<DispatcherGateway> dispatcherGatewayRetriever,
        LeaderGatewayRetriever<ResourceManagerGateway> resourceManagerGatewayRetriever,
        TransientBlobService transientBlobService,
        ScheduledExecutorService executor,
        MetricFetcher metricFetcher,
        LeaderElection leaderElection,
        FatalErrorHandler fatalErrorHandler)
        throws Exception {
    final RestHandlerConfiguration handlerConfig =
            RestHandlerConfiguration.fromConfiguration(configuration);

    return new MiniDispatcherRestEndpoint(
            dispatcherGatewayRetriever,
            configuration,
            handlerConfig,
            resourceManagerGatewayRetriever,
            transientBlobService,
            executor,
            metricFetcher,
            leaderElection,
            RestEndpointFactory.createExecutionGraphCache(handlerConfig),
            fatalErrorHandler);
}
{@link RestEndpointFactory} which creates a {@link MiniDispatcherRestEndpoint}.
createRestEndpoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/ApplicationRestEndpointFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/ApplicationRestEndpointFactory.java
Apache-2.0
/**
 * Returns the netty {@link HttpMethod} wrapped by this enum constant.
 *
 * @return the wrapped netty HTTP method
 */
public HttpMethod getNettyHttpMethod() {
    return this.nettyHttpMethod;
}
This class wraps netty's {@link HttpMethod}s into an enum, allowing us to use them in switches.
getNettyHttpMethod
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/HttpMethodWrapper.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/HttpMethodWrapper.java
Apache-2.0
/**
 * Returns the SSL handler factory the REST client endpoint should use.
 *
 * @return the SSL handler factory, or {@code null} if SSL is disabled
 */
@Nullable
public SSLHandlerFactory getSslHandlerFactory() {
    return this.sslHandlerFactory;
}
Returns the {@link SSLEngine} that the REST client endpoint should use. @return SSLEngine that the REST client endpoint should use, or null if SSL was disabled
getSslHandlerFactory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestClientConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestClientConfiguration.java
Apache-2.0
/**
 * Returns the maximum content length the REST client endpoint can handle.
 *
 * @return maximum content length in bytes
 */
public int getMaxContentLength() {
    return this.maxContentLength;
}
Returns the max content length that the REST client endpoint could handle. @return max content length that the REST client endpoint could handle
getMaxContentLength
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestClientConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestClientConfiguration.java
Apache-2.0
public static RestClientConfiguration fromConfiguration(Configuration config) throws ConfigurationException { Preconditions.checkNotNull(config); final SSLHandlerFactory sslHandlerFactory; if (SecurityOptions.isRestSSLEnabled(config)) { try { sslHandlerFactory = SSLUtils.createRestClientSSLEngineFactory(config); } catch (Exception e) { throw new ConfigurationException( "Failed to initialize SSLContext for the REST client", e); } } else { sslHandlerFactory = null; } final long connectionTimeout = config.get(RestOptions.CONNECTION_TIMEOUT).toMillis(); final long idlenessTimeout = config.get(RestOptions.IDLENESS_TIMEOUT).toMillis(); int maxContentLength = config.get(RestOptions.CLIENT_MAX_CONTENT_LENGTH); return new RestClientConfiguration( sslHandlerFactory, connectionTimeout, idlenessTimeout, maxContentLength); }
Creates and returns a new {@link RestClientConfiguration} from the given {@link Configuration}. @param config configuration from which the REST client endpoint configuration should be created from @return REST client endpoint configuration @throws ConfigurationException if SSL was configured incorrectly
fromConfiguration
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestClientConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestClientConfiguration.java
Apache-2.0
static ExecutionGraphCache createExecutionGraphCache( RestHandlerConfiguration restConfiguration) { return new DefaultExecutionGraphCache( restConfiguration.getTimeout(), Duration.ofMillis(restConfiguration.getRefreshInterval())); }
{@link WebMonitorEndpoint} factory. @param <T> type of the {@link RestfulGateway}
createExecutionGraphCache
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestEndpointFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestEndpointFactory.java
Apache-2.0
@VisibleForTesting List<InboundChannelHandlerFactory> getInboundChannelHandlerFactories() { return inboundChannelHandlerFactories; }
An abstract class for netty-based REST server endpoints.
getInboundChannelHandlerFactories
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpoint.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpoint.java
Apache-2.0
@Nullable public InetSocketAddress getServerAddress() { synchronized (lock) { assertRestServerHasBeenStarted(); Channel server = this.serverChannel; if (server != null) { try { return ((InetSocketAddress) server.localAddress()); } catch (Exception e) { log.error("Cannot access local server address", e); } } return null; } }
Returns the address on which this endpoint is accepting requests. @return address on which this endpoint is accepting requests or null if none
getServerAddress
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpoint.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpoint.java
Apache-2.0
static void createUploadDir( final Path uploadDir, final Logger log, final boolean initialCreation) throws IOException { if (!Files.exists(uploadDir)) { if (initialCreation) { log.info("Upload directory {} does not exist. ", uploadDir); } else { log.warn( "Upload directory {} has been deleted externally. " + "Previously uploaded files are no longer available.", uploadDir); } checkAndCreateUploadDir(uploadDir, log); } }
Creates the upload dir if needed.
createUploadDir
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpoint.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpoint.java
Apache-2.0
@Override public int compare( Tuple2<RestHandlerSpecification, ChannelInboundHandler> o1, Tuple2<RestHandlerSpecification, ChannelInboundHandler> o2) { final int urlComparisonResult = CASE_INSENSITIVE_ORDER.compare( o1.f0.getTargetRestEndpointURL(), o2.f0.getTargetRestEndpointURL()); if (urlComparisonResult != 0) { return urlComparisonResult; } else { Collection<? extends RestAPIVersion> o1APIVersions = o1.f0.getSupportedAPIVersions(); RestAPIVersion o1Version = Collections.min(o1APIVersions); Collection<? extends RestAPIVersion> o2APIVersions = o2.f0.getSupportedAPIVersions(); RestAPIVersion o2Version = Collections.min(o2APIVersions); return o1Version.compareTo(o2Version); } }
Comparator for Rest URLs. <p>The comparator orders the Rest URLs such that URLs with path parameters are ordered behind those without parameters. E.g.: /jobs /jobs/overview /jobs/:jobid /jobs/:jobid/config /:* <p>IMPORTANT: This comparator is highly specific to how Netty path parameters are encoded. Namely with a preceding ':' character.
compare
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpoint.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpoint.java
Apache-2.0
@Override public int compare(String s1, String s2) { int n1 = s1.length(); int n2 = s2.length(); int min = Math.min(n1, n2); for (int i = 0; i < min; i++) { char c1 = s1.charAt(i); char c2 = s2.charAt(i); if (c1 != c2) { c1 = Character.toUpperCase(c1); c2 = Character.toUpperCase(c2); if (c1 != c2) { c1 = Character.toLowerCase(c1); c2 = Character.toLowerCase(c2); if (c1 != c2) { if (c1 == ':') { // c2 is less than c1 because it is also different return 1; } else if (c2 == ':') { // c1 is less than c2 return -1; } else { return c1 - c2; } } } } } return n1 - n2; }
Comparator for Rest URLs. <p>The comparator orders the Rest URLs such that URLs with path parameters are ordered behind those without parameters. E.g.: /jobs /jobs/overview /jobs/:jobid /jobs/:jobid/config /:* <p>IMPORTANT: This comparator is highly specific to how Netty path parameters are encoded. Namely with a preceding ':' character.
compare
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpoint.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpoint.java
Apache-2.0
public String getRestBindAddress() { return restBindAddress; }
Returns the address that the REST server endpoint should bind itself to. @return address that the REST server endpoint should bind itself to
getRestBindAddress
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpointConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpointConfiguration.java
Apache-2.0
public String getRestBindPortRange() { return restBindPortRange; }
Returns the port range that the REST server endpoint should listen on. @return port range that the REST server endpoint should listen on
getRestBindPortRange
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpointConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpointConfiguration.java
Apache-2.0
public Path getUploadDir() { return uploadDir; }
Returns the directory used to temporarily store multipart/form-data uploads.
getUploadDir
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpointConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpointConfiguration.java
Apache-2.0
public Map<String, String> getResponseHeaders() { return responseHeaders; }
Response headers that should be added to every HTTP response.
getResponseHeaders
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpointConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpointConfiguration.java
Apache-2.0
public static RestServerEndpointConfiguration fromConfiguration(Configuration config) throws ConfigurationException { Preconditions.checkNotNull(config); final String restAddress = Preconditions.checkNotNull( config.get(RestOptions.ADDRESS), "%s must be set", RestOptions.ADDRESS.key()); final String restBindAddress = config.get(RestOptions.BIND_ADDRESS); final String portRangeDefinition = config.get(RestOptions.BIND_PORT); final SSLHandlerFactory sslHandlerFactory; if (SecurityOptions.isRestSSLEnabled(config)) { try { sslHandlerFactory = SSLUtils.createRestServerSSLEngineFactory(config); } catch (Exception e) { throw new ConfigurationException( "Failed to initialize SSLEngineFactory for REST server endpoint.", e); } } else { sslHandlerFactory = null; } final Path uploadDir = Paths.get( config.get(WebOptions.UPLOAD_DIR, config.get(WebOptions.TMP_DIR)), "flink-web-upload"); final int maxContentLength = config.get(RestOptions.SERVER_MAX_CONTENT_LENGTH); final Map<String, String> responseHeaders = Collections.singletonMap( HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN.toString(), config.get(WebOptions.ACCESS_CONTROL_ALLOW_ORIGIN)); return new RestServerEndpointConfiguration( restAddress, restBindAddress, portRangeDefinition, sslHandlerFactory, uploadDir, maxContentLength, responseHeaders); }
Creates and returns a new {@link RestServerEndpointConfiguration} from the given {@link Configuration}. @param config configuration from which the REST server endpoint configuration should be created from @return REST server endpoint configuration @throws ConfigurationException if SSL was configured incorrectly
fromConfiguration
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpointConfiguration.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpointConfiguration.java
Apache-2.0
@Override public WebMonitorEndpoint<DispatcherGateway> createRestEndpoint( Configuration configuration, LeaderGatewayRetriever<DispatcherGateway> dispatcherGatewayRetriever, LeaderGatewayRetriever<ResourceManagerGateway> resourceManagerGatewayRetriever, TransientBlobService transientBlobService, ScheduledExecutorService executor, MetricFetcher metricFetcher, LeaderElection leaderElection, FatalErrorHandler fatalErrorHandler) throws Exception { final RestHandlerConfiguration restHandlerConfiguration = RestHandlerConfiguration.fromConfiguration(configuration); return new DispatcherRestEndpoint( dispatcherGatewayRetriever, configuration, restHandlerConfiguration, resourceManagerGatewayRetriever, transientBlobService, executor, metricFetcher, leaderElection, RestEndpointFactory.createExecutionGraphCache(restHandlerConfiguration), fatalErrorHandler); }
{@link RestEndpointFactory} which creates a {@link DispatcherRestEndpoint}.
createRestEndpoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/SessionRestEndpointFactory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/SessionRestEndpointFactory.java
Apache-2.0
public R getRequestBody() { return requestBody; }
Returns the request body. @return request body
getRequestBody
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/HandlerRequest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/HandlerRequest.java
Apache-2.0
public <X, PP extends MessagePathParameter<X>> X getPathParameter(Class<PP> parameterClass) { @SuppressWarnings("unchecked") PP pathParameter = (PP) pathParameters.get(parameterClass); Preconditions.checkState( pathParameter != null, "No parameter could be found for the given class."); return pathParameter.getValue(); }
Returns the value of the {@link MessagePathParameter} for the given class. @param parameterClass class of the parameter @param <X> the value type that the parameter contains @param <PP> type of the path parameter @return path parameter value for the given class @throws IllegalStateException if no value is defined for the given parameter class
getPathParameter
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/HandlerRequest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/HandlerRequest.java
Apache-2.0
@VisibleForTesting public static <R extends RequestBody, M extends MessageParameters> HandlerRequest<R> create( R requestBody, M messageParameters) { return create(requestBody, messageParameters, Collections.emptyList()); }
Short-cut for {@link #create(RequestBody, MessageParameters, Collection)} without any uploaded files.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/HandlerRequest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/HandlerRequest.java
Apache-2.0
@VisibleForTesting public static <R extends RequestBody, M extends MessageParameters> HandlerRequest<R> create( R requestBody, M messageParameters, Collection<File> uploadedFiles) { return new HandlerRequest<R>( requestBody, mapParameters(messageParameters.getPathParameters()), mapParameters(messageParameters.getQueryParameters()), uploadedFiles); }
Creates a new {@link HandlerRequest}. The given {@link MessageParameters} are expected to be resolved.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/HandlerRequest.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/HandlerRequest.java
Apache-2.0
@Override protected boolean onAdvance(final int phase, final int registeredParties) { terminationFuture.complete(null); return true; }
Tracks in-flight client requests. @see AbstractHandler
onAdvance
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/InFlightRequestTracker.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/InFlightRequestTracker.java
Apache-2.0
public boolean registerRequest() { return phaser.register() >= 0; }
Registers an in-flight request. @return {@code true} if the request could be registered; {@code false} if the tracker has already been terminated.
registerRequest
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/InFlightRequestTracker.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/InFlightRequestTracker.java
Apache-2.0
public CompletableFuture<Void> awaitAsync() { phaser.arriveAndDeregister(); return terminationFuture; }
Returns a future that completes when the in-flight requests that were registered prior to calling this method are deregistered.
awaitAsync
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/InFlightRequestTracker.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/InFlightRequestTracker.java
Apache-2.0
@Override public CompletableFuture<TriggerResponse> handleRequest( @Nonnull HandlerRequest<B> request, @Nonnull T gateway) throws RestHandlerException { final CompletableFuture<R> resultFuture = triggerOperation(request, gateway); final K operationKey = createOperationKey(request); completedOperationCache.registerOngoingOperation(operationKey, resultFuture); return CompletableFuture.completedFuture( new TriggerResponse(operationKey.getTriggerId())); }
Handler which is responsible for triggering an asynchronous operation. After the operation has been triggered, it stores the result future in the {@link #completedOperationCache}. @param <T> type of the gateway @param <B> type of the request @param <M> type of the message parameters
handleRequest
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/async/AbstractAsynchronousOperationHandlers.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/async/AbstractAsynchronousOperationHandlers.java
Apache-2.0
public boolean containsOperation(final K operationKey) { return registeredOperationTriggers.containsKey(operationKey) || completedOperations.getIfPresent(operationKey) != null; }
Returns whether this cache contains an operation under the given operation key.
containsOperation
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/async/CompletedOperationCache.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/async/CompletedOperationCache.java
Apache-2.0
private static <R extends Serializable> ResultAccessTracker<R> inProgress() { return new ResultAccessTracker<>(); }
Future that completes if {@link #operationResult} is accessed after it finished.
inProgress
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/async/CompletedOperationCache.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/async/CompletedOperationCache.java
Apache-2.0
public ResultAccessTracker<R> finishOperation(final OperationResult<R> operationResult) { checkState(!this.operationResult.isFinished()); return new ResultAccessTracker<>(checkNotNull(operationResult), this.accessed); }
Creates a new instance of the tracker with the result of the asynchronous operation set.
finishOperation
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/async/CompletedOperationCache.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/async/CompletedOperationCache.java
Apache-2.0
public TriggerId getTriggerId() { return triggerId; }
Response to the triggering of an asynchronous operation.
getTriggerId
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/async/TriggerResponse.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/async/TriggerResponse.java
Apache-2.0
@Override protected CompletableFuture<Void> respondToRequest( ChannelHandlerContext ctx, HttpRequest httpRequest, HandlerRequest<EmptyRequestBody> handlerRequest, RestfulGateway gateway) { File file = getFile(handlerRequest); if (file != null && file.exists()) { try { HandlerUtils.transferFile(ctx, file, httpRequest); } catch (FlinkException e) { throw new CompletionException( new FlinkException("Could not transfer file to client.", e)); } return CompletableFuture.completedFuture(null); } else { return HandlerUtils.sendErrorResponse( ctx, httpRequest, new ErrorResponseBody("This file does not exist in JobManager log dir."), HttpResponseStatus.NOT_FOUND, Collections.emptyMap()); } }
Base class for serving files from the JobManager.
respondToRequest
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/AbstractJobManagerFileHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/AbstractJobManagerFileHandler.java
Apache-2.0
@Override protected CompletableFuture<ConfigurationInfo> handleRequest( @Nonnull HandlerRequest<EmptyRequestBody> request, @Nonnull RestfulGateway gateway) throws RestHandlerException { return CompletableFuture.completedFuture(clusterConfig); }
Handler which serves the cluster's configuration.
handleRequest
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/ClusterConfigHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/ClusterConfigHandler.java
Apache-2.0
@Override public CompletableFuture<ClusterOverviewWithVersion> handleRequest( @Nonnull HandlerRequest<EmptyRequestBody> request, @Nonnull RestfulGateway gateway) { CompletableFuture<ClusterOverview> overviewFuture = gateway.requestClusterOverview(timeout); return overviewFuture.thenApply( statusOverview -> ClusterOverviewWithVersion.fromStatusOverview( statusOverview, version, commitID)); }
Handler which returns the cluster overview information with version.
handleRequest
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/ClusterOverviewHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/ClusterOverviewHandler.java
Apache-2.0
@Override public CompletableFuture<DashboardConfiguration> handleRequest( @Nonnull HandlerRequest<EmptyRequestBody> request, @Nonnull RestfulGateway gateway) { return CompletableFuture.completedFuture(dashboardConfiguration); }
Handler which returns the dashboard configuration.
handleRequest
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/DashboardConfigHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/DashboardConfigHandler.java
Apache-2.0
@Override protected File getFile(HandlerRequest<EmptyRequestBody> handlerRequest) { if (logDir == null) { return null; } // wrapping around another File instantiation is a simple way to remove any path information // - we're // solely interested in the filename String filename = new File(handlerRequest.getPathParameter(LogFileNamePathParameter.class)).getName(); return new File(logDir, filename); }
Rest handler which serves the custom log file from JobManager.
getFile
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/JobManagerCustomLogHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/JobManagerCustomLogHandler.java
Apache-2.0
@Override protected CompletableFuture<EnvironmentInfo> handleRequest( @Nonnull HandlerRequest<EmptyRequestBody> request, @Nonnull RestfulGateway gateway) throws RestHandlerException { return CompletableFuture.completedFuture(environmentInfo); }
Handler which serves the jobmanager's environment variables.
handleRequest
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/JobManagerEnvironmentHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/JobManagerEnvironmentHandler.java
Apache-2.0
@Override protected File getFile(HandlerRequest<EmptyRequestBody> handlerRequest) { return file; }
Rest handler which serves the log files from JobManager.
getFile
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/JobManagerLogFileHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/JobManagerLogFileHandler.java
Apache-2.0
@Override protected CompletableFuture<LogListInfo> handleRequest( @Nonnull HandlerRequest<EmptyRequestBody> request, @Nonnull RestfulGateway gateway) throws RestHandlerException { if (logDir == null) { return CompletableFuture.completedFuture(new LogListInfo(Collections.emptyList())); } final File[] logFiles = logDir.listFiles(); if (logFiles == null) { return FutureUtils.completedExceptionally( new IOException("Could not list files in " + logDir)); } final List<LogInfo> logs = Arrays.stream(logFiles) .filter(File::isFile) .map( logFile -> new LogInfo( logFile.getName(), logFile.length(), logFile.lastModified())) .collect(Collectors.toList()); return CompletableFuture.completedFuture(new LogListInfo(logs)); }
Handler which serves detailed JobManager log list information.
handleRequest
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/JobManagerLogListHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/JobManagerLogListHandler.java
Apache-2.0
@Override protected File getFile(HandlerRequest<EmptyRequestBody> handlerRequest) { if (profilingResultDir == null) { return null; } String filename = new File(handlerRequest.getPathParameter(ProfilingFileNamePathParameter.class)) .getName(); return new File(profilingResultDir, filename); }
Rest handler which serves the profiler result file from JobManager.
getFile
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/JobManagerProfilingFileHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/JobManagerProfilingFileHandler.java
Apache-2.0
@Override protected CompletableFuture<ProfilingInfo> handleRequest( @Nonnull HandlerRequest<ProfilingRequestBody> request, @Nonnull RestfulGateway gateway) throws RestHandlerException { ProfilingRequestBody profilingRequest = request.getRequestBody(); int duration = profilingRequest.getDuration(); if (duration <= 0 || duration > maxDurationInSeconds) { return FutureUtils.completedExceptionally( new IllegalArgumentException( String.format( "`duration` must be set between (0s, %ds].", maxDurationInSeconds))); } return profilingService.requestProfiling( "JobManager", duration, profilingRequest.getMode()); }
Rest handler which serves the profiling info from the JobManager.
handleRequest
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/JobManagerProfilingHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/JobManagerProfilingHandler.java
Apache-2.0
@Override protected CompletableFuture<ProfilingInfoList> handleRequest( @Nonnull HandlerRequest<EmptyRequestBody> request, @Nonnull RestfulGateway gateway) throws RestHandlerException { return profilingService.getProfilingList("JobManager").thenApply(ProfilingInfoList::new); }
Rest handler which serves the profiling list from the JobManager.
handleRequest
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/JobManagerProfilingListHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/rest/handler/cluster/JobManagerProfilingListHandler.java
Apache-2.0