code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
/**
 * Returns the configured minimum size for a BLOB to be offloaded, read from
 * {@code BlobServerOptions.OFFLOAD_MINSIZE}.
 *
 * @return minimum offloading size
 */
@Override
public final int getMinOffloadingSize() {
    return blobServiceConfiguration.get(BlobServerOptions.OFFLOAD_MINSIZE);
}
Returns the configured minimum size for a BLOB to be offloaded (read from {@code BlobServerOptions.OFFLOAD_MINSIZE}). @return minimum offloading size
getMinOffloadingSize
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
Apache-2.0
/**
 * Returns the port on which the server is listening.
 *
 * @return port on which the server is listening
 */
@Override
public int getPort() {
    return serverSocket.getLocalPort();
}
Returns the port on which the server is listening. @return port on which the server is listening
getPort
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
Apache-2.0
/**
 * Tests whether the BLOB server has been requested to shut down.
 *
 * @return {@code true} if a shutdown has been requested, {@code false} otherwise
 */
public boolean isShutdown() {
    return shutdownRequested.get();
}
Tests whether the BLOB server has been requested to shut down. @return True, if the server has been requested to shut down, false otherwise.
isShutdown
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
Apache-2.0
/**
 * Returns a snapshot of all currently active connections of this BlobServer.
 *
 * @return a copy of the list of active connections (safe to use outside the lock)
 */
List<BlobServerConnection> getCurrentActiveConnections() {
    synchronized (activeConnections) {
        // copy under the lock so callers can iterate without further synchronization
        return new ArrayList<>(activeConnections);
    }
}
Returns all the current active connections in the BlobServer. @return the list of all active connections in the current BlobServer
getCurrentActiveConnections
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServer.java
Apache-2.0
/**
 * Main connection work method. Accepts PUT/GET requests in a loop until the other side closes
 * the connection (signalled by a negative read on the operation byte).
 *
 * <p>On any error the socket is closed; in all cases the connection unregisters itself from the
 * owning {@code blobServer} in the {@code finally} block.
 */
@Override
public void run() {
    try {
        final InputStream inputStream = this.clientSocket.getInputStream();
        final OutputStream outputStream = this.clientSocket.getOutputStream();

        while (true) {
            // Read the requested operation
            final int operation = inputStream.read();
            if (operation < 0) {
                // done, no one is asking anything from us
                return;
            }

            switch (operation) {
                case PUT_OPERATION:
                    put(inputStream, outputStream, new byte[BUFFER_SIZE]);
                    break;
                case GET_OPERATION:
                    get(inputStream, outputStream, new byte[BUFFER_SIZE]);
                    break;
                default:
                    throw new IOException("Unknown operation " + operation);
            }
        }
    } catch (SocketException e) {
        // this happens when the remote site closes the connection
        LOG.debug("Socket connection closed", e);
    } catch (Throwable t) {
        LOG.error(
                "Error while executing BLOB connection from {}.",
                clientSocket.getRemoteSocketAddress(),
                t);
    } finally {
        closeSilently(clientSocket, LOG);
        blobServer.unregisterConnection(this);
    }
}
Main connection work method. Accepts requests until the other side closes the connection.
run
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServerConnection.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServerConnection.java
Apache-2.0
/** Closes the connection socket and interrupts the worker thread so it can exit. */
public void close() {
    closeSilently(clientSocket, LOG);
    interrupt();
}
Closes the connection socket and lets the thread exit.
close
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServerConnection.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServerConnection.java
Apache-2.0
/**
 * Handles an incoming GET request from a BLOB client.
 *
 * <p>Transient BLOB files are deleted after a successful read operation by the client (the
 * client acknowledges with {@code RETURN_OKAY} after receiving the data). Note that atomicity
 * is not enforced here, i.e. multiple clients reading the same BLOB may still succeed.
 *
 * <p>NOTE(review): the "BLOB size exceeds the maximum size (2 GB)." message was split across
 * two lines by the extraction; it is rejoined here as a single string literal.
 *
 * @param inputStream the input stream to read incoming data from
 * @param outputStream the output stream to send data back to the client
 * @param buf an auxiliary buffer for data serialization/deserialization
 * @throws IOException thrown if an I/O error occurs while reading/writing data from/to the
 *     respective streams
 */
private void get(InputStream inputStream, OutputStream outputStream, byte[] buf)
        throws IOException {
    /*
     * Retrieve the file from the (distributed?) BLOB store and store it
     * locally, then send it to the service which requested it.
     *
     * Instead, we could send it from the distributed store directly but
     * chances are high that if there is one request, there will be more
     * so a local cache makes more sense.
     */
    final File blobFile;
    final JobID jobId;
    final BlobKey blobKey;

    try {
        // read HEADER contents: job ID, key, HA mode/permanent or transient BLOB
        final int mode = inputStream.read();
        if (mode < 0) {
            throw new EOFException("Premature end of GET request");
        }

        // Receive the jobId and key
        if (mode == JOB_UNRELATED_CONTENT) {
            jobId = null;
        } else if (mode == JOB_RELATED_CONTENT) {
            byte[] jidBytes = new byte[JobID.SIZE];
            readFully(inputStream, jidBytes, 0, JobID.SIZE, "JobID");
            jobId = JobID.fromByteArray(jidBytes);
        } else {
            throw new IOException("Unknown type of BLOB addressing: " + mode + '.');
        }
        blobKey = BlobKey.readFromInputStream(inputStream);

        // permanent BLOBs must always be addressed with a job ID
        checkArgument(
                blobKey instanceof TransientBlobKey || jobId != null,
                "Invalid BLOB addressing for permanent BLOBs");

        if (LOG.isDebugEnabled()) {
            LOG.debug(
                    "Received GET request for BLOB {}/{} from {}.",
                    jobId,
                    blobKey,
                    clientSocket.getInetAddress());
        }

        // up to here, an error can give a good message
    } catch (Throwable t) {
        LOG.error("GET operation from {} failed.", clientSocket.getInetAddress(), t);
        try {
            writeErrorToStream(outputStream, t);
        } catch (IOException e) {
            // since we are in an exception case, it means that we could not send the error
            // ignore this
        }
        clientSocket.close();
        return;
    }

    try {
        readLock.lock();
        try {
            // copy the file to local store if it does not exist yet
            try {
                blobFile = blobServer.getFileInternal(jobId, blobKey);

                // enforce a 2GB max for now (otherwise the protocol's length field needs to be
                // increased)
                if (blobFile.length() > Integer.MAX_VALUE) {
                    throw new IOException("BLOB size exceeds the maximum size (2 GB).");
                }
                outputStream.write(RETURN_OKAY);
            } catch (Throwable t) {
                LOG.error(
                        "GET operation failed for BLOB {}/{} from {}.",
                        jobId,
                        blobKey,
                        clientSocket.getInetAddress(),
                        t);
                try {
                    writeErrorToStream(outputStream, t);
                } catch (IOException e) {
                    // since we are in an exception case, it means that we could not send the
                    // error
                    // ignore this
                }
                clientSocket.close();
                return;
            }

            // from here on, we started sending data, so all we can do is close the connection
            // when something happens
            int blobLen = (int) blobFile.length();
            writeLength(blobLen, outputStream);

            try (FileInputStream fis = new FileInputStream(blobFile)) {
                int bytesRemaining = blobLen;
                while (bytesRemaining > 0) {
                    int read = fis.read(buf);
                    if (read < 0) {
                        throw new IOException(
                                "Premature end of BLOB file stream for "
                                        + blobFile.getAbsolutePath());
                    }
                    outputStream.write(buf, 0, read);
                    bytesRemaining -= read;
                }
            }
        } finally {
            readLock.unlock();
        }

        // on successful transfer, delete transient files
        int result = inputStream.read();
        if (result < 0) {
            throw new EOFException("Premature end of GET request");
        } else if (blobKey instanceof TransientBlobKey && result == RETURN_OKAY) {
            // ignore the result from the operation
            if (!blobServer.deleteInternal(jobId, (TransientBlobKey) blobKey)) {
                LOG.warn(
                        "DELETE operation failed for BLOB {}/{} from {}.",
                        jobId,
                        blobKey,
                        clientSocket.getInetAddress());
            }
        }
    } catch (SocketException e) {
        // happens when the other side disconnects
        LOG.debug("Socket connection closed", e);
    } catch (Throwable t) {
        LOG.error("GET operation failed", t);
        clientSocket.close();
    }
}
Handles an incoming GET request from a BLOB client. <p>Transient BLOB files are deleted after a successful read operation by the client. Note that we do not enforce atomicity here, i.e. multiple clients reading from the same BLOB may still succeed. @param inputStream the input stream to read incoming data from @param outputStream the output stream to send data back to the client @param buf an auxiliary buffer for data serialization/deserialization @throws IOException thrown if an I/O error occurs while reading/writing data from/to the respective streams
get
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServerConnection.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServerConnection.java
Apache-2.0
/**
 * Handles an incoming PUT request from a BLOB client: reads the addressing header (optional job
 * ID plus BLOB type), streams the payload into a temporary file, moves it into the store, and
 * returns the computed {@link BlobKey} to the client for validation.
 *
 * <p>NOTE(review): the debug message "Received PUT request for BLOB of job {} with from {}."
 * contains a stray "with"; left unchanged here since log output is runtime behavior.
 *
 * @param inputStream The input stream to read incoming data from
 * @param outputStream The output stream to send data back to the client
 * @param buf An auxiliary buffer for data serialization/deserialization
 * @throws IOException thrown if an I/O error occurs while reading/writing data from/to the
 *     respective streams
 */
private void put(InputStream inputStream, OutputStream outputStream, byte[] buf)
        throws IOException {
    File incomingFile = null;

    try {
        // read HEADER contents: job ID, HA mode/permanent or transient BLOB
        final int mode = inputStream.read();
        if (mode < 0) {
            throw new EOFException("Premature end of PUT request");
        }

        final JobID jobId;
        if (mode == JOB_UNRELATED_CONTENT) {
            jobId = null;
        } else if (mode == JOB_RELATED_CONTENT) {
            byte[] jidBytes = new byte[JobID.SIZE];
            readFully(inputStream, jidBytes, 0, JobID.SIZE, "JobID");
            jobId = JobID.fromByteArray(jidBytes);
        } else {
            throw new IOException("Unknown type of BLOB addressing.");
        }

        final BlobKey.BlobType blobType;
        {
            final int read = inputStream.read();
            if (read < 0) {
                throw new EOFException("Read an incomplete BLOB type");
            } else if (read == TRANSIENT_BLOB.ordinal()) {
                blobType = TRANSIENT_BLOB;
            } else if (read == PERMANENT_BLOB.ordinal()) {
                blobType = PERMANENT_BLOB;
                // permanent BLOBs must always be addressed with a job ID
                checkArgument(jobId != null, "Invalid BLOB addressing for permanent BLOBs");
            } else {
                throw new IOException("Invalid data received for the BLOB type: " + read);
            }
        }

        if (LOG.isDebugEnabled()) {
            LOG.debug(
                    "Received PUT request for BLOB of job {} with from {}.",
                    jobId,
                    clientSocket.getInetAddress());
        }

        incomingFile = blobServer.createTemporaryFilename();
        byte[] digest = readFileFully(inputStream, incomingFile, buf);

        BlobKey blobKey = blobServer.moveTempFileToStore(incomingFile, jobId, digest, blobType);

        // Return computed key to client for validation
        outputStream.write(RETURN_OKAY);
        blobKey.writeToOutputStream(outputStream);
    } catch (SocketException e) {
        // happens when the other side disconnects
        LOG.debug("Socket connection closed", e);
    } catch (Throwable t) {
        LOG.error("PUT operation failed", t);
        try {
            writeErrorToStream(outputStream, t);
        } catch (IOException e) {
            // since we are in an exception case, it means not much that we could not send the
            // error
            // ignore this
        }
        clientSocket.close();
    } finally {
        // best-effort cleanup of the staging file; moveTempFileToStore may already have
        // moved it, in which case delete() fails but exists() is false
        if (incomingFile != null) {
            if (!incomingFile.delete() && incomingFile.exists()) {
                LOG.warn(
                        "Cannot delete BLOB server staging file "
                                + incomingFile.getAbsolutePath());
            }
        }
    }
}
Handles an incoming PUT request from a BLOB client. @param inputStream The input stream to read incoming data from @param outputStream The output stream to send data back to the client @param buf An auxiliary buffer for data serialization/deserialization @throws IOException thrown if an I/O error occurs while reading/writing data from/to the respective streams
put
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServerConnection.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServerConnection.java
Apache-2.0
private static byte[] readFileFully( final InputStream inputStream, final File incomingFile, final byte[] buf) throws IOException { MessageDigest md = BlobUtils.createMessageDigest(); try (FileOutputStream fos = new FileOutputStream(incomingFile)) { while (true) { final int bytesExpected = readLength(inputStream); if (bytesExpected == -1) { // done break; } if (bytesExpected > BUFFER_SIZE) { throw new IOException("Unexpected number of incoming bytes: " + bytesExpected); } readFully(inputStream, buf, 0, bytesExpected, "buffer"); fos.write(buf, 0, bytesExpected); md.update(buf, 0, bytesExpected); } return md.digest(); } }
Reads a full file from <tt>inputStream</tt> into <tt>incomingFile</tt> returning its checksum. @param inputStream stream to read from @param incomingFile file to write to @param buf An auxiliary buffer for data serialization/deserialization @return the received file's content hash @throws IOException thrown if an I/O error occurs while reading/writing data from/to the respective streams
readFileFully
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServerConnection.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServerConnection.java
Apache-2.0
/**
 * Writes the error return code followed by the given exception in serialized form to the
 * output stream.
 *
 * @param out The output stream to write to.
 * @param t The exception to send.
 * @throws IOException Thrown, if the output stream could not be written to.
 */
private static void writeErrorToStream(OutputStream out, Throwable t) throws IOException {
    final byte[] serialized = InstantiationUtil.serializeObject(t);

    out.write(RETURN_ERROR);
    writeLength(serialized.length, out);
    out.write(serialized);
}
Writes to the output stream the error return code, and the given exception in serialized form. @param out The output stream to write to. @param t The exception to send. @throws IOException Thrown, if the output stream could not be written to.
writeErrorToStream
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServerConnection.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobServerConnection.java
Apache-2.0
/**
 * Creates a {@link BlobServer} from the given configuration, fallback storage directory and
 * blob store.
 *
 * @param configuration for the BlobServer
 * @param fallbackStorageDirectory fallback storage directory that is used if no other directory
 *     has been explicitly configured
 * @param blobStore blob store to use for this blob server
 * @return new blob server instance
 * @throws IOException if we could not create the blob storage directory
 */
public static BlobServer createBlobServer(
        Configuration configuration, Reference<File> fallbackStorageDirectory, BlobStore blobStore)
        throws IOException {
    return new BlobServer(
            configuration,
            createBlobStorageDirectory(configuration, fallbackStorageDirectory),
            blobStore);
}
Creates the {@link BlobServer} from the given configuration, fallback storage directory and blob store. @param configuration for the BlobServer @param fallbackStorageDirectory fallback storage directory that is used if no other directory has been explicitly configured @param blobStore blob store to use for this blob server @return new blob server instance @throws IOException if we could not create the blob storage directory
createBlobServer
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
Apache-2.0
/**
 * Returns the BLOB service's directory for incoming (job-unrelated) files, creating it if it
 * does not exist yet.
 *
 * @param storageDir storage directory used by the BLOB service
 * @return the BLOB service's directory for incoming files
 * @throws IOException if creating the directory fails
 */
static File getIncomingDirectory(File storageDir) throws IOException {
    final File dir = new File(storageDir, "incoming");
    // no-op if the directory already exists
    Files.createDirectories(dir.toPath());
    return dir;
}
Returns the BLOB service's directory for incoming (job-unrelated) files. The directory is created if it does not exist yet. @param storageDir storage directory used by the BLOB service @return the BLOB service's directory for incoming files @throws IOException if creating the directory fails
getIncomingDirectory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
Apache-2.0
/**
 * Returns the (designated) physical storage location of the BLOB with the given key, creating
 * the parent directory if necessary.
 *
 * @param storageDir storage directory used by the BLOB service
 * @param jobId ID of the job for the incoming files (or <tt>null</tt> if job-unrelated)
 * @param key the key identifying the BLOB
 * @return the (designated) physical storage location of the BLOB
 * @throws IOException if creating the directory fails
 */
static File getStorageLocation(File storageDir, @Nullable JobID jobId, BlobKey key)
        throws IOException {
    final String path = getStorageLocationPath(storageDir.getAbsolutePath(), jobId, key);
    final File location = new File(path);
    Files.createDirectories(location.getParentFile().toPath());
    return location;
}
Returns the (designated) physical storage location of the BLOB with the given key. @param storageDir storage directory used by the BLOB service @param key the key identifying the BLOB @param jobId ID of the job for the incoming files (or <tt>null</tt> if job-unrelated) @return the (designated) physical storage location of the BLOB @throws IOException if creating the directory fails
getStorageLocation
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
Apache-2.0
static String getStorageLocationPath(String storageDir, @Nullable JobID jobId) { if (jobId == null) { // format: $base/no_job return String.format("%s/%s", storageDir, NO_JOB_DIR_PREFIX); } else { // format: $base/job_$jobId return String.format("%s/%s%s", storageDir, JOB_DIR_PREFIX, jobId.toString()); } }
Returns the BLOB server's storage directory for BLOBs belonging to the job with the given ID <em>without</em> creating the directory. @param storageDir storage directory used by the BLOB service @param jobId the ID of the job to return the storage directory for @return the storage directory for BLOBs belonging to the job with the given ID
getStorageLocationPath
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
Apache-2.0
static String getStorageLocationPath(String storageDir, @Nullable JobID jobId, BlobKey key) { if (jobId == null) { // format: $base/no_job/blob_$key return String.format( "%s/%s/%s%s", storageDir, NO_JOB_DIR_PREFIX, BLOB_FILE_PREFIX, key.toString()); } else { // format: $base/job_$jobId/blob_$key return String.format( "%s/%s%s/%s%s", storageDir, JOB_DIR_PREFIX, jobId.toString(), BLOB_FILE_PREFIX, key.toString()); } }
Returns the path for the given blob key. <p>The returned path can be used with the (local or HA) BLOB store file system back-end for recovery purposes and follows the same scheme as {@link #getStorageLocation(File, JobID, BlobKey)}. @param storageDir storage directory used by the BLOB service @param key the key identifying the BLOB @param jobId ID of the job for the incoming files @return the path to the given BLOB
getStorageLocationPath
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
Apache-2.0
/**
 * Creates a new instance of the message digest to use for the BLOB key computation.
 *
 * @return a new message digest instance for the configured {@code HASHING_ALGORITHM}
 */
static MessageDigest createMessageDigest() {
    try {
        return MessageDigest.getInstance(HASHING_ALGORITHM);
    } catch (NoSuchAlgorithmException e) {
        // the configured algorithm is expected to be available on every JVM; treat absence
        // as a fatal programming/configuration error
        throw new RuntimeException(
                "Cannot instantiate the message digest algorithm " + HASHING_ALGORITHM, e);
    }
}
Creates a new instance of the message digest to use for the BLOB key computation. @return a new instance of the message digest to use for the BLOB key computation
createMessageDigest
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
Apache-2.0
/**
 * Auxiliary method to write the length of an upcoming data chunk to an output stream.
 *
 * <p>The length is encoded as four bytes in little-endian order.
 *
 * @param length the length of the upcoming data chunk in bytes
 * @param outputStream the output stream to write the length to
 * @throws IOException thrown if an I/O error occurs while writing to the output stream
 */
static void writeLength(int length, OutputStream outputStream) throws IOException {
    final byte[] encoded = {
        (byte) length,
        (byte) (length >>> 8),
        (byte) (length >>> 16),
        (byte) (length >>> 24)
    };
    outputStream.write(encoded, 0, 4);
}
Auxiliary method to write the length of an upcoming data chunk to an output stream. @param length the length of the upcoming data chunk in bytes @param outputStream the output stream to write the length to @throws IOException thrown if an I/O error occurs while writing to the output stream
writeLength
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
Apache-2.0
/**
 * Auxiliary method to read the length of an upcoming data chunk from an input stream.
 *
 * <p>Reads exactly four bytes and decodes them in little-endian order, matching
 * {@link #writeLength(int, OutputStream)}.
 *
 * @param inputStream the input stream to read the length from
 * @return the length of the upcoming data chunk in bytes
 * @throws IOException thrown if an I/O error occurs while reading from the input stream
 */
static int readLength(InputStream inputStream) throws IOException {
    final byte[] raw = new byte[4];

    int offset = 0;
    while (offset < 4) {
        final int count = inputStream.read(raw, offset, 4 - offset);
        if (count < 0) {
            throw new EOFException("Read an incomplete length");
        }
        offset += count;
    }

    return (raw[0] & 0xff)
            | ((raw[1] & 0xff) << 8)
            | ((raw[2] & 0xff) << 16)
            | ((raw[3] & 0xff) << 24);
}
Auxiliary method to read the length of an upcoming data chunk from an input stream. @param inputStream the input stream to read the length from @return the length of the upcoming data chunk in bytes @throws IOException thrown if an I/O error occurs while reading from the input stream
readLength
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
Apache-2.0
static Throwable readExceptionFromStream(InputStream in) throws IOException { int len = readLength(in); byte[] bytes = new byte[len]; readFully(in, bytes, 0, len, "Error message"); try { return (Throwable) InstantiationUtil.deserializeObject(bytes, ClassLoader.getSystemClassLoader()); } catch (ClassNotFoundException e) { // should never occur throw new IOException("Could not transfer error message", e); } }
Reads exception from given {@link InputStream}. @param in the input stream to read from @return exception that was read @throws IOException thrown if an I/O error occurs while reading from the input stream
readExceptionFromStream
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/BlobUtils.java
Apache-2.0
@Override public void registerJob(JobID jobId) { checkNotNull(jobId); synchronized (jobRefCounters) { RefCount ref = jobRefCounters.get(jobId); if (ref == null) { ref = new RefCount(); jobRefCounters.put(jobId, ref); } else { // reset cleanup timeout ref.keepUntil = -1; } ++ref.references; } }
Registers use of job-related BLOBs. <p>Using any other method to access BLOBs, e.g. {@link #getFile}, is only valid within calls to <tt>registerJob(JobID)</tt> and {@link #releaseJob(JobID)}. @param jobId ID of the job this blob belongs to @see #releaseJob(JobID)
registerJob
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobCache.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobCache.java
Apache-2.0
/**
 * Returns the path to a local copy of the file associated with the provided job ID and blob
 * key, delegating to {@code getFileInternal}.
 *
 * @param jobId ID of the job this blob belongs to (must not be {@code null})
 * @param key blob key associated with the requested file
 * @return The path to the file.
 * @throws java.io.FileNotFoundException if the BLOB does not exist;
 * @throws IOException if any other error occurs when retrieving the file
 */
@Override
public File getFile(JobID jobId, PermanentBlobKey key) throws IOException {
    checkNotNull(jobId);
    return getFileInternal(jobId, key);
}
Returns the path to a local copy of the file associated with the provided job ID and blob key. <p>We will first attempt to serve the BLOB from the local storage. If the BLOB is not in there, we will try to download it from the HA store, or directly from the {@link BlobServer}. @param jobId ID of the job this blob belongs to @param key blob key associated with the requested file @return The path to the file. @throws java.io.FileNotFoundException if the BLOB does not exist; @throws IOException if any other error occurs when retrieving the file
getFile
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobCache.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobCache.java
Apache-2.0
/**
 * Deletes the locally cached blob file with the given key.
 *
 * @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
 * @param blobKey The key of the desired BLOB.
 * @return <tt>true</tt> if the file was deleted or did not exist; <tt>false</tt> if deletion
 *     failed while the file still exists
 */
private boolean deleteFile(JobID jobId, BlobKey blobKey) {
    final String path =
            BlobUtils.getStorageLocationPath(
                    storageDir.deref().getAbsolutePath(), jobId, blobKey);
    final File localFile = new File(path);

    // success if the file was deleted, or never existed in the first place
    if (localFile.delete() || !localFile.exists()) {
        return true;
    }

    log.warn(
            "Failed to delete locally cached BLOB {} at {}",
            blobKey,
            localFile.getAbsolutePath());
    return false;
}
Delete the blob file with the given key. @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated) @param blobKey The key of the desired BLOB.
deleteFile
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobCache.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobCache.java
Apache-2.0
/**
 * Returns the job reference counters - for testing purposes only!
 *
 * @return job reference counters (internal state! — callers must not mutate)
 */
@VisibleForTesting
Map<JobID, RefCount> getJobRefCounters() {
    return jobRefCounters;
}
Returns the job reference counters - for testing purposes only! @return job reference counters (internal state!)
getJobRefCounters
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobCache.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobCache.java
Apache-2.0
default byte[] readFile(JobID jobId, PermanentBlobKey key) throws IOException { // The default implementation doesn't guarantee that the file won't be deleted concurrently // by other threads while reading the contents. return FileUtils.readAllBytes(getFile(jobId, key).toPath()); }
Returns the content of the file for the BLOB with the provided job ID the blob key. <p>Compared to {@code getFile}, {@code readFile} will attempt to read the entire file after retrieving it. If file reading and file retrieving is done in the same WRITE lock, it can avoid the scenario that the path to the file is deleted concurrently by other threads when the file is retrieved but not read yet. @param jobId ID of the job this blob belongs to @param key BLOB key associated with the requested file @return The content of the BLOB. @throws java.io.FileNotFoundException if the BLOB does not exist; @throws IOException if any other error occurs when retrieving the file.
readFile
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobService.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/PermanentBlobService.java
Apache-2.0
private boolean deleteInternal(@Nullable JobID jobId, TransientBlobKey key) { final File localFile = new File( BlobUtils.getStorageLocationPath( storageDir.deref().getAbsolutePath(), jobId, key)); readWriteLock.writeLock().lock(); try { if (!localFile.delete() && localFile.exists()) { log.warn( "Failed to delete locally cached BLOB {} at {}", key, localFile.getAbsolutePath()); return false; } else { // this needs to happen inside the write lock in case of concurrent getFile() calls blobExpiryTimes.remove(Tuple2.of(jobId, key)); } } finally { readWriteLock.writeLock().unlock(); } return true; }
Deletes the file associated with the blob key in this BLOB cache. @param jobId ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated) @param key blob key associated with the file to be deleted @return <tt>true</tt> if the given blob is successfully deleted or non-existing; <tt>false</tt> otherwise
deleteInternal
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/TransientBlobCache.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/TransientBlobCache.java
Apache-2.0
@Override public void run() { // let's cache the current time - we do not operate on a millisecond precision anyway final long currentTimeMillis = System.currentTimeMillis(); // iterate through all entries and remove those where the current time is past their expiry Set<Map.Entry<Tuple2<JobID, TransientBlobKey>, Long>> entries = new HashSet<>(blobExpiryTimes.entrySet()); for (Map.Entry<Tuple2<JobID, TransientBlobKey>, Long> entry : entries) { if (currentTimeMillis >= entry.getValue()) { JobID jobId = entry.getKey().f0; TransientBlobKey blobKey = entry.getKey().f1; cleanupCallback.accept(jobId, blobKey); } } }
Cleans up transient BLOBs whose TTL is up, tolerating that files do not exist (anymore).
run
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blob/TransientBlobCleanupTask.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blob/TransientBlobCleanupTask.java
Apache-2.0
/**
 * Creates a {@link DefaultBlocklistHandler} backed by a fresh {@code DefaultBlocklistTracker}.
 *
 * @param blocklistContext context the handler operates in
 * @param taskManagerNodeIdRetriever maps a resource ID to its node ID
 * @param mainThreadExecutor executor for main-thread operations
 * @param log logger to use
 * @return a new {@link DefaultBlocklistHandler} instance
 */
@Override
public BlocklistHandler create(
        BlocklistContext blocklistContext,
        Function<ResourceID, String> taskManagerNodeIdRetriever,
        ComponentMainThreadExecutor mainThreadExecutor,
        Logger log) {
    return new DefaultBlocklistHandler(
            new DefaultBlocklistTracker(),
            blocklistContext,
            taskManagerNodeIdRetriever,
            timeoutCheckInterval,
            mainThreadExecutor,
            log);
}
The factory to instantiate {@link DefaultBlocklistHandler}.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blocklist/DefaultBlocklistHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blocklist/DefaultBlocklistHandler.java
Apache-2.0
/**
 * Creates a {@link NoOpBlocklistHandler}; all arguments are ignored since the no-op handler
 * needs no collaborators.
 *
 * @return a new {@link NoOpBlocklistHandler} instance
 */
@Override
public BlocklistHandler create(
        BlocklistContext blocklistContext,
        Function<ResourceID, String> taskManagerNodeIdRetriever,
        ComponentMainThreadExecutor mainThreadExecutor,
        Logger log) {
    return new NoOpBlocklistHandler();
}
The factory to instantiate {@link NoOpBlocklistHandler}.
create
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/blocklist/NoOpBlocklistHandler.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/blocklist/NoOpBlocklistHandler.java
Apache-2.0
/**
 * Returns the type the broadcast variable was initialized with.
 *
 * @return the conflicting initialization type
 */
public Class<?> getType() {
    return type;
}
Indicates that a broadcast variable was initialized with a {@link DefaultBroadcastVariableInitializer} as a non-{@link java.util.List} type, and later accessed using {@link RuntimeContext#getBroadcastVariable(String)} which may only return lists.
getType
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/broadcast/InitializationTypeConflictException.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/broadcast/InitializationTypeConflictException.java
Apache-2.0
/**
 * Returns the timestamp when the checkpoint was triggered.
 *
 * @return Timestamp when the checkpoint was triggered.
 */
public long getTriggerTimestamp() {
    return triggerTimestamp;
}
Returns the timestamp when the checkpoint was triggered. @return Timestamp when the checkpoint was triggered.
getTriggerTimestamp
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/AbstractCheckpointStats.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/AbstractCheckpointStats.java
Apache-2.0
/**
 * Returns the properties of this checkpoint.
 *
 * @return Properties of this checkpoint.
 */
public CheckpointProperties getProperties() {
    return props;
}
Returns the properties of this checkpoint. @return Properties of this checkpoint.
getProperties
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/AbstractCheckpointStats.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/AbstractCheckpointStats.java
Apache-2.0
/**
 * Returns the total number of subtasks involved in this checkpoint.
 *
 * @return Total number of subtasks involved in this checkpoint.
 */
public int getNumberOfSubtasks() {
    return numberOfSubtasks;
}
Returns the total number of subtasks involved in this checkpoint. @return Total number of subtasks involved in this checkpoint.
getNumberOfSubtasks
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/AbstractCheckpointStats.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/AbstractCheckpointStats.java
Apache-2.0
/**
 * Looks up the task state stats for the given job vertex ID.
 *
 * @param jobVertexId Job vertex ID of the task stats to look up.
 * @return The task state stats instance for the given ID, or <code>null</code> if none exists.
 */
public TaskStateStats getTaskStateStats(JobVertexID jobVertexId) {
    return taskStats.get(jobVertexId);
}
Returns the task state stats for the given job vertex ID or <code>null</code> if no task with such an ID is available. @param jobVertexId Job vertex ID of the task stats to look up. @return The task state stats instance for the given ID or <code>null</code>.
getTaskStateStats
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/AbstractCheckpointStats.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/AbstractCheckpointStats.java
Apache-2.0
/**
 * Returns all task state stats instances.
 *
 * @return All task state stats instances (a view of the internal map's values).
 */
public Collection<TaskStateStats> getAllTaskStateStats() {
    return taskStats.values();
}
Returns all task state stats instances. @return All task state stats instances.
getAllTaskStateStats
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/AbstractCheckpointStats.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/AbstractCheckpointStats.java
Apache-2.0
/**
 * Returns the ack timestamp of the latest acknowledged subtask or <code>-1</code> if none was
 * acknowledged yet.
 *
 * @return Ack timestamp of the latest acknowledged subtask or <code>-1</code>.
 */
public long getLatestAckTimestamp() {
    final SubtaskStateStats latest = getLatestAcknowledgedSubtaskStats();
    return latest == null ? -1 : latest.getAckTimestamp();
}
Returns the ack timestamp of the latest acknowledged subtask or <code>-1</code> if none was acknowledged yet. @return Ack timestamp of the latest acknowledged subtask or <code>-1</code>.
getLatestAckTimestamp
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/AbstractCheckpointStats.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/AbstractCheckpointStats.java
Apache-2.0
public long getEndToEndDuration() { SubtaskStateStats subtask = getLatestAcknowledgedSubtaskStats(); if (subtask != null) { return Math.max(0, subtask.getAckTimestamp() - triggerTimestamp); } else { return -1; } }
Returns the duration of this checkpoint calculated as the time since triggering until the latest acknowledged subtask or <code>-1</code> if no subtask was acknowledged yet. @return Duration of this checkpoint or <code>-1</code> if no subtask was acknowledged yet.
getEndToEndDuration
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/AbstractCheckpointStats.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/AbstractCheckpointStats.java
Apache-2.0
protected static Optional<Long> findLowest(Deque<CompletedCheckpoint> unSubsumedCheckpoints) { for (CompletedCheckpoint p : unSubsumedCheckpoints) { if (!p.getProperties().isSavepoint()) { return Optional.of(p.getCheckpointID()); } } return Optional.empty(); }
Unregister shared states that are no longer in use. Should be called after completing a checkpoint (even if no checkpoint was subsumed, so that state added by an aborted checkpoints and not used later can be removed).
findLowest
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/AbstractCompleteCheckpointStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/AbstractCompleteCheckpointStore.java
Apache-2.0
default CompletableFuture<Void> discardAsync(Executor ioExecutor) { return FutureUtils.runAsync(this::discard, ioExecutor); }
Extra interface for discarding the checkpoint.
discardAsync
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/Checkpoint.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/Checkpoint.java
Apache-2.0
public boolean addMasterHook(MasterTriggerRestoreHook<?> hook) { checkNotNull(hook); final String id = hook.getIdentifier(); checkArgument(!StringUtils.isNullOrWhitespaceOnly(id), "The hook has a null or empty id"); synchronized (lock) { if (!masterHooks.containsKey(id)) { masterHooks.put(id, hook); return true; } else { return false; } } }
Adds the given master hook to the checkpoint coordinator. This method does nothing, if the checkpoint coordinator already contained a hook with the same ID (as defined via {@link MasterTriggerRestoreHook#getIdentifier()}). @param hook The hook to add. @return True, if the hook was added, false if the checkpoint coordinator already contained a hook with the same ID.
addMasterHook
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
Apache-2.0
public int getNumberOfRegisteredMasterHooks() { synchronized (lock) { return masterHooks.size(); } }
Gets the number of currently register master hooks.
getNumberOfRegisteredMasterHooks
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
Apache-2.0
public void setIsProcessingBacklog(OperatorID operatorID, boolean isProcessingBacklog) { synchronized (lock) { if (isProcessingBacklog) { backlogOperators.add(operatorID); } else { backlogOperators.remove(operatorID); } long currentCheckpointInterval = getCurrentCheckpointInterval(); if (currentCheckpointInterval != CheckpointCoordinatorConfiguration.DISABLED_CHECKPOINT_INTERVAL) { long currentRelativeTime = clock.relativeTimeMillis(); if (currentRelativeTime + currentCheckpointInterval < nextCheckpointTriggeringRelativeTime) { rescheduleTrigger(currentRelativeTime, currentCheckpointInterval); } } } }
Reports whether a source operator is currently processing backlog. <p>If any source operator is processing backlog, the checkpoint interval would be decided by {@code execution.checkpointing.interval-during-backlog} instead of {@code execution.checkpointing.interval}. <p>If a source has not invoked this method, the source is considered to have isProcessingBacklog=false. If a source operator has invoked this method multiple times, the last reported value is used. @param operatorID the operator ID of the source operator. @param isProcessingBacklog whether the source operator is processing backlog.
setIsProcessingBacklog
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
Apache-2.0
public CompletableFuture<CompletedCheckpoint> triggerSavepoint( @Nullable final String targetLocation, final SavepointFormatType formatType) { final CheckpointProperties properties = CheckpointProperties.forSavepoint(!unalignedCheckpointsEnabled, formatType); return triggerSavepointInternal(properties, targetLocation); }
Triggers a savepoint with the given savepoint directory as a target. @param targetLocation Target location for the savepoint, optional. If null, the state backend's configured default will be used. @return A future to the completed checkpoint @throws IllegalStateException If no savepoint directory has been specified and no default savepoint directory has been configured
triggerSavepoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
Apache-2.0
public CompletableFuture<CompletedCheckpoint> triggerCheckpoint(boolean isPeriodic) { return triggerCheckpointFromCheckpointThread(checkpointProperties, null, isPeriodic); }
Triggers a new standard checkpoint and uses the given timestamp as the checkpoint timestamp. The return value is a future. It completes when the checkpoint triggered finishes or an error occurred. @param isPeriodic Flag indicating whether this triggered checkpoint is periodic. @return a future to the completed checkpoint.
triggerCheckpoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
Apache-2.0
private CompletableFuture<Void> snapshotMasterState(PendingCheckpoint checkpoint) { if (masterHooks.isEmpty()) { return CompletableFuture.completedFuture(null); } final long checkpointID = checkpoint.getCheckpointID(); final long timestamp = checkpoint.getCheckpointTimestamp(); final CompletableFuture<Void> masterStateCompletableFuture = new CompletableFuture<>(); for (MasterTriggerRestoreHook<?> masterHook : masterHooks.values()) { MasterHooks.triggerHook(masterHook, checkpointID, timestamp, executor) .whenCompleteAsync( (masterState, throwable) -> { try { synchronized (lock) { if (masterStateCompletableFuture.isDone()) { return; } if (checkpoint.isDisposed()) { throw new IllegalStateException( "Checkpoint " + checkpointID + " has been discarded"); } if (throwable == null) { checkpoint.acknowledgeMasterState( masterHook.getIdentifier(), masterState); if (checkpoint.areMasterStatesFullyAcknowledged()) { masterStateCompletableFuture.complete(null); } } else { masterStateCompletableFuture.completeExceptionally( throwable); } } } catch (Throwable t) { masterStateCompletableFuture.completeExceptionally(t); } }, timer); } return masterStateCompletableFuture; }
Snapshot master hook states asynchronously. @param checkpoint the pending checkpoint @return the future represents master hook states are finished or not
snapshotMasterState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
Apache-2.0
private void onTriggerSuccess() { isTriggering = false; executeQueuedRequest(); }
Trigger request is successful. NOTE, it must be invoked if trigger request is successful.
onTriggerSuccess
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
Apache-2.0
public void receiveDeclineMessage(DeclineCheckpoint message, String taskManagerLocationInfo) { if (shutdown || message == null) { return; } if (!job.equals(message.getJob())) { throw new IllegalArgumentException( "Received DeclineCheckpoint message for job " + message.getJob() + " from " + taskManagerLocationInfo + " while this coordinator handles job " + job); } final long checkpointId = message.getCheckpointId(); final CheckpointException checkpointException = message.getSerializedCheckpointException().unwrap(); final String reason = checkpointException.getMessage(); PendingCheckpoint checkpoint; synchronized (lock) { // we need to check inside the lock for being shutdown as well, otherwise we // get races and invalid error log messages if (shutdown) { return; } checkpoint = pendingCheckpoints.get(checkpointId); if (checkpoint != null) { Preconditions.checkState( !checkpoint.isDisposed(), "Received message for discarded but non-removed checkpoint " + checkpointId); LOG.info( "Decline checkpoint {} by task {} of job {} at {}.", checkpointId, message.getTaskExecutionId(), job, taskManagerLocationInfo, checkpointException.getCause()); abortPendingCheckpoint( checkpoint, checkpointException, message.getTaskExecutionId()); } else if (LOG.isDebugEnabled()) { if (recentExpiredCheckpoints.contains(checkpointId)) { // message is for an expired checkpoint LOG.debug( "Received another decline message for now expired checkpoint attempt {} from task {} of job {} at {} : {}", checkpointId, message.getTaskExecutionId(), job, taskManagerLocationInfo, reason); } else { // message is for an unknown checkpoint. might be so old that we don't even // remember it any more LOG.debug( "Received decline message for unknown (too old?) checkpoint attempt {} from task {} of job {} at {} : {}", checkpointId, message.getTaskExecutionId(), job, taskManagerLocationInfo, reason); } } } }
Receives a {@link DeclineCheckpoint} message for a pending checkpoint. @param message Checkpoint decline from the task manager @param taskManagerLocationInfo The location info of the decline checkpoint message's sender
receiveDeclineMessage
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
Apache-2.0
public boolean receiveAcknowledgeMessage( AcknowledgeCheckpoint message, String taskManagerLocationInfo) throws CheckpointException { if (shutdown || message == null) { return false; } if (!job.equals(message.getJob())) { LOG.error( "Received wrong AcknowledgeCheckpoint message for job {} from {} : {}", job, taskManagerLocationInfo, message); return false; } final long checkpointId = message.getCheckpointId(); synchronized (lock) { // we need to check inside the lock for being shutdown as well, otherwise we // get races and invalid error log messages if (shutdown) { return false; } final PendingCheckpoint checkpoint = pendingCheckpoints.get(checkpointId); if (message.getSubtaskState() != null) { // Register shared state regardless of checkpoint state and task ACK state. // This way, shared state is // 1. kept if the message is late or state will be used by the task otherwise // 2. removed eventually upon checkpoint subsumption (or job cancellation) // Do not register savepoints' shared state, as Flink is not in charge of // savepoints' lifecycle if (checkpoint == null || !checkpoint.getProps().isSavepoint()) { message.getSubtaskState() .registerSharedStates( completedCheckpointStore.getSharedStateRegistry(), checkpointId); } } if (checkpoint != null && !checkpoint.isDisposed()) { switch (checkpoint.acknowledgeTask( message.getTaskExecutionId(), message.getSubtaskState(), message.getCheckpointMetrics())) { case SUCCESS: LOG.debug( "Received acknowledge message for checkpoint {} from task {} of job {} at {}.", checkpointId, message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo); if (checkpoint.isFullyAcknowledged()) { completePendingCheckpoint(checkpoint); } break; case DUPLICATE: LOG.debug( "Received a duplicate acknowledge message for checkpoint {}, task {}, job {}, location {}.", message.getCheckpointId(), message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo); break; case UNKNOWN: LOG.warn( "Could not acknowledge the checkpoint 
{} for task {} of job {} at {}, " + "because the task's execution attempt id was unknown. Discarding " + "the state handle to avoid lingering state.", message.getCheckpointId(), message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo); discardSubtaskState( message.getJob(), message.getTaskExecutionId(), message.getCheckpointId(), message.getSubtaskState()); break; case DISCARDED: LOG.warn( "Could not acknowledge the checkpoint {} for task {} of job {} at {}, " + "because the pending checkpoint had been discarded. Discarding the " + "state handle tp avoid lingering state.", message.getCheckpointId(), message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo); discardSubtaskState( message.getJob(), message.getTaskExecutionId(), message.getCheckpointId(), message.getSubtaskState()); } return true; } else if (checkpoint != null) { // this should not happen throw new IllegalStateException( "Received message for discarded but non-removed checkpoint " + checkpointId); } else { reportCheckpointMetrics( message.getCheckpointId(), message.getTaskExecutionId(), message.getCheckpointMetrics()); boolean wasPendingCheckpoint; // message is for an unknown checkpoint, or comes too late (checkpoint disposed) if (recentExpiredCheckpoints.contains(checkpointId)) { wasPendingCheckpoint = true; LOG.warn( "Received late message for now expired checkpoint attempt {} from task " + "{} of job {} at {}.", checkpointId, message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo); } else { LOG.debug( "Received message for an unknown checkpoint {} from task {} of job {} at {}.", checkpointId, message.getTaskExecutionId(), message.getJob(), taskManagerLocationInfo); wasPendingCheckpoint = false; } // try to discard the state so that we don't have lingering state lying around discardSubtaskState( message.getJob(), message.getTaskExecutionId(), message.getCheckpointId(), message.getSubtaskState()); return wasPendingCheckpoint; } } }
Receives an AcknowledgeCheckpoint message and returns whether the message was associated with a pending checkpoint. @param message Checkpoint ack from the task manager @param taskManagerLocationInfo The location of the acknowledge checkpoint message's sender @return Flag indicating whether the ack'd checkpoint was associated with a pending checkpoint. @throws CheckpointException If the checkpoint cannot be added to the completed checkpoint store.
receiveAcknowledgeMessage
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
Apache-2.0
public OptionalLong restoreLatestCheckpointedStateToSubtasks( final Set<ExecutionJobVertex> tasks) throws Exception { // when restoring subtasks only we accept potentially unmatched state for the // following reasons // - the set frequently does not include all Job Vertices (only the ones that are part // of the restarted region), meaning there will be unmatched state by design. // - because what we might end up restoring from an original savepoint with unmatched // state, if there is was no checkpoint yet. return restoreLatestCheckpointedStateInternal( tasks, OperatorCoordinatorRestoreBehavior .SKIP, // local/regional recovery does not reset coordinators false, // recovery might come before first successful checkpoint true, false); // see explanation above }
Restores the latest checkpointed state to a set of subtasks. This method represents a "local" or "regional" failover and does restore states to coordinators. Note that a regional failover might still include all tasks. @param tasks Set of job vertices to restore. State for these vertices is restored via {@link Execution#setInitialState(JobManagerTaskRestore)}. @return An {@code OptionalLong} with the checkpoint ID, if state was restored, an empty {@code OptionalLong} otherwise. @throws IllegalStateException If the CheckpointCoordinator is shut down. @throws IllegalStateException If no completed checkpoint is available and the <code> failIfNoCheckpoint</code> flag has been set. @throws IllegalStateException If the checkpoint contains state that cannot be mapped to any job vertex in <code>tasks</code> and the <code>allowNonRestoredState</code> flag has not been set. @throws IllegalStateException If the max parallelism changed for an operator that restores state from this checkpoint. @throws IllegalStateException If the parallelism changed for an operator that restores <i>non-partitioned</i> state from this checkpoint.
restoreLatestCheckpointedStateToSubtasks
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
Apache-2.0
public boolean restoreLatestCheckpointedStateToAll( final Set<ExecutionJobVertex> tasks, final boolean allowNonRestoredState) throws Exception { final OptionalLong restoredCheckpointId = restoreLatestCheckpointedStateInternal( tasks, OperatorCoordinatorRestoreBehavior .RESTORE_OR_RESET, // global recovery restores coordinators, or // resets them to empty false, // recovery might come before first successful checkpoint allowNonRestoredState, false); return restoredCheckpointId.isPresent(); }
Restores the latest checkpointed state to all tasks and all coordinators. This method represents a "global restore"-style operation where all stateful tasks and coordinators from the given set of Job Vertices are restored. are restored to their latest checkpointed state. @param tasks Set of job vertices to restore. State for these vertices is restored via {@link Execution#setInitialState(JobManagerTaskRestore)}. @param allowNonRestoredState Allow checkpoint state that cannot be mapped to any job vertex in tasks. @return <code>true</code> if state was restored, <code>false</code> otherwise. @throws IllegalStateException If the CheckpointCoordinator is shut down. @throws IllegalStateException If no completed checkpoint is available and the <code> failIfNoCheckpoint</code> flag has been set. @throws IllegalStateException If the checkpoint contains state that cannot be mapped to any job vertex in <code>tasks</code> and the <code>allowNonRestoredState</code> flag has not been set. @throws IllegalStateException If the max parallelism changed for an operator that restores state from this checkpoint. @throws IllegalStateException If the parallelism changed for an operator that restores <i>non-partitioned</i> state from this checkpoint.
restoreLatestCheckpointedStateToAll
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
Apache-2.0
public boolean restoreInitialCheckpointIfPresent(final Set<ExecutionJobVertex> tasks) throws Exception { final OptionalLong restoredCheckpointId = restoreLatestCheckpointedStateInternal( tasks, OperatorCoordinatorRestoreBehavior.RESTORE_IF_CHECKPOINT_PRESENT, false, // initial checkpoints exist only on JobManager failover. ok if not // present. false, true); // JobManager failover means JobGraphs match exactly. return restoredCheckpointId.isPresent(); }
Restores the latest checkpointed at the beginning of the job execution. If there is a checkpoint, this method acts like a "global restore"-style operation where all stateful tasks and coordinators from the given set of Job Vertices are restored. @param tasks Set of job vertices to restore. State for these vertices is restored via {@link Execution#setInitialState(JobManagerTaskRestore)}. @return True, if a checkpoint was found and its state was restored, false otherwise.
restoreInitialCheckpointIfPresent
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinator.java
Apache-2.0
@Override public void jobStatusChanges(JobID jobId, JobStatus newJobStatus, long timestamp) { if (newJobStatus == JobStatus.RUNNING && allTasksOutputNonBlocking) { // start the checkpoint scheduler if there is no blocking edge coordinator.startCheckpointScheduler(); } else { // anything else should stop the trigger for now coordinator.stopCheckpointScheduler(); } }
This actor listens to changes in the JobStatus and activates or deactivates the periodic checkpoint scheduler.
jobStatusChanges
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorDeActivator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointCoordinatorDeActivator.java
Apache-2.0
public CheckpointFailureReason getCheckpointFailureReason() { return checkpointFailureReason; }
Base class for checkpoint related exceptions.
getCheckpointFailureReason
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointException.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointException.java
Apache-2.0
void handleJobLevelCheckpointException( CheckpointProperties checkpointProperties, CheckpointException exception, long checkpointId) { if (!checkpointProperties.isSavepoint()) { checkFailureAgainstCounter(exception, checkpointId, failureCallback::failJob); } }
Handle job level checkpoint exception with a handler callback. @param exception the checkpoint exception. @param checkpointId the failed checkpoint id used to count the continuous failure number based on checkpoint id sequence. In trigger phase, we may not get the checkpoint id when the failure happens before the checkpoint id generation. In this case, it will be specified a negative latest generated checkpoint id as a special flag.
handleJobLevelCheckpointException
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointFailureManager.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointFailureManager.java
Apache-2.0
public String message() { return message; }
Various reasons why a checkpoint was failure.
message
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointFailureReason.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointFailureReason.java
Apache-2.0
public boolean isPreFlight() { return preFlight; }
@return true if this value indicates a failure reason happening before a checkpoint is passed to a job's tasks.
isPreFlight
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointFailureReason.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointFailureReason.java
Apache-2.0
public SnapshotType getCheckpointType() { return checkpointType; }
Returns the type of checkpoint to perform.
getCheckpointType
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointOptions.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointOptions.java
Apache-2.0
boolean forceCheckpoint() { return forced; }
Returns whether the checkpoint should be forced. <p>Forced checkpoints ignore the configured maximum number of concurrent checkpoints and minimum time between checkpoints. Furthermore, they are not subsumed by more recent checkpoints as long as they are pending. @return <code>true</code> if the checkpoint should be forced; <code>false</code> otherwise. @see CheckpointCoordinator @see PendingCheckpoint
forceCheckpoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
Apache-2.0
public boolean isUnclaimed() { return unclaimed; }
Returns whether the checkpoint should be restored in a {@link RecoveryClaimMode#NO_CLAIM} mode.
isUnclaimed
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
Apache-2.0
boolean discardOnSubsumed() { return discardSubsumed; }
Returns whether the checkpoint should be discarded when it is subsumed. <p>A checkpoint is subsumed when the maximum number of retained checkpoints is reached and a more recent checkpoint completes.. @return <code>true</code> if the checkpoint should be discarded when it is subsumed; <code> false</code> otherwise. @see CompletedCheckpointStore
discardOnSubsumed
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
Apache-2.0
boolean discardOnJobFinished() { return discardFinished; }
Returns whether the checkpoint should be discarded when the owning job reaches the {@link JobStatus#FINISHED} state. @return <code>true</code> if the checkpoint should be discarded when the owning job reaches the {@link JobStatus#FINISHED} state; <code>false</code> otherwise. @see CompletedCheckpointStore
discardOnJobFinished
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
Apache-2.0
boolean discardOnJobCancelled() { return discardCancelled; }
Returns whether the checkpoint should be discarded when the owning job reaches the {@link JobStatus#CANCELED} state. @return <code>true</code> if the checkpoint should be discarded when the owning job reaches the {@link JobStatus#CANCELED} state; <code>false</code> otherwise. @see CompletedCheckpointStore
discardOnJobCancelled
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
Apache-2.0
boolean discardOnJobFailed() { return discardFailed; }
Returns whether the checkpoint should be discarded when the owning job reaches the {@link JobStatus#FAILED} state. @return <code>true</code> if the checkpoint should be discarded when the owning job reaches the {@link JobStatus#FAILED} state; <code>false</code> otherwise. @see CompletedCheckpointStore
discardOnJobFailed
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
Apache-2.0
boolean discardOnJobSuspended() { return discardSuspended; }
Returns whether the checkpoint should be discarded when the owning job reaches the {@link JobStatus#SUSPENDED} state. @return <code>true</code> if the checkpoint should be discarded when the owning job reaches the {@link JobStatus#SUSPENDED} state; <code>false</code> otherwise. @see CompletedCheckpointStore
discardOnJobSuspended
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
Apache-2.0
public boolean isSavepoint() { return checkpointType.isSavepoint(); }
Returns whether the checkpoint properties describe a standard savepoint. @return <code>true</code> if the properties describe a savepoint, <code>false</code> otherwise.
isSavepoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
Apache-2.0
public static CheckpointProperties forCheckpoint(CheckpointRetentionPolicy policy) { switch (policy) { case NEVER_RETAIN_AFTER_TERMINATION: return CHECKPOINT_NEVER_RETAINED; case RETAIN_ON_FAILURE: return CHECKPOINT_RETAINED_ON_FAILURE; case RETAIN_ON_CANCELLATION: return CHECKPOINT_RETAINED_ON_CANCELLATION; default: throw new IllegalArgumentException("unknown policy: " + policy); } }
Creates the checkpoint properties for a checkpoint. <p>Checkpoints may be queued in case too many other checkpoints are currently happening. They are garbage collected automatically, except when the owning job terminates in state {@link JobStatus#FAILED}. The user is required to configure the clean up behaviour on job cancellation. @return Checkpoint properties for an external checkpoint.
forCheckpoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointProperties.java
Apache-2.0
Optional<CheckpointTriggerRequest> chooseQueuedRequestToExecute( boolean isTriggering, long lastCompletionMs) { Optional<CheckpointTriggerRequest> request = chooseRequestToExecute(isTriggering, lastCompletionMs); request.ifPresent(CheckpointRequestDecider::logInQueueTime); return request; }
Choose one of the queued requests to execute, if any. @return request that should be executed
chooseQueuedRequestToExecute
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointRequestDecider.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointRequestDecider.java
Apache-2.0
public void addSubsumedCheckpoint(CompletedCheckpoint completedCheckpoint) { synchronized (lock) { subsumedCheckpoints.add(completedCheckpoint); } }
Add one subsumed checkpoint to CheckpointsCleaner, the subsumed checkpoint would be discarded at {@link #cleanSubsumedCheckpoints(long, Set, Runnable, Executor)}. @param completedCheckpoint which is subsumed.
addSubsumedCheckpoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointsCleaner.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointsCleaner.java
Apache-2.0
public long getNumberOfRestoredCheckpoints() { return numRestoredCheckpoints; }
Returns the number of restored checkpoints. @return Number of restored checkpoints.
getNumberOfRestoredCheckpoints
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
Apache-2.0
public long getTotalNumberOfCheckpoints() { return numTotalCheckpoints; }
Returns the total number of checkpoints (in progress, completed, failed). @return Total number of checkpoints.
getTotalNumberOfCheckpoints
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
Apache-2.0
public int getNumberOfInProgressCheckpoints() { return numInProgressCheckpoints; }
Returns the number of in progress checkpoints. @return Number of in progress checkpoints.
getNumberOfInProgressCheckpoints
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
Apache-2.0
public long getNumberOfCompletedCheckpoints() { return numCompletedCheckpoints; }
Returns the number of completed checkpoints. @return Number of completed checkpoints.
getNumberOfCompletedCheckpoints
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
Apache-2.0
public long getNumberOfFailedCheckpoints() { return numFailedCheckpoints; }
Returns the number of failed checkpoints. @return Number of failed checkpoints.
getNumberOfFailedCheckpoints
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
Apache-2.0
void incrementInProgressCheckpoints() { numInProgressCheckpoints++; numTotalCheckpoints++; }
Increments the number of total and in progress checkpoints.
incrementInProgressCheckpoints
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
Apache-2.0
void incrementCompletedCheckpoints() { if (canDecrementOfInProgressCheckpointsNumber()) { numInProgressCheckpoints--; } numCompletedCheckpoints++; }
Increments the number of successfully completed checkpoints. <p>It is expected that this follows a previous call to {@link #incrementInProgressCheckpoints()}.
incrementCompletedCheckpoints
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
Apache-2.0
void incrementFailedCheckpoints() { if (canDecrementOfInProgressCheckpointsNumber()) { numInProgressCheckpoints--; } numFailedCheckpoints++; }
Increments the number of failed checkpoints. <p>It is expected that this follows a previous call to {@link #incrementInProgressCheckpoints()}.
incrementFailedCheckpoints
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
Apache-2.0
void incrementFailedCheckpointsWithoutInProgress() { numFailedCheckpoints++; numTotalCheckpoints++; }
Increments the number of failed checkpoints without in progress checkpoint. For example, it should be callback when triggering checkpoint failure before creating PendingCheckpoint.
incrementFailedCheckpointsWithoutInProgress
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsCounts.java
Apache-2.0
void addInProgressCheckpoint(PendingCheckpointStats pending) { if (readOnly) { throw new UnsupportedOperationException( "Can't create a snapshot of a read-only history."); } if (maxSize == 0) { return; } checkNotNull(pending, "Pending checkpoint"); // Grow the array if required. This happens only for the first entries // and makes the iterator logic easier, because we don't have any // null elements with the growing array. if (checkpointsArray.length < maxSize) { checkpointsArray = Arrays.copyOf(checkpointsArray, checkpointsArray.length + 1); } // Wrap around if we are at the end. The next pos is the least recently // added checkpoint. if (nextPos == checkpointsArray.length) { nextPos = 0; } checkpointsArray[nextPos++] = pending; recentCheckpoints.put(pending.checkpointId, pending); }
Adds an in progress checkpoint to the checkpoint history. @param pending In progress checkpoint to add.
addInProgressCheckpoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsHistory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsHistory.java
Apache-2.0
boolean replacePendingCheckpointById(AbstractCheckpointStats completedOrFailed) { checkArgument( !completedOrFailed.getStatus().isInProgress(), "Not allowed to replace with in progress checkpoints."); if (readOnly) { throw new UnsupportedOperationException( "Can't create a snapshot of a read-only history."); } // Update the latest checkpoint stats if (completedOrFailed.getStatus().isCompleted()) { CompletedCheckpointStats completed = (CompletedCheckpointStats) completedOrFailed; if (completed.getProperties().isSavepoint() && (latestSavepoint == null || completed.getCheckpointId() > latestSavepoint.getCheckpointId())) { latestSavepoint = completed; } else if (latestCompletedCheckpoint == null || completed.getCheckpointId() > latestCompletedCheckpoint.getCheckpointId()) { latestCompletedCheckpoint = completed; } } else if (completedOrFailed.getStatus().isFailed()) { FailedCheckpointStats failed = (FailedCheckpointStats) completedOrFailed; if (latestFailedCheckpoint == null || failed.getCheckpointId() > latestFailedCheckpoint.getCheckpointId()) { latestFailedCheckpoint = failed; } } if (maxSize == 0) { return false; } long checkpointId = completedOrFailed.getCheckpointId(); recentCheckpoints.computeIfPresent( checkpointId, (unusedKey, unusedValue) -> completedOrFailed); // We start searching from the last inserted position. Since the entries // wrap around the array we search until we are at index 0 and then from // the end of the array until (start pos + 1). int startPos = nextPos == checkpointsArray.length ? checkpointsArray.length - 1 : nextPos - 1; for (int i = startPos; i >= 0; i--) { if (checkpointsArray[i].getCheckpointId() == checkpointId) { checkpointsArray[i] = completedOrFailed; return true; } } for (int i = checkpointsArray.length - 1; i > startPos; i--) { if (checkpointsArray[i].getCheckpointId() == checkpointId) { checkpointsArray[i] = completedOrFailed; return true; } } return false; }
Searches for the in progress checkpoint with the given ID and replaces it with the given completed or failed checkpoint. <p>This is bounded by the maximum number of concurrent in progress checkpointsArray, which means that the runtime of this is constant. @param completedOrFailed The completed or failed checkpoint to replace the in progress checkpoint with. @return <code>true</code> if the checkpoint was replaced or <code>false</code> otherwise.
replacePendingCheckpointById
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsHistory.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsHistory.java
Apache-2.0
default void onCompletedCheckpoint() { // No-op. }
Called when a checkpoint was completed successfully.
onCompletedCheckpoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsListener.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsListener.java
Apache-2.0
public CheckpointStatsCounts getCounts() { return counts; }
Returns the snapshotted checkpoint counts. @return Snapshotted checkpoint counts.
getCounts
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsSnapshot.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsSnapshot.java
Apache-2.0
public CompletedCheckpointStatsSummarySnapshot getSummaryStats() { return summary; }
Returns the snapshotted completed checkpoint summary stats. @return Snapshotted completed checkpoint summary stats.
getSummaryStats
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsSnapshot.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsSnapshot.java
Apache-2.0
public CheckpointStatsHistory getHistory() { return history; }
Returns the snapshotted checkpoint history. @return Snapshotted checkpoint history.
getHistory
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsSnapshot.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsSnapshot.java
Apache-2.0
@Nullable public RestoredCheckpointStats getLatestRestoredCheckpoint() { return latestRestoredCheckpoint; }
Returns the latest restored checkpoint. @return Latest restored checkpoint or <code>null</code>.
getLatestRestoredCheckpoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsSnapshot.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsSnapshot.java
Apache-2.0
public boolean isInProgress() { return this == IN_PROGRESS; }
Returns whether the checkpoint is in progress. @return <code>true</code> if checkpoint is in progress, <code>false</code> otherwise.
isInProgress
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsStatus.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsStatus.java
Apache-2.0
public boolean isCompleted() { return this == COMPLETED; }
Returns whether the checkpoint has completed successfully. @return <code>true</code> if checkpoint has completed, <code>false</code> otherwise.
isCompleted
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsStatus.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsStatus.java
Apache-2.0
public boolean isFailed() { return this == FAILED; }
Returns whether the checkpoint has failed. @return <code>true</code> if checkpoint has failed, <code>false</code> otherwise.
isFailed
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsStatus.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsStatus.java
Apache-2.0
public String getExternalPath() { return externalPointer; }
Returns the external pointer of this checkpoint.
getExternalPath
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpointStats.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpointStats.java
Apache-2.0
public boolean isDiscarded() { return discarded; }
Returns whether the checkpoint has been discarded. @return <code>true</code> if the checkpoint has been discarded, <code>false</code> otherwise.
isDiscarded
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpointStats.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpointStats.java
Apache-2.0
void updateSummary(CompletedCheckpointStats completed) { stateSize.add(completed.getStateSize()); checkpointedSize.add(completed.getCheckpointedSize()); duration.add(completed.getEndToEndDuration()); processedData.add(completed.getProcessedData()); persistedData.add(completed.getPersistedData()); }
Updates the summary with the given completed checkpoint. @param completed Completed checkpoint to update the summary with.
updateSummary
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpointStatsSummary.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpointStatsSummary.java
Apache-2.0
CompletedCheckpointStatsSummarySnapshot createSnapshot() { return new CompletedCheckpointStatsSummarySnapshot( duration.createSnapshot(), processedData.createSnapshot(), persistedData.createSnapshot(), stateSize.createSnapshot(), checkpointedSize.createSnapshot()); }
Creates a snapshot of the current state. @return A snapshot of the current state.
createSnapshot
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpointStatsSummary.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpointStatsSummary.java
Apache-2.0
public StatsSummary getStateSizeStats() { return stateSize; }
Returns the summary stats for the state size of completed checkpoints. @return Summary stats for the state size.
getStateSizeStats
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpointStatsSummary.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpointStatsSummary.java
Apache-2.0
default CompletedCheckpoint getLatestCheckpoint() { List<CompletedCheckpoint> allCheckpoints = getAllCheckpoints(); if (allCheckpoints.isEmpty()) { return null; } return allCheckpoints.get(allCheckpoints.size() - 1); }
Returns the latest {@link CompletedCheckpoint} instance or <code>null</code> if none was added.
getLatestCheckpoint
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpointStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpointStore.java
Apache-2.0
default long getLatestCheckpointId() { try { List<CompletedCheckpoint> allCheckpoints = getAllCheckpoints(); if (allCheckpoints.isEmpty()) { return 0; } return allCheckpoints.get(allCheckpoints.size() - 1).getCheckpointID(); } catch (Throwable throwable) { LOG.warn("Get the latest completed checkpoints failed", throwable); return 0; } }
Returns the id of the latest completed checkpoints.
getLatestCheckpointId
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpointStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CompletedCheckpointStore.java
Apache-2.0
private void checkNoPartlyFinishedVertexUsedUnionListState( Map<JobVertexID, ExecutionJobVertex> partlyFinishedVertex, Map<OperatorID, OperatorState> operatorStates) { for (ExecutionJobVertex vertex : partlyFinishedVertex.values()) { if (hasUsedUnionListState(vertex, operatorStates)) { throw new PartialFinishingNotSupportedByStateException( String.format( "The vertex %s (id = %s) has used" + " UnionListState, but part of its tasks are FINISHED.", vertex.getName(), vertex.getJobVertexId())); } } }
If a job vertex using {@code UnionListState} has part of tasks FINISHED where others are still in RUNNING state, the checkpoint would be aborted since it might cause incomplete {@code UnionListState}.
checkNoPartlyFinishedVertexUsedUnionListState
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/DefaultCheckpointPlan.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/DefaultCheckpointPlan.java
Apache-2.0
@Override public CompletableFuture<CheckpointPlan> calculateCheckpointPlan() { return CompletableFuture.supplyAsync( () -> { try { if (context.hasFinishedTasks() && !allowCheckpointsAfterTasksFinished) { throw new CheckpointException( "Some tasks of the job have already finished and checkpointing with finished tasks is not enabled.", CheckpointFailureReason.NOT_ALL_REQUIRED_TASKS_RUNNING); } checkAllTasksInitiated(); CheckpointPlan result = context.hasFinishedTasks() ? calculateAfterTasksFinished() : calculateWithAllTasksRunning(); checkTasksStarted(result.getTasksToWaitFor()); return result; } catch (Throwable throwable) { throw new CompletionException(throwable); } }, context.getMainExecutor()); }
Default implementation for {@link CheckpointPlanCalculator}. If all tasks are running, it directly marks all the sources as tasks to trigger, otherwise it would try to find the running tasks without running processors as tasks to trigger.
calculateCheckpointPlan
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/DefaultCheckpointPlanCalculator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/DefaultCheckpointPlanCalculator.java
Apache-2.0
private void checkAllTasksInitiated() throws CheckpointException { for (ExecutionVertex task : allTasks) { if (task.getCurrentExecutionAttempt() == null) { throw new CheckpointException( String.format( "task %s of job %s is not being executed at the moment. Aborting checkpoint.", task.getTaskNameWithSubtaskIndex(), jobId), CheckpointFailureReason.NOT_ALL_REQUIRED_TASKS_RUNNING); } } }
Checks if all tasks are attached with the current Execution already. This method should be called from JobMaster main thread executor. @throws CheckpointException if some tasks do not have attached Execution.
checkAllTasksInitiated
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/DefaultCheckpointPlanCalculator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/DefaultCheckpointPlanCalculator.java
Apache-2.0
private void checkTasksStarted(List<Execution> toTrigger) throws CheckpointException { for (Execution execution : toTrigger) { if (execution.getState() != ExecutionState.RUNNING) { throw new CheckpointException( String.format( "Checkpoint triggering task %s of job %s is not being executed at the moment. " + "Aborting checkpoint.", execution.getVertex().getTaskNameWithSubtaskIndex(), jobId), CheckpointFailureReason.NOT_ALL_REQUIRED_TASKS_RUNNING); } } }
Checks if all tasks to trigger have already been in RUNNING state. This method should be called from JobMaster main thread executor. @throws CheckpointException if some tasks to trigger have not turned into RUNNING yet.
checkTasksStarted
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/DefaultCheckpointPlanCalculator.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/DefaultCheckpointPlanCalculator.java
Apache-2.0
@Override public boolean requiresExternalizedCheckpoints() { return true; }
Creates a {@link DefaultCompletedCheckpointStore} instance. @param maxNumberOfCheckpointsToRetain The maximum number of checkpoints to retain (at least 1). Adding more checkpoints than this results in older checkpoints being discarded. On recovery, we will only start with a single checkpoint. @param stateHandleStore Completed checkpoints in external store @param completedCheckpointStoreUtil utilities for completed checkpoint store @param executor to execute blocking calls
requiresExternalizedCheckpoints
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/DefaultCompletedCheckpointStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/DefaultCompletedCheckpointStore.java
Apache-2.0
private boolean tryRemove(long checkpointId) throws Exception { return checkpointStateHandleStore.releaseAndTryRemove( completedCheckpointStoreUtil.checkpointIDToName(checkpointId)); }
Tries to remove the checkpoint identified by the given checkpoint id. @param checkpointId identifying the checkpoint to remove @return true if the checkpoint could be removed
tryRemove
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/DefaultCompletedCheckpointStore.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/DefaultCompletedCheckpointStore.java
Apache-2.0
public static int getMaximumNumberOfRetainedCheckpoints(Configuration config, Logger logger) { final int maxNumberOfCheckpointsToRetain = config.get(CheckpointingOptions.MAX_RETAINED_CHECKPOINTS); if (maxNumberOfCheckpointsToRetain <= 0) { // warning and use 1 as the default value if the setting in // state.checkpoints.max-retained-checkpoints is not greater than 0. logger.warn( "The setting for '{} : {}' is invalid. Using default value of {}", CheckpointingOptions.MAX_RETAINED_CHECKPOINTS.key(), maxNumberOfCheckpointsToRetain, CheckpointingOptions.MAX_RETAINED_CHECKPOINTS.defaultValue()); return CheckpointingOptions.MAX_RETAINED_CHECKPOINTS.defaultValue(); } return maxNumberOfCheckpointsToRetain; }
Extracts maximum number of retained checkpoints configuration from the passed {@link Configuration}. The default value is used as a fallback if the passed value is a value larger than {@code 0}. @param config The configuration that is accessed. @param logger The {@link Logger} used for exposing the warning if the configured value is invalid. @return The maximum number of retained checkpoints based on the passed {@code Configuration}.
getMaximumNumberOfRetainedCheckpoints
java
apache/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/DefaultCompletedCheckpointStoreUtils.java
https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/DefaultCompletedCheckpointStoreUtils.java
Apache-2.0