code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
// Resolves the program arguments for a jar run/plan request: the JSON request body
// takes precedence, falling back to the ProgramArgQueryParameter, with 'null' as the
// default when neither is present (resolution is delegated to
// fromRequestBodyOrQueryParameter).
// NOTE(review): type parameter M appears unused in this signature — possibly kept for
// call-site inference; confirm before removing.
private static <R extends JarRequestBody, M extends MessageParameters> List<String> getProgramArgs(
        HandlerRequest<R> request, Logger log) throws RestHandlerException {
    JarRequestBody requestBody = request.getRequestBody();
    return fromRequestBodyOrQueryParameter(
            requestBody.getProgramArgumentsList(),
            () -> request.getQueryParameter(ProgramArgQueryParameter.class),
            null,
            log);
}
Parse program arguments in jar run or plan request.
getProgramArgs
java
apache/flink
flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/handlers/utils/JarHandlerUtils.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/handlers/utils/JarHandlerUtils.java
Apache-2.0
/**
 * Splits a raw argument string into individual tokens, treating quoted groups as single
 * tokens. Quote characters (single and double) are stripped from the resulting tokens;
 * escaped quotes are NOT respected. A {@code null} input yields an empty list.
 */
@VisibleForTesting
static List<String> tokenizeArguments(@Nullable final String args) {
    if (args == null) {
        return Collections.emptyList();
    }
    final List<String> result = new ArrayList<>();
    final Matcher tokenMatcher = ARGUMENTS_TOKENIZE_PATTERN.matcher(args);
    while (tokenMatcher.find()) {
        final String rawToken = tokenMatcher.group().trim();
        result.add(rawToken.replace("\"", "").replace("'", ""));
    }
    return result;
}
Takes program arguments as a single string, and splits them into a list of string. <pre> tokenizeArguments("--foo bar") = ["--foo" "bar"] tokenizeArguments("--foo \"bar baz\"") = ["--foo" "bar baz"] tokenizeArguments("--foo 'bar baz'") = ["--foo" "bar baz"] tokenizeArguments(null) = [] </pre> <strong>WARNING: </strong>This method does not respect escaped quotes.
tokenizeArguments
java
apache/flink
flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/handlers/utils/JarHandlerUtils.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/handlers/utils/JarHandlerUtils.java
Apache-2.0
/**
 * Regenerates the combined jobs-overview JSON in {@code webDir} by merging every
 * per-job overview file found in {@code webOverviewDir}. This replicates the response
 * of the JobsOverviewHandler; each archived job contributes one overview file.
 * I/O failures are logged rather than propagated (best-effort refresh).
 *
 * @param webOverviewDir directory containing the per-job overview files
 * @param webDir target directory for the merged overview
 */
private static void updateJobOverview(File webOverviewDir, File webDir) {
    try (JsonGenerator gen =
            jacksonFactory.createGenerator(
                    HistoryServer.createOrGetFile(webDir, JobsOverviewHeaders.URL))) {
        // Fixed: webOverviewDir is already a File — wrapping it in
        // 'new File(webOverviewDir.getPath())' was redundant.
        // listFiles() returns null if the directory does not exist or an I/O error occurs.
        File[] overviews = webOverviewDir.listFiles();
        if (overviews != null) {
            Collection<JobDetails> allJobs = new ArrayList<>(overviews.length);
            for (File overview : overviews) {
                MultipleJobsDetails subJobs =
                        mapper.readValue(overview, MultipleJobsDetails.class);
                allJobs.addAll(subJobs.getJobs());
            }
            mapper.writeValue(gen, new MultipleJobsDetails(allJobs));
        }
    } catch (IOException ioe) {
        LOG.error("Failed to update job overview.", ioe);
    }
}
This method replicates the JSON response that would be given by the JobsOverviewHandler when listing both running and finished jobs. <p>Every job archive contains a joboverview.json file containing the same structure. Since jobs are archived on their own however the list of finished jobs only contains a single job. <p>For the display in the HistoryServer WebFrontend we have to combine these overviews.
updateJobOverview
java
apache/flink
flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/history/HistoryServerArchiveFetcher.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/history/HistoryServerArchiveFetcher.java
Apache-2.0
// Validates the log URL pattern configured under 'option' and normalizes it to an
// HTTP(S) URL. Returns Optional.empty() when the value is unset/blank or uses an
// unsupported scheme.
public static Optional<String> getValidLogUrlPattern(
        final Configuration config, final ConfigOption<String> option) {
    String pattern = config.get(option);
    if (StringUtils.isNullOrWhitespaceOnly(pattern)) {
        return Optional.empty();
    }
    pattern = pattern.trim();
    // indexOf returns -1 when no separator is present; Math.max clamps that to 0 so the
    // substring is empty, which the branch below treats as "no scheme given".
    String scheme = pattern.substring(0, Math.max(pattern.indexOf(SCHEME_SEPARATOR), 0));
    if (scheme.isEmpty()) {
        // No scheme configured: default to http.
        return Optional.of(HTTP_SCHEME + SCHEME_SEPARATOR + pattern);
    } else if (HTTP_SCHEME.equalsIgnoreCase(scheme) || HTTPS_SCHEME.equalsIgnoreCase(scheme)) {
        return Optional.of(pattern);
    } else {
        // Any other scheme is rejected with a warning rather than an exception.
        LOG.warn(
                "Ignore configured value for '{}': unsupported scheme {}",
                option.key(),
                scheme);
        return Optional.empty();
    }
}
Validate and normalize log url pattern.
getValidLogUrlPattern
java
apache/flink
flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/utils/LogUrlUtil.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/utils/LogUrlUtil.java
Apache-2.0
// Verifies that the web submission extension (jar upload/run handlers) can be loaded
// via WebMonitorUtils with a minimal configuration; only non-null loading is asserted.
@Test
void testLoadWebSubmissionExtension() throws Exception {
    final Configuration configuration = new Configuration();
    configuration.set(JobManagerOptions.ADDRESS, "localhost");
    final WebMonitorExtension webMonitorExtension =
            WebMonitorUtils.loadWebSubmissionExtension(
                    CompletableFuture::new,
                    Duration.ofSeconds(10),
                    Collections.emptyMap(),
                    CompletableFuture.completedFuture("localhost:12345"),
                    Paths.get("/tmp"),
                    Executors.directExecutor(),
                    configuration);
    assertThat(webMonitorExtension).isNotNull();
}
Tests dynamically loading of handlers such as {@link JarUploadHandler}.
testLoadWebSubmissionExtension
java
apache/flink
flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/WebMonitorUtilsTest.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/WebMonitorUtilsTest.java
Apache-2.0
// Uploads a test jar, then asserts that fetching its plan fails with a
// ProgramInvocationException whose message (a) preserves the original failure text,
// (b) references the jar name, and (c) contains the program's captured stdout/stderr.
@Test
void testPlanJar(@TempDir File tmp1, @TempDir File tmp2) throws Exception {
    final TestingDispatcherGateway restfulGateway = TestingDispatcherGateway.newBuilder().build();
    final JarHandlers handlers =
            new JarHandlers(tmp1.toPath(), restfulGateway, EXECUTOR_EXTENSION.getExecutor());
    // The jar under test is placed in 'targetDir' by the build (system property).
    final Path originalJar = Paths.get(System.getProperty("targetDir")).resolve(JAR_NAME);
    final Path jar = Files.copy(originalJar, tmp2.toPath().resolve(JAR_NAME));
    final String storedJarPath =
            JarHandlers.uploadJar(handlers.uploadHandler, jar, restfulGateway);
    final String storedJarName = Paths.get(storedJarPath).getFileName().toString();
    assertThatThrownBy(
                    () ->
                            JarHandlers.showPlan(
                                    handlers.planHandler, storedJarName, restfulGateway))
            .satisfies(
                    e -> {
                        assertThat(
                                        ExceptionUtils.findThrowable(
                                                e, ProgramInvocationException.class))
                                .map(Exception::getMessage)
                                .hasValueSatisfying(
                                        message -> {
                                            assertThat(message)
                                                    // original cause is preserved in stack
                                                    // trace
                                                    .contains(
                                                            "The program plan could not be fetched - the program aborted pre-maturely")
                                                    // implies the jar was registered for the
                                                    // job graph
                                                    // (otherwise the jar name would
                                                    // not occur in the exception)
                                                    .contains(JAR_NAME)
                                                    // ensure that no stdout/stderr has been
                                                    // captured
                                                    .contains("System.out: " + "hello out!")
                                                    .contains("System.err: " + "hello err!");
                                        });
                    });
}
Tests for the {@link JarRunHandler} and {@link JarPlanHandler}.
testPlanJar
java
apache/flink
flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/handlers/JarHandlerTest.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/handlers/JarHandlerTest.java
Apache-2.0
// Exercises the full jar lifecycle against testing handlers:
// upload -> list -> plan -> run -> delete -> list (expects empty).
@Test
void testJarSubmission(@TempDir File uploadDir, @TempDir File temporaryFolder) throws Exception {
    final TestingDispatcherGateway restfulGateway =
            TestingDispatcherGateway.newBuilder()
                    .setBlobServerPort(
                            blobServerExtension.getCustomExtension().getBlobServerPort())
                    .setSubmitFunction(
                            jobGraph -> CompletableFuture.completedFuture(Acknowledge.get()))
                    .build();
    final JarHandlers handlers =
            new JarHandlers(uploadDir.toPath(), restfulGateway, EXECUTOR_EXTENSION.getExecutor());
    final JarUploadHandler uploadHandler = handlers.uploadHandler;
    final JarListHandler listHandler = handlers.listHandler;
    final JarPlanHandler planHandler = handlers.planHandler;
    final JarRunHandler runHandler = handlers.runHandler;
    final JarDeleteHandler deleteHandler = handlers.deleteHandler;
    // targetDir property is set via surefire configuration
    final Path originalJar =
            Paths.get(System.getProperty("targetDir")).resolve("test-program.jar");
    final Path jar =
            Files.copy(originalJar, temporaryFolder.toPath().resolve("test-program.jar"));
    final String storedJarPath = uploadJar(uploadHandler, jar, restfulGateway);
    final String storedJarName = Paths.get(storedJarPath).getFileName().toString();
    // Listing after upload must show exactly the uploaded jar.
    final JarListInfo postUploadListResponse = listJars(listHandler, restfulGateway);
    assertThat(postUploadListResponse.jarFileList).hasSize(1);
    final JarListInfo.JarFileInfo listEntry =
            postUploadListResponse.jarFileList.iterator().next();
    assertThat(listEntry.name).isEqualTo(jar.getFileName().toString());
    assertThat(listEntry.id).isEqualTo(storedJarName);
    final JobPlanInfo planResponse = showPlan(planHandler, storedJarName, restfulGateway);
    // we're only interested in the core functionality so checking for a small detail is
    // sufficient
    assertThat(planResponse.getPlan().getName()).isEqualTo("Flink Streaming Job");
    runJar(runHandler, storedJarName, restfulGateway);
    deleteJar(deleteHandler, storedJarName, restfulGateway);
    // After deletion the listing must be empty again.
    final JarListInfo postDeleteListResponse = listJars(listHandler, restfulGateway);
    assertThat(postDeleteListResponse.jarFileList).isEmpty();
}
Tests the entire lifecycle of a jar submission.
testJarSubmission
java
apache/flink
flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/handlers/JarSubmissionITCase.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/handlers/JarSubmissionITCase.java
Apache-2.0
/**
 * Trivial program that emits one line on stdout and one on stderr; used by tests that
 * verify output capture.
 */
public static void main(String[] args) throws Exception {
    final java.io.PrintStream standardOut = System.out;
    final java.io.PrintStream standardErr = System.err;
    standardOut.println("hello out!");
    standardErr.println("hello err!");
}
Simple test program that prints to stdout/stderr.
main
java
apache/flink
flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/handlers/utils/OutputTestProgram.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/handlers/utils/OutputTestProgram.java
Apache-2.0
// Configures the pipeline of each new client channel: an HTTP client codec,
// transparent content decompression, and the handler that collects responses
// into the shared 'responses' queue.
@Override
protected void initChannel(SocketChannel ch) throws Exception {
    ChannelPipeline p = ch.pipeline();
    p.addLast(new HttpClientCodec());
    p.addLast(new HttpContentDecompressor());
    p.addLast(new ClientHandler(responses));
}
Creates a client instance for the server at the target host and port. @param host Host of the HTTP server @param port Port of the HTTP server
initChannel
java
apache/flink
flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/HttpTestClient.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/HttpTestClient.java
Apache-2.0
// Connects to host:port, waiting at most 'timeout' for the connection, then writes and
// flushes the request. Throws TimeoutException if the connection is not established in
// time.
public void sendRequest(HttpRequest request, Duration timeout)
        throws InterruptedException, TimeoutException {
    LOG.debug("Writing {}.", request);
    // Make the connection attempt.
    ChannelFuture connect = bootstrap.connect(host, port);
    Channel channel;
    if (connect.await(timeout.toMillis(), TimeUnit.MILLISECONDS)) {
        channel = connect.channel();
    } else {
        throw new TimeoutException("Connection failed");
    }
    // NOTE(review): the write is not awaited, so it may still be in flight when this
    // method returns — callers presumably synchronize via getNextResponse(); confirm.
    channel.writeAndFlush(request);
}
Sends a request to the server. <pre> HttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/overview"); request.headers().set(HttpHeaderNames.HOST, host); request.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE); sendRequest(request); </pre> @param request The {@link HttpRequest} to send to the server
sendRequest
java
apache/flink
flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/HttpTestClient.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/HttpTestClient.java
Apache-2.0
/**
 * Sends a simple GET request for the given path (a leading '/' is added when missing).
 * Host and Connection: close headers are set before delegating to
 * {@link #sendRequest(HttpRequest, Duration)}.
 */
public void sendGetRequest(String path, Duration timeout)
        throws TimeoutException, InterruptedException {
    final String normalizedPath = path.startsWith("/") ? path : "/" + path;
    final HttpRequest request =
            new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, normalizedPath);
    request.headers().set(HttpHeaderNames.HOST, host);
    request.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
    sendRequest(request, timeout);
}
Sends a simple GET request to the given path. You only specify the $path part of http://$host:$port/$path. @param path The $path to GET (http://$host:$port/$path)
sendGetRequest
java
apache/flink
flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/HttpTestClient.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/HttpTestClient.java
Apache-2.0
/**
 * Sends a simple DELETE request to the given path (a leading '/' is added when
 * missing); sets Host and Connection: close headers before delegating to
 * {@link #sendRequest(HttpRequest, Duration)}.
 *
 * @param path the path to DELETE
 * @param timeout connection timeout passed through to sendRequest
 */
public void sendDeleteRequest(String path, Duration timeout)
        throws TimeoutException, InterruptedException {
    if (!path.startsWith("/")) {
        path = "/" + path;
    }
    // Renamed from the copy-pasted 'getRequest' — this is a DELETE request.
    HttpRequest deleteRequest =
            new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.DELETE, path);
    deleteRequest.headers().set(HttpHeaderNames.HOST, host);
    deleteRequest.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
    sendRequest(deleteRequest, timeout);
}
Sends a simple DELETE request to the given path. You only specify the $path part of http://$host:$port/$path. @param path The $path to DELETE (http://$host:$port/$path)
sendDeleteRequest
java
apache/flink
flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/HttpTestClient.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/HttpTestClient.java
Apache-2.0
/**
 * Sends a simple PATCH request to the given path (a leading '/' is added when
 * missing); sets Host and Connection: close headers before delegating to
 * {@link #sendRequest(HttpRequest, Duration)}.
 *
 * @param path the path to PATCH
 * @param timeout connection timeout passed through to sendRequest
 */
public void sendPatchRequest(String path, Duration timeout)
        throws TimeoutException, InterruptedException {
    if (!path.startsWith("/")) {
        path = "/" + path;
    }
    // Renamed from the copy-pasted 'getRequest' — this is a PATCH request.
    HttpRequest patchRequest =
            new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.PATCH, path);
    patchRequest.headers().set(HttpHeaderNames.HOST, host);
    patchRequest.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE);
    sendRequest(patchRequest, timeout);
}
Sends a simple PATCH request to the given path. You only specify the $path part of http://$host:$port/$path. @param path The $path to PATCH (http://$host:$port/$path)
sendPatchRequest
java
apache/flink
flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/HttpTestClient.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/HttpTestClient.java
Apache-2.0
// Blocks indefinitely until the next response is available on the queue.
public SimpleHttpResponse getNextResponse() throws InterruptedException {
    return responses.take();
}
Returns the next available HTTP response. A call to this method blocks until a response becomes available. @return The next available {@link SimpleHttpResponse}
getNextResponse
java
apache/flink
flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/HttpTestClient.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/HttpTestClient.java
Apache-2.0
/**
 * Blocks until the next response is available or the timeout fires.
 *
 * @param timeout maximum time to wait for a response
 * @return the next available {@link SimpleHttpResponse}
 * @throws TimeoutException if no response arrived within the timeout
 */
public SimpleHttpResponse getNextResponse(Duration timeout)
        throws InterruptedException, TimeoutException {
    SimpleHttpResponse response = responses.poll(timeout.toMillis(), TimeUnit.MILLISECONDS);
    if (response == null) {
        // Fixed: 'timeout' is a Duration whose toString() is e.g. "PT10S"; appending
        // " ms" to it produced a misleading message. Report milliseconds explicitly.
        throw new TimeoutException(
                "No response within timeout of " + timeout.toMillis() + " ms");
    } else {
        return response;
    }
}
Returns the next available HTTP response. A call to this method blocks until a response becomes available or throws an Exception if the timeout fires. @param timeout Timeout for the next response to become available @return The next available {@link SimpleHttpResponse}
getNextResponse
java
apache/flink
flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/HttpTestClient.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/HttpTestClient.java
Apache-2.0
// Accumulates one HTTP response: status/type/location arrive via HttpResponse, the body
// via one or more HttpContent chunks. On LastHttpContent the assembled
// SimpleHttpResponse is queued, per-response state is reset, and the channel is closed.
@Override
protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) throws Exception {
    LOG.debug("Received {}", msg);
    if (msg instanceof HttpResponse) {
        HttpResponse response = (HttpResponse) msg;
        currentStatus = response.status();
        currentType = response.headers().get(HttpHeaderNames.CONTENT_TYPE);
        currentLocation = response.headers().get(HttpHeaderNames.LOCATION);
        if (HttpUtil.isTransferEncodingChunked(response)) {
            LOG.debug("Content is chunked");
        }
    }
    if (msg instanceof HttpContent) {
        HttpContent content = (HttpContent) msg;
        // Add the content
        currentContent += content.content().toString(CharsetUtil.UTF_8);
        // Finished with this
        if (content instanceof LastHttpContent) {
            responses.add(
                    new SimpleHttpResponse(
                            currentStatus, currentType, currentContent, currentLocation));
            // Reset per-response state so the handler can process a subsequent response.
            currentStatus = null;
            currentType = null;
            currentLocation = null;
            currentContent = "";
            ctx.close();
        }
    }
}
The response handler. Responses from the server are handled here.
channelRead0
java
apache/flink
flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/HttpTestClient.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/HttpTestClient.java
Apache-2.0
// Records the received command-line arguments in a static field (inspected by tests)
// and runs a minimal streaming job that discards its two elements.
public static void main(String[] args) throws Exception {
    actualArguments = args;
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.fromData("hello", "world").sinkTo(new DiscardingSink<>());
    env.execute();
}
Simple test program that exposes passed arguments.
main
java
apache/flink
flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/ParameterProgram.java
https://github.com/apache/flink/blob/master/flink-runtime-web/src/test/java/org/apache/flink/runtime/webmonitor/testutils/ParameterProgram.java
Apache-2.0
// Triggers a new state materialization when (a) the previous one has been confirmed,
// failed or cancelled, and (b) the changelog has advanced beyond the last materialized
// sequence number; otherwise returns Optional.empty(). Not thread safe: must be called
// under a lock or from the task mailbox executor.
@Override
public Optional<MaterializationRunnable> initMaterialization() throws Exception {
    if (lastConfirmedMaterializationId < materializedId - 1
            && lastFailedMaterializationId < materializedId - 1) {
        // SharedStateRegistry potentially requires that the checkpoint's dependency on the
        // shared file be continuous, it will be broken if we trigger a new materialization
        // before the previous one has either confirmed or failed. See discussion in
        // https://github.com/apache/flink/pull/22669#issuecomment-1593370772 .
        LOG.info(
                "materialization:{} not confirmed or failed or cancelled, skip trigger new one.",
                materializedId - 1);
        return Optional.empty();
    }
    SequenceNumber upTo = stateChangelogWriter.nextSequenceNumber();
    SequenceNumber lastMaterializedTo = changelogSnapshotState.lastMaterializedTo();
    LOG.info(
            "Initialize Materialization. Current changelog writers last append to sequence number {}",
            upTo);
    if (upTo.compareTo(lastMaterializedTo) > 0) {
        LOG.info("Starting materialization from {} : {}", lastMaterializedTo, upTo);
        // This ID is not needed for materialization; But since we are re-using the
        // streamFactory that is designed for state backend snapshot, which requires unique
        // checkpoint ID. A faked materialized Id is provided here.
        long materializationID = materializedId++;
        MaterializationRunnable materializationRunnable =
                new MaterializationRunnable(
                        keyedStateBackend.snapshot(
                                materializationID,
                                System.currentTimeMillis(),
                                // TODO: implement its own streamFactory.
                                streamFactory,
                                CHECKPOINT_OPTIONS),
                        materializationID,
                        upTo);
        // log metadata after materialization is triggered
        changelogStateFactory.resetAllWritingMetaFlags();
        return Optional.of(materializationRunnable);
    } else {
        LOG.debug(
                "Skip materialization, last materialized to {} : last log to {}",
                lastMaterializedTo,
                upTo);
        return Optional.empty();
    }
}
Initialize state materialization so that materialized data can be persisted durably and included into the checkpoint. <p>This method is not thread safe. It should be called either under a lock or through task mailbox executor. @return a tuple of - future snapshot result from the underlying state backend - a {@link SequenceNumber} identifying the latest change in the changelog
initMaterialization
java
apache/flink
flink-state-backends/flink-statebackend-changelog/src/main/java/org/apache/flink/state/changelog/ChangelogKeyedStateBackend.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-changelog/src/main/java/org/apache/flink/state/changelog/ChangelogKeyedStateBackend.java
Apache-2.0
// Records the (exclusive) changelog sequence number a checkpoint uploaded up to;
// consulted on subsumption to decide how far the changelog may be truncated.
public void checkpoint(long checkpointId, SequenceNumber lastUploadedTo) {
    checkpointedUpTo.put(checkpointId, lastUploadedTo);
}
Set the highest {@link SequenceNumber} of changelog used by the given checkpoint. @param lastUploadedTo exclusive
checkpoint
java
apache/flink
flink-state-backends/flink-statebackend-changelog/src/main/java/org/apache/flink/state/changelog/ChangelogTruncateHelper.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-changelog/src/main/java/org/apache/flink/state/changelog/ChangelogTruncateHelper.java
Apache-2.0
/**
 * Handles subsumption of a checkpoint: if a sequence number was recorded for it,
 * remembers that as the new subsumption watermark, drops bookkeeping for this and all
 * older checkpoints, and attempts to truncate the changelog.
 */
public void checkpointSubsumed(long checkpointId) {
    final SequenceNumber upTo = checkpointedUpTo.get(checkpointId);
    LOG.debug("checkpoint {} subsumed, max sqn: {}", checkpointId, upTo);
    if (upTo == null) {
        // Nothing recorded for this checkpoint — nothing to truncate.
        return;
    }
    subsumedUpTo = upTo;
    checkpointedUpTo.headMap(checkpointId, true).clear();
    truncate();
}
Handle checkpoint subsumption, potentially {@link #truncate() truncating} the changelog.
checkpointSubsumed
java
apache/flink
flink-state-backends/flink-statebackend-changelog/src/main/java/org/apache/flink/state/changelog/ChangelogTruncateHelper.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-changelog/src/main/java/org/apache/flink/state/changelog/ChangelogTruncateHelper.java
Apache-2.0
// Supplies a TestTaskStateManager built with default settings; overridable hook for
// subclasses.
protected TestTaskStateManager getTestTaskStateManager() {
    return TestTaskStateManager.builder().build();
}
Tests for {@link ChangelogStateBackend} using {@link InMemoryStateChangelogStorage} and delegating {@link HashMapStateBackendTest}.
getTestTaskStateManager
java
apache/flink
flink-state-backends/flink-statebackend-changelog/src/test/java/org/apache/flink/state/changelog/ChangelogDelegateHashMapInMemoryTest.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-changelog/src/test/java/org/apache/flink/state/changelog/ChangelogDelegateHashMapInMemoryTest.java
Apache-2.0
// Collects the upload task instead of executing it; the test completes collected
// uploads explicitly later.
@Override
public void upload(UploadTask uploadTask) throws IOException {
    uploads.add(uploadTask);
}
An upload scheduler that collects the upload tasks and allows them to be {@link #completeUploads(Function) completed arbitrarily}. State handles used for completion are tracked by {@link #registry}.
upload
java
apache/flink
flink-state-backends/flink-statebackend-changelog/src/test/java/org/apache/flink/state/changelog/ChangelogStateDiscardTest.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-changelog/src/test/java/org/apache/flink/state/changelog/ChangelogStateDiscardTest.java
Apache-2.0
// Increments the completed-materialization counter and records the duration of the
// latest completed materialization in 'lastDuration'.
void reportCompletedMaterialization(long duration) {
    completedMaterializationCounter.inc();
    lastDuration = duration;
}
Metrics related to the materialization part of Changelog.
reportCompletedMaterialization
java
apache/flink
flink-state-backends/flink-statebackend-common/src/main/java/org/apache/flink/state/common/ChangelogMaterializationMetricGroup.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-common/src/main/java/org/apache/flink/state/common/ChangelogMaterializationMetricGroup.java
Apache-2.0
/**
 * Validates a configured (option, value) pair against the constraints of the
 * well-known ForSt options: positive integers, positive sizes, a non-negative log
 * file size, and an absolute log directory. Options outside these sets pass through
 * unchecked.
 *
 * @param option the configuration option being set
 * @param value the configured value
 * @throws IllegalArgumentException if the value violates the option's constraint
 */
static void checkArgumentValid(ConfigOption<?> option, Object value) {
    final String key = option.key();
    if (POSITIVE_INT_CONFIG_SET.contains(option)) {
        Preconditions.checkArgument(
                (Integer) value > 0,
                "Configured value for key: " + key + " must be larger than 0.");
    } else if (SIZE_CONFIG_SET.contains(option)) {
        Preconditions.checkArgument(
                ((MemorySize) value).getBytes() > 0,
                // Fixed: the message previously lacked a space between "key" and the
                // key name ("...for key" + key), producing e.g. "for keystate...".
                "Configured size for key " + key + " must be larger than 0.");
    } else if (LOG_MAX_FILE_SIZE.equals(option)) {
        Preconditions.checkArgument(
                ((MemorySize) value).getBytes() >= 0,
                "Configured size for key " + key + " must be larger than or equal to 0.");
    } else if (LOG_DIR.equals(option)) {
        Preconditions.checkArgument(
                new File((String) value).isAbsolute(),
                "Configured path for key " + key + " is not absolute.");
    }
}
Helper method to check whether the (key, value) pair is valid for the given configuration. @param option The configuration key which is configurable in {@link ForStConfigurableOptions}. @param value The value within given configuration.
checkArgumentValid
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStConfigurableOptions.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStConfigurableOptions.java
Apache-2.0
// Completes the pending state future with the deserialized value, or with null when
// ForStDB returned no bytes for the key.
@Override
public void completeStateFuture(byte[] bytesValue) throws IOException {
    if (bytesValue == null) {
        // No entry in the DB: the state value is absent.
        future.complete(null);
        return;
    }
    V value = table.deserializeValue(bytesValue);
    future.complete(value);
}
The Get access request for ForStDB. @param <K> The type of key in get access request. @param <V> The type of value returned by get request.
completeStateFuture
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStDBSingleGetRequest.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStDBSingleGetRequest.java
Apache-2.0
/**
 * Builds a bunch-put request, used for {@link StateRequestType#MAP_PUT_ALL} and
 * {@link StateRequestType#CLEAR}; the request payload is cast to the map of entries
 * to write.
 *
 * @param stateRequest the originating state request
 * @return the corresponding {@code ForStDBBunchPutRequest}
 */
@SuppressWarnings("unchecked")
public ForStDBBunchPutRequest<K, N, UK, UV> buildDBBunchPutRequest(
        StateRequest<?, ?, ?, ?> stateRequest) {
    Preconditions.checkArgument(
            stateRequest.getRequestType() == StateRequestType.MAP_PUT_ALL
                    || stateRequest.getRequestType() == StateRequestType.CLEAR);
    ContextKey<K, N> contextKey =
            new ContextKey<>(
                    (RecordContext<K>) stateRequest.getRecordContext(),
                    (N) stateRequest.getNamespace(),
                    null);
    Map<UK, UV> value = (Map<UK, UV>) stateRequest.getPayload();
    // Fixed: use the diamond operator instead of a raw-type constructor call so the
    // result is type-checked against the declared return type.
    return new ForStDBBunchPutRequest<>(contextKey, value, this, stateRequest.getFuture());
}
Build a request for bunch put. Mainly used for {@link StateRequestType#MAP_PUT_ALL} and {@link StateRequestType#CLEAR}. @param stateRequest The state request. @return The {@code ForStDBBunchPutRequest}.
buildDBBunchPutRequest
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMapState.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMapState.java
Apache-2.0
// Enables/disables use of the slot's managed memory for ForSt. Stored in a nullable
// Boolean field so "not configured" can fall back to the option's default elsewhere.
public void setUseManagedMemory(boolean useManagedMemory) {
    this.useManagedMemory = useManagedMemory;
}
Configures ForSt to use the managed memory of a slot. See {@link ForStOptions#USE_MANAGED_MEMORY} for details.
setUseManagedMemory
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
Apache-2.0
// Sets a fixed memory budget shared by all ForSt instances in a slot. A null argument
// clears the setting; a non-null size must be strictly positive.
public void setFixedMemoryPerSlot(MemorySize fixedMemoryPerSlot) {
    checkArgument(
            fixedMemoryPerSlot == null || fixedMemoryPerSlot.getBytes() > 0,
            "Total memory per slot must be > 0");
    this.fixedMemoryPerSlot = fixedMemoryPerSlot;
}
Configures ForSt to use a fixed amount of memory shared between all instances (operators) in a slot. See {@link ForStOptions#FIX_PER_SLOT_MEMORY_SIZE} for details.
setFixedMemoryPerSlot
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
Apache-2.0
// Sets the fraction of the shared memory to reserve for write buffers; the ratio must
// lie strictly inside (0, 1).
public void setWriteBufferRatio(double writeBufferRatio) {
    Preconditions.checkArgument(
            writeBufferRatio > 0 && writeBufferRatio < 1.0,
            "Write Buffer ratio %s must be in (0, 1)",
            writeBufferRatio);
    this.writeBufferRatio = writeBufferRatio;
}
Sets the fraction of the total memory to be used for write buffers. This only has an effect if either {@link #setUseManagedMemory(boolean)} or {@link #setFixedMemoryPerSlot(MemorySize)} is set. <p>See {@link ForStOptions#WRITE_BUFFER_RATIO} for details.
setWriteBufferRatio
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
Apache-2.0
// Sets the fraction of the shared memory to reserve for high-priority blocks (indexes,
// dictionaries, ...); the ratio must lie strictly inside (0, 1).
public void setHighPriorityPoolRatio(double highPriorityPoolRatio) {
    Preconditions.checkArgument(
            highPriorityPoolRatio > 0 && highPriorityPoolRatio < 1.0,
            "High priority pool ratio %s must be in (0, 1)",
            highPriorityPoolRatio);
    this.highPriorityPoolRatio = highPriorityPoolRatio;
}
Sets the fraction of the total memory to be used for high priority blocks like indexes, dictionaries, etc. This only has an effect if either {@link #setUseManagedMemory(boolean)} or {@link #setFixedMemoryPerSlot(MemorySize)} is set. <p>See {@link ForStOptions#HIGH_PRIORITY_POOL_RATIO} for details.
setHighPriorityPoolRatio
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
Apache-2.0
/**
 * Returns whether ForSt uses the slot's managed memory, falling back to the
 * {@link ForStOptions#USE_MANAGED_MEMORY} default when not explicitly configured.
 */
public boolean isUsingManagedMemory() {
    if (useManagedMemory != null) {
        return useManagedMemory;
    }
    return ForStOptions.USE_MANAGED_MEMORY.defaultValue();
}
Gets whether the state backend is configured to use the managed memory of a slot for ForSt. See {@link ForStOptions#USE_MANAGED_MEMORY} for details.
isUsingManagedMemory
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
Apache-2.0
// True iff a fixed per-slot memory size has been explicitly configured.
public boolean isUsingFixedMemoryPerSlot() {
    return fixedMemoryPerSlot != null;
}
Gets whether the state backend is configured to use a fixed amount of memory shared between all ForSt instances (in all tasks and operators) of a slot. See {@link ForStOptions#FIX_PER_SLOT_MEMORY_SIZE} for details.
isUsingFixedMemoryPerSlot
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
Apache-2.0
// Returns the configured fixed per-slot memory size, or null when not configured.
@Nullable
public MemorySize getFixedMemoryPerSlot() {
    return fixedMemoryPerSlot;
}
Gets the fixed amount of memory to be shared between all RocksDB instances (in all tasks and operators) of a slot. Null if not configured. See {@link ForStOptions#USE_MANAGED_MEMORY} for details.
getFixedMemoryPerSlot
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
Apache-2.0
/**
 * Returns the configured write-buffer fraction, or the
 * {@link ForStOptions#WRITE_BUFFER_RATIO} default when not explicitly set.
 */
public double getWriteBufferRatio() {
    if (writeBufferRatio == null) {
        return ForStOptions.WRITE_BUFFER_RATIO.defaultValue();
    }
    return writeBufferRatio;
}
Gets the fraction of the total memory to be used for write buffers. This only has an effect if either {@link #setUseManagedMemory(boolean)} or {@link #setFixedMemoryPerSlot(MemorySize)} is set. <p>See {@link ForStOptions#WRITE_BUFFER_RATIO} for details.
getWriteBufferRatio
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
Apache-2.0
public double getHighPriorityPoolRatio() { return highPriorityPoolRatio != null ? highPriorityPoolRatio : ForStOptions.HIGH_PRIORITY_POOL_RATIO.defaultValue(); }
Gets the fraction of the total memory to be used for high priority blocks like indexes, dictionaries, etc. This only has an effect is either {@link #setUseManagedMemory(boolean)} or {@link #setFixedMemoryPerSlot(MemorySize)} are set. <p>See {@link ForStOptions#HIGH_PRIORITY_POOL_RATIO} for details.
getHighPriorityPoolRatio
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
Apache-2.0
public Boolean isUsingPartitionedIndexFilters() { return usePartitionedIndexFilters != null ? usePartitionedIndexFilters : ForStOptions.USE_PARTITIONED_INDEX_FILTERS.defaultValue(); }
Gets whether the state backend is configured to use partitioned index/filters for ForSt. <p>See {@link ForStOptions#USE_PARTITIONED_INDEX_FILTERS} for details.
isUsingPartitionedIndexFilters
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryConfiguration.java
Apache-2.0
static long calculateForStDefaultArenaBlockSize(long writeBufferSize) { long arenaBlockSize = writeBufferSize / 8; // Align up to 4k final long align = 4 * 1024; return ((arenaBlockSize + align - 1) / align) * align; }
Calculate the default arena block size as ForSt calculates it in <a href="https://github.com/dataArtisans/frocksdb/blob/49bc897d5d768026f1eb816d960c1f2383396ef4/db/column_family.cc#L196-L201"> here</a>. @return the default arena block size @param writeBufferSize the write buffer size (bytes)
calculateForStDefaultArenaBlockSize
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryControllerUtils.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStMemoryControllerUtils.java
Apache-2.0
public Collection<ForStProperty> getProperties() { return Collections.unmodifiableCollection(properties); }
@return the enabled ForSt property-based metrics
getProperties
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStNativeMetricOptions.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStNativeMetricOptions.java
Apache-2.0
public Collection<TickerType> getMonitorTickerTypes() { return Collections.unmodifiableCollection(monitorTickerTypes); }
@return the enabled ForSt statistics metrics.
getMonitorTickerTypes
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStNativeMetricOptions.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStNativeMetricOptions.java
Apache-2.0
public boolean isEnabled() { return !properties.isEmpty() || isStatisticsEnabled(); }
{{@link ForStNativeMetricMonitor}} is enabled if any property or ticker type is set. @return true if {{RocksDBNativeMetricMonitor}} should be enabled, false otherwise.
isEnabled
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStNativeMetricOptions.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStNativeMetricOptions.java
Apache-2.0
public boolean isStatisticsEnabled() { return !monitorTickerTypes.isEmpty(); }
@return true if ForSt statistics metrics are enabled, false otherwise.
isStatisticsEnabled
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStNativeMetricOptions.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStNativeMetricOptions.java
Apache-2.0
public boolean isColumnFamilyAsVariable() { return this.columnFamilyAsVariable; }
{{@link ForStNativeMetricMonitor}} Whether to expose the column family as a variable.. @return true is column family to expose variable, false otherwise.
isColumnFamilyAsVariable
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStNativeMetricOptions.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStNativeMetricOptions.java
Apache-2.0
@Override public void close() throws Exception { this.columnFamilyHandle.close(); }
ForSt specific information about the k/v states.
close
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStOperationUtils.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStOperationUtils.java
Apache-2.0
default ForStNativeMetricOptions createNativeMetricsOptions( ForStNativeMetricOptions nativeMetricOptions) { return nativeMetricOptions; }
This method should enable certain ForSt metrics to be forwarded to Flink's metrics reporter. <p>Enabling these monitoring options may degrade ForSt performance and should be set with care. @param nativeMetricOptions The options object with the pre-defined options. @return The options object on which the additional options are set.
createNativeMetricsOptions
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStOptionsFactory.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStOptionsFactory.java
Apache-2.0
public DBOptions getDbOptions() { // initial options from common profile DBOptions opt = createBaseCommonDBOptions(); handlesToClose.add(opt); // load configurable options on top of pre-defined profile setDBOptionsFromConfigurableOptions(opt); // add user-defined options factory, if specified if (optionsFactory != null) { opt = optionsFactory.createDBOptions(opt, handlesToClose); } // add necessary default options opt = opt.setCreateIfMissing(true).setAvoidFlushDuringShutdown(true); // if sharedResources is non-null, use the write buffer manager from it. if (sharedResources != null) { opt.setWriteBufferManager(sharedResources.getResourceHandle().getWriteBufferManager()); } if (enableStatistics) { Statistics statistics = new Statistics(); opt.setStatistics(statistics); handlesToClose.add(statistics); } // TODO: Fallback to checkpoint directory when checkpoint feature is ready if not // configured, // fallback to local directory currently temporarily. if (remoteForStPath != null) { FlinkEnv flinkEnv = new FlinkEnv( remoteBasePath.toString(), new StringifiedForStFileSystem(forStFileSystem)); opt.setEnv(flinkEnv); handlesToClose.add(flinkEnv); } return opt; }
Gets the ForSt {@link DBOptions} to be used for ForSt instances.
getDbOptions
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStResourceContainer.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStResourceContainer.java
Apache-2.0
public ColumnFamilyOptions getColumnOptions() { // initial options from common profile ColumnFamilyOptions opt = createBaseCommonColumnOptions(); handlesToClose.add(opt); // load configurable options on top of pre-defined profile setColumnFamilyOptionsFromConfigurableOptions(opt, handlesToClose); // add user-defined options, if specified if (optionsFactory != null) { opt = optionsFactory.createColumnOptions(opt, handlesToClose); } // if sharedResources is non-null, use the block cache from it and // set necessary options for performance consideration with memory control if (sharedResources != null) { final ForStSharedResources rocksResources = sharedResources.getResourceHandle(); final Cache blockCache = rocksResources.getCache(); TableFormatConfig tableFormatConfig = opt.tableFormatConfig(); BlockBasedTableConfig blockBasedTableConfig; if (tableFormatConfig == null) { blockBasedTableConfig = new BlockBasedTableConfig(); } else { Preconditions.checkArgument( tableFormatConfig instanceof BlockBasedTableConfig, "We currently only support BlockBasedTableConfig When bounding total memory."); blockBasedTableConfig = (BlockBasedTableConfig) tableFormatConfig; } if (rocksResources.isUsingPartitionedIndexFilters() && overwriteFilterIfExist(blockBasedTableConfig)) { blockBasedTableConfig.setIndexType(IndexType.kTwoLevelIndexSearch); blockBasedTableConfig.setPartitionFilters(true); blockBasedTableConfig.setPinTopLevelIndexAndFilter(true); } blockBasedTableConfig.setBlockCache(blockCache); blockBasedTableConfig.setCacheIndexAndFilterBlocks(true); blockBasedTableConfig.setCacheIndexAndFilterBlocksWithHighPriority(true); blockBasedTableConfig.setPinL0FilterAndIndexBlocksInCache(true); opt.setTableFormatConfig(blockBasedTableConfig); } return opt; }
Gets the ForSt {@link ColumnFamilyOptions} to be used for all ForSt instances.
getColumnOptions
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStResourceContainer.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStResourceContainer.java
Apache-2.0
public WriteOptions getWriteOptions() { // Disable WAL by default WriteOptions opt = new WriteOptions().setDisableWAL(true); handlesToClose.add(opt); // add user-defined options factory, if specified if (optionsFactory != null) { opt = optionsFactory.createWriteOptions(opt, handlesToClose); } return opt; }
Gets the ForSt {@link WriteOptions} to be used for write operations.
getWriteOptions
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStResourceContainer.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStResourceContainer.java
Apache-2.0
DBOptions createBaseCommonDBOptions() { return new DBOptions().setUseFsync(false).setStatsDumpPeriodSec(0); }
Create a {@link DBOptions} for ForSt, including some common settings.
createBaseCommonDBOptions
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStResourceContainer.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStResourceContainer.java
Apache-2.0
ColumnFamilyOptions createBaseCommonColumnOptions() { return new ColumnFamilyOptions(); }
Create a {@link ColumnFamilyOptions} for ForSt, including some common settings.
createBaseCommonColumnOptions
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStResourceContainer.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStResourceContainer.java
Apache-2.0
private String resolveRelocatedDbLogPrefix(String instanceForStAbsolutePath) { if (!instanceForStAbsolutePath.isEmpty() && !instanceForStAbsolutePath.matches("^[a-zA-Z0-9\\-._].*")) { instanceForStAbsolutePath = instanceForStAbsolutePath.substring(1); } return instanceForStAbsolutePath.replaceAll("[^a-zA-Z0-9\\-._]", "_") + FORST_RELOCATE_LOG_SUFFIX; }
Resolve the prefix of ForSt's log file name according to ForSt's log file name rules. @param instanceForStAbsolutePath The path where the ForSt directory is located. @return Resolved ForSt log name prefix.
resolveRelocatedDbLogPrefix
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStResourceContainer.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStResourceContainer.java
Apache-2.0
public static <K, N> byte[] serializeKeyAndNamespace( ContextKey<K, N> contextKey, SerializedCompositeKeyBuilder<K> builder, N defaultNamespace, TypeSerializer<N> namespaceSerializer, boolean enableKeyReuse) throws IOException { N namespace = contextKey.getNamespace(); namespace = (namespace == null ? defaultNamespace : namespace); if (enableKeyReuse && namespace == defaultNamespace) { // key reuse. return contextKey.getOrCreateSerializedKey( ctxKey -> { builder.setKeyAndKeyGroup(ctxKey.getRawKey(), ctxKey.getKeyGroup()); return builder.buildCompositeKeyNamespace( defaultNamespace, namespaceSerializer); }); } else { // no key reuse, serialize again. builder.setKeyAndKeyGroup(contextKey.getRawKey(), contextKey.getKeyGroup()); return builder.buildCompositeKeyNamespace(namespace, namespaceSerializer); } }
Serialize a key and namespace. No user key. @param contextKey the context key of current request @param builder key builder @param defaultNamespace default namespace of the state @param namespaceSerializer the namespace serializer @param enableKeyReuse whether to enable key reuse
serializeKeyAndNamespace
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStSerializerUtils.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStSerializerUtils.java
Apache-2.0
public void setLocalDbStoragePath(String path) { setLocalDbStoragePaths(path == null ? null : new String[] {path}); }
Sets the path where the ForSt local files should be stored on the local file system. Setting this path overrides the default behavior, where the files are stored across the configured temp directories. <p>Passing {@code null} to this function restores the default behavior, where the configured temp directories will be used. @param path The path where the local ForSt database files are stored.
setLocalDbStoragePath
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStStateBackend.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStStateBackend.java
Apache-2.0
public void setLocalDbStoragePaths(String... paths) { if (paths == null) { localForStDirectories = null; } else if (paths.length == 0) { throw new IllegalArgumentException("empty paths"); } else { File[] pp = new File[paths.length]; for (int i = 0; i < paths.length; i++) { final String rawPath = paths[i]; final String path; if (rawPath == null) { throw new IllegalArgumentException("null path"); } else { // we need this for backwards compatibility, to allow URIs like 'file:///'... URI uri = null; try { uri = new Path(rawPath).toUri(); } catch (Exception e) { // cannot parse as a path } if (uri != null && uri.getScheme() != null) { if ("file".equalsIgnoreCase(uri.getScheme())) { path = uri.getPath(); } else { throw new IllegalArgumentException( "Path " + rawPath + " has a non-local scheme"); } } else { path = rawPath; } } pp[i] = new File(path); if (!pp[i].isAbsolute()) { throw new IllegalArgumentException("Relative paths are not supported"); } } localForStDirectories = pp; } }
Sets the local directories in which the ForSt database puts some files (like metadata files). These directories do not need to be persistent, they can be ephemeral, meaning that they are lost on a machine failure, because state in ForSt is persisted in checkpoints. <p>If nothing is configured, these directories default to the TaskManager's local temporary file directories. <p>Each distinct state will be stored in one path, but when the state backend creates multiple states, they will store their files on different paths. <p>Passing {@code null} to this function restores the default behavior, where the configured temp directories will be used. @param paths The paths across which the local ForSt database files will be spread.
setLocalDbStoragePaths
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStStateBackend.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStStateBackend.java
Apache-2.0
public String[] getLocalDbStoragePaths() { if (localForStDirectories == null) { return null; } else { String[] paths = new String[localForStDirectories.length]; for (int i = 0; i < paths.length; i++) { paths[i] = localForStDirectories[i].toString(); } return paths; } }
Gets the configured local DB storage paths, or null, if none were configured. <p>Under these directories on the TaskManager, ForSt stores some metadata files. These directories do not need to be persistent, they can be ephermeral, meaning that they are lost on a machine failure, because state in ForSt is persisted in checkpoints. <p>If nothing is configured, these directories default to the TaskManager's local temporary file directories.
getLocalDbStoragePaths
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStStateBackend.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStStateBackend.java
Apache-2.0
public void setForStOptions(ForStOptionsFactory optionsFactory) { this.forStOptionsFactory = optionsFactory; }
Sets {@link org.forstdb.Options} for the ForSt instances. Because the options are not serializable and hold native code references, they must be specified through a factory. <p>The options created by the factory here are applied on top of user-configured options from configuration set by {@link #configure(ReadableConfig, ClassLoader)} with keys in {@link ForStConfigurableOptions}. @param optionsFactory The options factory that lazily creates the ForSt options.
setForStOptions
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStStateBackend.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStStateBackend.java
Apache-2.0
@Override public ForStStateBackend createFromConfig(ReadableConfig config, ClassLoader classLoader) throws IllegalConfigurationException { return new ForStStateBackend().configure(config, classLoader); }
A factory that creates an {@link ForStStateBackend} from a configuration.
createFromConfig
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStStateBackendFactory.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStStateBackendFactory.java
Apache-2.0
@Override public Thread newThread(Runnable runnable) { return super.newThread( () -> { initializer.run(); runnable.run(); }); }
An {@link ExecutorThreadFactory} that could run a initializer before running the actual runnable for each created thread.
newThread
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStStateExecutor.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStStateExecutor.java
Apache-2.0
private @Nullable StreamStateHandle tryPathCopyingToCheckpoint( @Nonnull StreamStateHandle sourceHandle, CheckpointStreamFactory checkpointStreamFactory, CheckpointedStateScope stateScope) { try { if (!checkpointStreamFactory.canFastDuplicate(sourceHandle, stateScope)) { return null; } List<StreamStateHandle> result = checkpointStreamFactory.duplicate( Collections.singletonList(sourceHandle), stateScope); return result.get(0); } catch (Exception e) { LOG.warn("Failed to duplicate file to checkpoint: {} {}", sourceHandle, stateScope, e); } return null; }
Duplicate file to checkpoint storage by calling {@link CheckpointStreamFactory#duplicate} if possible. @param sourceHandle The source state handle @param checkpointStreamFactory The checkpoint stream factory @param stateScope The state scope @return The target state handle if path-copying is successful, otherwise null
tryPathCopyingToCheckpoint
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/datatransfer/CopyDataTransferStrategy.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/datatransfer/CopyDataTransferStrategy.java
Apache-2.0
public HandleAndLocalPath transferFileToCheckpointFs( SnapshotType.SharingFilesStrategy sharingFilesStrategy, Path file, long transferBytes, CheckpointStreamFactory checkpointStreamFactory, CheckpointedStateScope stateScope, CloseableRegistry snapshotCloseableRegistry, CloseableRegistry tmpResourcesRegistry, boolean forceCopy) throws Exception { try { DataTransferStrategy strategy = DataTransferStrategyBuilder.buildForSnapshot( sharingFilesStrategy, forStFs, checkpointStreamFactory, forceCopy); return createTransferFuture( strategy, file, transferBytes, checkpointStreamFactory, stateScope, snapshotCloseableRegistry, tmpResourcesRegistry) .get(); } catch (ExecutionException e) { throw convertExecutionException(e); } }
Transfer a single file to checkpoint filesystem. @param transferBytes Bytes will be transfer from the head of the file. If < 0, the whole file will be transferred.
transferFileToCheckpointFs
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/datatransfer/ForStStateDataTransfer.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/datatransfer/ForStStateDataTransfer.java
Apache-2.0
public List<HandleAndLocalPath> transferFilesToCheckpointFs( SnapshotType.SharingFilesStrategy sharingFilesStrategy, List<Path> files, CheckpointStreamFactory checkpointStreamFactory, CheckpointedStateScope stateScope, CloseableRegistry closeableRegistry, CloseableRegistry tmpResourcesRegistry, boolean forceCopy) throws Exception { DataTransferStrategy strategy = DataTransferStrategyBuilder.buildForSnapshot( sharingFilesStrategy, forStFs, checkpointStreamFactory, forceCopy); List<CompletableFuture<HandleAndLocalPath>> futures = files.stream() .map( file -> createTransferFuture( strategy, file, -1, checkpointStreamFactory, stateScope, closeableRegistry, tmpResourcesRegistry)) .collect(Collectors.toList()); try { List<HandleAndLocalPath> handles = new ArrayList<>(files.size()); for (CompletableFuture<HandleAndLocalPath> future : futures) { handles.add(future.get()); } return handles; } catch (ExecutionException e) { throw convertExecutionException(e); } }
Transfer a batch of files to checkpoint filesystem.
transferFilesToCheckpointFs
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/datatransfer/ForStStateDataTransfer.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/datatransfer/ForStStateDataTransfer.java
Apache-2.0
public void transferAllStateDataToDirectory( Collection<StateHandleTransferSpec> transferSpecs, CloseableRegistry closeableRegistry, RecoveryClaimMode recoveryClaimMode) throws Exception { // We use this closer for fine-grained shutdown of all parallel transferring. CloseableRegistry internalCloser = new CloseableRegistry(); // Make sure we also react to external close signals. closeableRegistry.registerCloseable(internalCloser); try { List<CompletableFuture<Void>> futures = transferAllStateDataToDirectoryAsync( transferSpecs, internalCloser, recoveryClaimMode) .collect(Collectors.toList()); // Wait until either all futures completed successfully or one failed exceptionally. FutureUtils.completeAll(futures).get(); } catch (ExecutionException e) { // Delete the transfer destination quietly. transferSpecs.stream() .map(StateHandleTransferSpec::getTransferDestination) .forEach( dir -> { try { getDbFileSystem().delete(dir, true); } catch (IOException ignored) { LOG.warn("Failed to delete transfer destination.", ignored); } }); throw convertExecutionException(e); } finally { // Unregister and close the internal closer. if (closeableRegistry.unregisterCloseable(internalCloser)) { IOUtils.closeQuietly(internalCloser); } } }
Transfer all state data to the target directory, as specified in the transfer requests. @param transferSpecs the list of transfers. @throws Exception If anything about the transfer goes wrong.
transferAllStateDataToDirectory
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/datatransfer/ForStStateDataTransfer.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/datatransfer/ForStStateDataTransfer.java
Apache-2.0
public int readFully(ByteBuffer bb) throws IOException { if (bb == null) { throw new NullPointerException(); } else if (bb.remaining() == 0) { return 0; } return originalInputStream instanceof ByteBufferReadable ? ((ByteBufferReadable) originalInputStream).read(bb) : readFullyFromFSDataInputStream(originalInputStream, bb); }
Reads up to <code>ByteBuffer#remaining</code> bytes of data from the input stream into a ByteBuffer. Not Thread-safe yet since the interface of sequential read of ForSt only be accessed by one thread at a time. TODO: Rename all methods about 'readFully' to 'read' when next version of ForSt is ready. @param bb the buffer into which the data is read. @return the total number of bytes read into the buffer. @throws IOException If the first byte cannot be read for any reason other than end of file, or if the input stream has been closed, or if some other I/O error occurs. @throws NullPointerException If <code>bb</code> is <code>null</code>.
readFully
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/ByteBufferReadableFSDataInputStream.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/ByteBufferReadableFSDataInputStream.java
Apache-2.0
private FSDataInputStream getStream() throws IOException { if (isFlinkThread()) { cacheEntry.touch(); } int round = 0; // Repeat at most 3 times. If fails, we will get the original stream for read. while (round++ < 3) { // Firstly, we try to get cache stream FSDataInputStream stream = tryGetCacheStream(); if (stream != null) { fileBasedCache.incHitCounter(); return stream; } // No cache stream if (streamStatus == StreamStatus.CACHED_CLOSING) { // if closing, update the position try { semaphore.acquire(1); } catch (InterruptedException e) { throw new RuntimeException(e); } originalStream.seek(position); position = -1; LOG.trace( "Cached Stream {} status from {} to {}", cacheEntry.cachePath, streamStatus, StreamStatus.CACHED_CLOSED); streamStatus = StreamStatus.CACHED_CLOSED; } // if it is CACHED_CLOSED, we try to reopen it if (streamStatus == StreamStatus.CACHED_CLOSED) { stream = tryReopenCachedStream(); if (stream != null) { fileBasedCache.incHitCounter(); return stream; } fileBasedCache.incMissCounter(); return originalStream; } else if (streamStatus == StreamStatus.ORIGINAL) { fileBasedCache.incMissCounter(); return originalStream; } else { // The stream is not closed, but we cannot get the cache stream. // Meaning that it is in the process of closing, but the status has not been // updated. Thus, we'd better retry here until it reach a stable state (CLOSING). Thread.yield(); } } return originalStream; }
Retrieves the appropriate input stream for reading data. This method attempts to use the cached stream if it is available and valid. If the cached stream is not available, it falls back to the original stream. The method also handles the transition between cached and original streams based on the current status of the stream. The invoker must ensure to release the cache stream after use. @return the input stream to be used for reading data @throws IOException if an I/O error occurs while accessing the stream
getStream
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/CachedDataInputStream.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/CachedDataInputStream.java
Apache-2.0
private FSDataInputStream tryGetCacheStream() { if (streamStatus == StreamStatus.CACHED_OPEN && cacheEntry.tryRetain() > 0) { // Double-check the status as it may change after retain. if (streamStatus == StreamStatus.CACHED_OPEN) { return fsdis; } } return null; }
Attempts to retrieve the cached stream if it is open and the reference count is greater than zero. If successful, it retains the reference count and returns the cached stream. The invoker must ensure to release the stream after use. @return the cached stream if available, or null if not
tryGetCacheStream
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/CachedDataInputStream.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/CachedDataInputStream.java
Apache-2.0
private FSDataInputStream tryReopenCachedStream() { if (streamStatus == StreamStatus.CACHED_CLOSED && isFlinkThread()) { try { fsdis = cacheEntry.getCacheStream(); if (fsdis != null) { LOG.trace( "Cached Stream {} status from {} to {}", cacheEntry.cachePath, streamStatus, StreamStatus.CACHED_OPEN); fsdis.seek(originalStream.getPos()); streamStatus = StreamStatus.CACHED_OPEN; return fsdis; } } catch (IOException e) { LOG.warn("Reopen stream error.", e); } } return null; }
Attempts to reopen the cached stream if it is closed and the current thread is a Flink thread. If successful, it updates the stream status and seeks to the original stream's position. Reference counting is retained, the invoked thread must dereference the stream after use. @return the reopened cached stream, or null if reopening fails
tryReopenCachedStream
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/CachedDataInputStream.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/CachedDataInputStream.java
Apache-2.0
public void addFirst(K key, V value) { if (!isSafeToAddFirst(value)) { addSecond(key, value); return; } Node newNode = new Node(key, value); newNodeCreated(value, newNode); map.put(key, newNode); if (head == null) { head = tail = newNode; } else { newNode.next = head; head.prev = newNode; head = newNode; } newNode.isBeforeMiddle = true; size++; addedToFirst(value); }
Adds a new entry to the front of the cache. @param key the key of the entry @param value the value of the entry
addFirst
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/DoubleListLru.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/DoubleListLru.java
Apache-2.0
/**
 * Moves the middle pointer one position towards the tail, promoting the former middle node
 * into the first list.
 */
public void moveMiddleBack() {
    if (middle == null) {
        return;
    }
    Node promoted = middle;
    promoted.isBeforeMiddle = true;
    middle = promoted.next;
    secondSize--;
    movedToFirst(promoted.value);
}
Moves the middle pointer back by one position.
moveMiddleBack
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/DoubleListLru.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/DoubleListLru.java
Apache-2.0
/**
 * Moves the middle pointer one position towards the head, demoting one node into the second
 * list. When the second list is empty, the tail becomes the new middle.
 */
public void moveMiddleFront() {
    Node demoted = null;
    if (middle == null) {
        // No second list yet: start it from the tail, if any node exists.
        if (size > 0) {
            demoted = tail;
        }
    } else if (middle.prev != null) {
        demoted = middle.prev;
    }
    if (demoted == null) {
        // Either the list is empty or the middle is already at the head.
        return;
    }
    middle = demoted;
    middle.isBeforeMiddle = false;
    secondSize++;
    movedToSecond(middle.value);
}
Moves the middle pointer forward by one position.
moveMiddleFront
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/DoubleListLru.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/DoubleListLru.java
Apache-2.0
/**
 * Inserts a new entry at the middle position (head of the second list).
 *
 * @param key the key of the entry
 * @param value the value of the entry
 */
public void addSecond(K key, V value) {
    Node newNode = new Node(key, value);
    newNodeCreated(value, newNode);
    map.put(key, newNode);
    if (head == null) {
        // Empty list: the new node is simultaneously head, tail and middle.
        head = tail = middle = newNode;
    } else if (middle == null) {
        // Non-empty first list, empty second list: append at the tail and make the
        // new node the middle (start of the second list).
        newNode.prev = tail;
        tail.next = newNode;
        tail = newNode;
        middle = newNode;
    } else {
        // Splice the new node in right before the current middle.
        newNode.next = middle;
        newNode.prev = middle.prev;
        if (middle.prev != null) {
            middle.prev.next = newNode;
        } else {
            // head == middle
            head = newNode;
        }
        middle.prev = newNode;
        middle = newNode;
    }
    newNode.isBeforeMiddle = false;
    secondSize++;
    size++;
    addedToSecond(value);
}
Inserts a new entry at the middle of the cache. @param key the key of the entry @param value the value of the entry
addSecond
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/DoubleListLru.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/DoubleListLru.java
Apache-2.0
/**
 * Returns the value of the middle entry in the cache, or null when there is no middle (the
 * second list is empty).
 */
@VisibleForTesting
V getMiddle() {
    if (middle == null) {
        return null;
    }
    return middle.value;
}
Returns the value of the middle entry in the cache. @return the value of the middle entry, or null if the cache is empty
getMiddle
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/DoubleListLru.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/DoubleListLru.java
Apache-2.0
/**
 * Looks up the value for the given key, optionally updating the LRU order.
 *
 * @param key the key of the entry
 * @param affectOrder whether this access should update the entry order
 * @return the value associated with the key, or null if absent
 */
public V get(K key, boolean affectOrder) {
    Node hit = map.get(key);
    if (hit == null) {
        return null;
    }
    if (affectOrder) {
        accessNode(hit);
    }
    return hit.value;
}
Retrieves the value associated with the specified key. Optionally affects the order of the entries in the cache. @param key the key of the entry @param affectOrder true if the order of the entries should be affected, false otherwise @return the value associated with the key, or null if the key is not found
get
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/DoubleListLru.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/DoubleListLru.java
Apache-2.0
/**
 * Moves the given node (already in the first list) to the head.
 *
 * @param target the node to move to the front
 */
private void moveToFront(Node target) {
    assert target.isBeforeMiddle;
    if (target == head) {
        return;
    }
    // Detach the node from its current position.
    if (target != tail) {
        target.prev.next = target.next;
        target.next.prev = target.prev;
    } else {
        tail = target.prev;
        tail.next = null;
    }
    // Re-link it as the new head.
    target.next = head;
    target.prev = null;
    head.prev = target;
    head = target;
}
Moves the specified node to the front of the cache. @param node the node to be moved to the front
moveToFront
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/DoubleListLru.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/DoubleListLru.java
Apache-2.0
/**
 * Moves the given node (already in the second list) to the middle position.
 *
 * @param node the node to be moved to the middle
 */
private void moveToMiddle(Node node) {
    assert !node.isBeforeMiddle;
    if (node == middle) {
        return;
    }
    // Detach the node from its current position.
    if (node == tail) {
        tail = node.prev;
        tail.next = null;
    } else {
        node.prev.next = node.next;
        node.next.prev = node.prev;
    }
    // Splice it in right before the current middle and make it the new middle.
    node.next = middle;
    node.prev = middle.prev;
    if (middle.prev != null) {
        middle.prev.next = node;
    } else {
        // head == middle
        head = node;
    }
    middle.prev = node;
    middle = node;
}
Moves the specified node to the middle of the cache. @param node the node to be moved to the middle
moveToMiddle
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/DoubleListLru.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/DoubleListLru.java
Apache-2.0
/**
 * Clears the Flink-thread flag for the current thread, so that subsequent file accesses on
 * this thread do not affect the LRU cache order or metrics. Exposed for tests.
 */
@VisibleForTesting
public static void unsetFlinkThread() {
    isFlinkThread.set(false);
}
Unsets the Flink-thread flag for the current thread. This method clears the mark that identifies the thread as a Flink thread, which is used to determine whether file access would affect the LRU cache order, or metrics updates.
unsetFlinkThread
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/FileBasedCache.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/FileBasedCache.java
Apache-2.0
/** Evicts the given file cache entry, counting the eviction when a metric is registered. */
private void tryEvict(FileCacheEntry value) {
    // invalidate() must run unconditionally: it releases the cache's owned reference.
    boolean invalidated = value.invalidate();
    if (invalidated && evictCounter != null) {
        evictCounter.inc();
        value.evictCount++;
    }
}
Tool method that evicts a file cache entry, releasing the owned reference if needed.
tryEvict
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/FileBasedCache.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/FileBasedCache.java
Apache-2.0
/**
 * Opens a new {@link CachedDataInputStream} backed by this cache entry. When the cached
 * file is readable it is used immediately; otherwise the original stream serves reads until
 * the cache becomes available. The new stream is tracked in {@code openedStreams}.
 *
 * @param originalStream the fallback input stream
 * @return a new stream for reading this entry's data
 * @throws IOException if opening the cached stream fails
 */
public CachedDataInputStream open(FSDataInputStream originalStream) throws IOException {
    LOG.trace("Open new stream for cache entry {}.", cachePath);
    FSDataInputStream cacheStream = getCacheStream();
    CachedDataInputStream wrapped;
    if (cacheStream == null) {
        // Cache data not readable yet; serve from the original stream for now.
        wrapped = new CachedDataInputStream(fileBasedCache, this, originalStream);
        openedStreams.add(wrapped);
    } else {
        wrapped = new CachedDataInputStream(fileBasedCache, this, cacheStream, originalStream);
        openedStreams.add(wrapped);
        // getCacheStream() retained a reference; the stream now owns it, drop ours.
        release();
    }
    return wrapped;
}
Opens a new {@link CachedDataInputStream} from this cache entry. If the cache stream is available, it will be used; otherwise, the original stream will be used. But the cache stream will be used once available. The opened stream is added to the queue of opened streams associated with this cache entry. @param originalStream the original input stream to be used if the cache stream is not available @return a new {@link CachedDataInputStream} for reading data @throws IOException if an I/O error occurs while opening the stream
open
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/FileCacheEntry.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/FileCacheEntry.java
Apache-2.0
/**
 * Opens the cached file for reading when this entry is LOADED and a reference can be
 * retained; otherwise returns null. On success the caller owns one retained reference.
 *
 * @return the cached input stream, or null when unavailable
 * @throws IOException if opening the cached file fails
 */
FSDataInputStream getCacheStream() throws IOException {
    if (status.get() != EntryStatus.LOADED) {
        return null;
    }
    if (tryRetain() <= 0) {
        // Entry is being torn down; do not open the file.
        return null;
    }
    return cacheFs.open(cachePath);
}
Retrieves the cached input stream for this cache entry if it is available and the entry is in a valid state. The method attempts to open the cached stream if the entry is in the LOADED state and retains a reference to it. @return the cached input stream if available, otherwise null @throws IOException if an I/O error occurs while opening the cached stream
getCacheStream
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/FileCacheEntry.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/FileCacheEntry.java
Apache-2.0
/**
 * Sets the callback that refreshes this entry's position in the LRU of {@code
 * FileBasedCache}. Provided after construction because the LRU node must exist before the
 * callback can be built.
 */
void setTouchFunction(Runnable touchFunction) {
    this.touchFunction = touchFunction;
}
Sets the touch function associated with this cache entry. The reason for setting the touch function is to update the entry order in {@link FileBasedCache}. The touch function is not initialized in constructor, since the node in LRU should be created before the touch function is available, and this all happens after this entry is built.
setTouchFunction
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/FileCacheEntry.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/FileCacheEntry.java
Apache-2.0
/** Signals that this entry was accessed, so its LRU order should be refreshed. */
void touch() {
    Runnable callback = touchFunction;
    if (callback != null) {
        callback.run();
    }
}
Invokes the touch function associated with this cache entry. This method is called to indicate that the cache entry has been accessed, and as a result, the entry order in {@link FileBasedCache} is expected to be updated.
touch
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/FileCacheEntry.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/FileCacheEntry.java
Apache-2.0
Path load() { FSDataInputStream inputStream = null; FSDataOutputStream outputStream = null; try { final byte[] buffer = new byte[READ_BUFFER_SIZE]; inputStream = originalPath.getFileSystem().open(originalPath, READ_BUFFER_SIZE); outputStream = cacheFs.create(cachePath, FileSystem.WriteMode.OVERWRITE); long maxTransferBytes = originalPath.getFileSystem().getFileStatus(originalPath).getLen(); while (maxTransferBytes > 0) { int maxReadBytes = (int) Math.min(maxTransferBytes, READ_BUFFER_SIZE); int readBytes = inputStream.read(buffer, 0, maxReadBytes); if (readBytes == -1) { break; } outputStream.write(buffer, 0, readBytes); maxTransferBytes -= readBytes; } return cachePath; } catch (IOException e) { return null; } finally { try { if (inputStream != null) { inputStream.close(); } if (outputStream != null) { outputStream.close(); } } catch (IOException e) { // ignore } } }
Loads the file from the original path to the cache path. This method reads the file from the original path and writes it to the cache path. If the file is successfully loaded, the cache path is returned. If an I/O error occurs during the loading process, null is returned. @see FileBasedCache#movedToFirst(FileCacheEntry) @return the cache path if the file is successfully loaded, otherwise null.
load
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/FileCacheEntry.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/FileCacheEntry.java
Apache-2.0
/**
 * Called when the reference count drops to zero. Per the class contract, only two scenarios
 * can reach this point: (1) the entry was invalidated and its owned reference released (see
 * invalidate()), or (2) the entry was closed and the release was scheduled (see close()).
 * In both cases the underlying cache file is removed.
 */
@Override
protected void referenceCountReachedZero(@Nullable Object o) {
    // The INVALID -> REMOVING transition ensures only one caller removes the file; a
    // CLOSED entry is removed as well.
    if (switchStatus(EntryStatus.INVALID, EntryStatus.REMOVING)
            || checkStatus(EntryStatus.CLOSED)) {
        fileBasedCache.removeFile(this);
    }
}
Only two scenario that the reference count can reach 0. 1. The cache entry is invalid the reference count is released. {@see invalidate()} 2. The cache entry is closed and the reference count is scheduled released. {@see close()}
referenceCountReachedZero
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/FileCacheEntry.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/cache/FileCacheEntry.java
Apache-2.0
/**
 * Creates a new file entry in the mapping table.
 *
 * @param filePath the caller-visible path of the new file
 * @param overwrite whether an existing mapping may be overwritten
 * @param cache the file-based cache to associate, ignored for locally-pinned files
 * @return the created mapping entry
 */
public MappingEntry createNewFile(Path filePath, boolean overwrite, FileBasedCache cache) {
    // The mapping key is always the caller-visible path, even when the physical
    // location is forced to be local below.
    final String mappingKey = filePath.toString();
    final boolean forcedLocal = FileOwnershipDecider.shouldAlwaysBeLocal(filePath);
    Path physicalPath = forcedLocal ? forceLocalPath(filePath) : filePath;
    // Locally-pinned files bypass the cache entirely.
    FileBasedCache effectiveCache = forcedLocal ? null : cache;
    return addFileToMappingTable(
            mappingKey,
            toUUIDPath(physicalPath),
            FileOwnershipDecider.decideForNewFile(physicalPath),
            effectiveCache,
            true,
            overwrite);
}
Create a new file in the mapping table.
createNewFile
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/filemapping/FileMappingManager.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/filemapping/FileMappingManager.java
Apache-2.0
public boolean renameFile(String src, String dst) throws IOException { if (src.equals(dst)) { return true; } MappingEntry srcEntry = mappingTable.get(src); if (srcEntry != null) { // rename file if (mappingTable.containsKey(dst)) { MappingEntry dstEntry = mappingTable.remove(dst); dstEntry.release(); } LOG.trace("rename: {} -> {}", src, dst); mappingTable.remove(src); mappingTable.put(dst, srcEntry); } else { // rename directory = link to dst dir + delete src dir // step 1: link all files under src to dst List<String> toRename = listByPrefix(src); for (String key : toRename) { MappingEntry sourceEntry = mappingTable.get(key); sourceEntry.retain(); String renamedDst = key.replace(src, dst); LOG.trace("rename: {} -> {}", key, renamedDst); mappingTable.put(renamedDst, sourceEntry); } Path dstPath = new Path(dst); if (!fileSystem.exists(dstPath)) { fileSystem.mkdirs(dstPath); } // step 2: delete src dir deleteFileOrDirectory(new Path(src), true); } return true; }
1. If src can match any key, we only `mark rename`, no physical file would be renamed. 2. If src is a directory, all files under src will be renamed, including linked files and local files, the directory also would be renamed in file system physically. @param src the source path @param dst the destination path @return always return true except for IOException
renameFile
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/filemapping/FileMappingManager.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/filemapping/FileMappingManager.java
Apache-2.0
/**
 * Closes the underlying stream and marks the associated {@link MappingEntry} as no longer
 * being written, atomically with respect to readers of {@code MappingEntry.isWriting}.
 *
 * @throws IOException if closing the underlying stream fails
 */
@Override
public void close() throws IOException {
    // Synchronize on the entry to ensure the atomicity of endWriting and close
    // also see invokers of MappingEntry.isWriting
    synchronized (entry) {
        super.close();
        entry.endWriting();
    }
}
A {@link FSDataOutputStream} that is associated with a {@link MappingEntry}.
close
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/filemapping/FSDataOutputStreamWithEntry.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/fs/filemapping/FSDataOutputStreamWithEntry.java
Apache-2.0
/**
 * Initializes the base DB from a single state handle. When the handle's key-group range
 * matches this backend's target range (no rescale), the previous incremental-files status
 * is restored as well; otherwise (scale-out) the DB is clipped to the target range.
 *
 * @param stateHandleSpec the state handle to restore the base DB from
 * @throws Exception on any error during restore
 */
private void initBaseDBFromSingleStateHandle(StateHandleTransferSpec stateHandleSpec)
        throws Exception {
    IncrementalRemoteKeyedStateHandle stateHandle = stateHandleSpec.getStateHandle();
    logger.info(
            "Starting opening base ForSt instance in operator {} with target key-group range {} from state handle {}.",
            operatorIdentifier,
            keyGroupRange.prettyPrintInterval(),
            stateHandleSpec);
    // Restore base DB from selected initial handle
    restoreBaseDBFromMainHandle(stateHandleSpec);
    KeyGroupRange stateHandleKeyGroupRange = stateHandle.getKeyGroupRange();
    // Check if the key-groups range has changed.
    if (Objects.equals(stateHandleKeyGroupRange, keyGroupRange)) {
        // This is the case if we didn't rescale, so we can restore all the info from the
        // previous backend instance (backend id and incremental checkpoint history).
        restorePreviousIncrementalFilesStatus(stateHandle);
    } else {
        // If the key-groups don't match, this was a scale out, and we need to clip the
        // key-groups range of the db to the target range for this backend.
        try {
            clipDBWithKeyGroupRange(
                    this.forstHandle.getDb(),
                    this.forstHandle.getColumnFamilyHandles(),
                    keyGroupRange,
                    stateHandleKeyGroupRange,
                    keyGroupPrefixBytes,
                    useDeleteFilesInRange);
        } catch (RocksDBException e) {
            String errMsg = "Failed to clip DB after initialization.";
            logger.error(errMsg, e);
            throw new BackendBuildingException(errMsg, e);
        }
    }
    logger.info(
            "Finished opening base ForSt instance in operator {} with target key-group range {}.",
            operatorIdentifier,
            keyGroupRange.prettyPrintInterval());
}
Initializes the base DB that we restore from a single local state handle. @param stateHandleSpec the state handle to restore the base DB from. @throws Exception on any error during restore.
initBaseDBFromSingleStateHandle
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/restore/ForStIncrementalRestoreOperation.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/restore/ForStIncrementalRestoreOperation.java
Apache-2.0
/**
 * Restores the checkpointing status (backend id, shared-state history, last completed
 * checkpoint) from the given handle. Only valid when the backend was not rescaled.
 *
 * @param incrementalHandle the single state handle from which the backend is restored
 */
private void restorePreviousIncrementalFilesStatus(
        IncrementalKeyedStateHandle incrementalHandle) {
    final long restoredCheckpointId = incrementalHandle.getCheckpointId();
    backendUID = incrementalHandle.getBackendIdentifier();
    restoredSstFiles.put(restoredCheckpointId, incrementalHandle.getSharedStateHandles());
    lastCompletedCheckpointId = restoredCheckpointId;
    logger.info(
            "Restored previous incremental files status in backend with range {} in operator {}: backend uuid {}, last checkpoint id {}.",
            keyGroupRange.prettyPrintInterval(),
            operatorIdentifier,
            backendUID,
            lastCompletedCheckpointId);
}
Restores the checkpointing status and state for this backend. This can only be done if the backend was not rescaled and is therefore identical to the source backend in the previous run. @param incrementalHandle the single state handle from which the backend is restored.
restorePreviousIncrementalFilesStatus
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/restore/ForStIncrementalRestoreOperation.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/restore/ForStIncrementalRestoreOperation.java
Apache-2.0
/**
 * Copies all data from the given state handles into the base DB via temporary restored DB
 * instances, batching writes through a single write-batch wrapper.
 *
 * @param toImportSpecs the state handles to import
 * @param startKeyGroupPrefixBytes the min/start key of the key-groups range as bytes
 * @param stopKeyGroupPrefixBytes the max+1/end key of the key-groups range as bytes
 * @throws Exception on any copy error
 */
private void copyToBaseDBUsingTempDBs(
        List<StateHandleTransferSpec> toImportSpecs,
        byte[] startKeyGroupPrefixBytes,
        byte[] stopKeyGroupPrefixBytes)
        throws Exception {
    if (toImportSpecs.isEmpty()) {
        return;
    }
    logger.info(
            "Starting to copy state handles for backend with range {} in operator {} using temporary instances.",
            keyGroupRange.prettyPrintInterval(),
            operatorIdentifier);
    try (ForStDBWriteBatchWrapper writeBatchWrapper =
                    new ForStDBWriteBatchWrapper(this.forstHandle.getDb(), writeBatchSize);
            Closeable ignored =
                    cancelStreamRegistry.registerCloseableTemporarily(
                            writeBatchWrapper.getCancelCloseable())) {
        for (StateHandleTransferSpec handleToCopy : toImportSpecs) {
            // Each handle is restored into its own short-lived DB instance and copied
            // from there into the base DB.
            try (RestoredDBInstance restoredDBInstance = restoreTempDBInstance(handleToCopy)) {
                copyTempDbIntoBaseDb(
                        restoredDBInstance,
                        writeBatchWrapper,
                        startKeyGroupPrefixBytes,
                        stopKeyGroupPrefixBytes);
            }
        }
    }
    // Fixed log-message typo: "Competed" -> "Completed".
    logger.info(
            "Completed copying state handles for backend with range {} in operator {} using temporary instances.",
            keyGroupRange.prettyPrintInterval(),
            operatorIdentifier);
}
Helper method to copy all data from the given local state handles to the base DB by using temporary DB instances. @param toImportSpecs the state handles to import. @param startKeyGroupPrefixBytes the min/start key of the key groups range as bytes. @param stopKeyGroupPrefixBytes the max+1/end key of the key groups range as bytes. @throws Exception on any copy error.
copyToBaseDBUsingTempDBs
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/restore/ForStIncrementalRestoreOperation.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/restore/ForStIncrementalRestoreOperation.java
Apache-2.0
/**
 * Merges multiple state handles into the restoring base DB by initializing the base DB from
 * one handle and copying the rest through temporary DB instances.
 *
 * @param baseSpec the state handle used to initialize the base DB
 * @param keyedStateHandles the remaining state handles to merge into the base DB
 * @param startKeyGroupPrefixBytes the min/start key of the key-groups range as bytes
 * @param stopKeyGroupPrefixBytes the max+1/end key of the key-groups range as bytes
 * @throws Exception on any merge error
 */
private void mergeStateHandlesWithCopyFromTemporaryInstance(
        StateHandleTransferSpec baseSpec,
        List<StateHandleTransferSpec> keyedStateHandles,
        byte[] startKeyGroupPrefixBytes,
        byte[] stopKeyGroupPrefixBytes)
        throws Exception {
    logger.info(
            "Starting to merge state for backend with range {} in operator {} from multiple state handles using temporary instances.",
            keyGroupRange.prettyPrintInterval(),
            operatorIdentifier);
    // Init the base DB instance with the initial state
    initBaseDBFromSingleStateHandle(baseSpec);
    // Copy remaining handles using temporary RocksDB instances
    copyToBaseDBUsingTempDBs(
            keyedStateHandles, startKeyGroupPrefixBytes, stopKeyGroupPrefixBytes);
    logger.info(
            "Completed merging state for backend with range {} in operator {} from multiple state handles using temporary instances.",
            keyGroupRange.prettyPrintInterval(),
            operatorIdentifier);
}
Helper method that merges the data from multiple state handles into the restoring base DB with the help of copying through temporary RocksDB instances. @param baseSpec the state handle used to initialize the base DB. @param keyedStateHandles the state handles to merge into the base DB. @param startKeyGroupPrefixBytes the min/start key of the key groups range as bytes. @param stopKeyGroupPrefixBytes the max+1/end key of the key groups range as bytes. @throws Exception on any merge error.
mergeStateHandlesWithCopyFromTemporaryInstance
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/restore/ForStIncrementalRestoreOperation.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/restore/ForStIncrementalRestoreOperation.java
Apache-2.0
/**
 * Restores the base DB by merging multiple state handles into one. First tries to export
 * column families whose sst data lies in the expected key-groups range and import them;
 * handles that cannot be imported are copied via temporary DB instances. If nothing at all
 * can be exported, falls back entirely to the temporary-instance copy path.
 *
 * @param keyedStateHandles the state handles to restore the base DB from
 * @param startKeyGroupPrefixBytes the min/start key of the key-groups range as bytes
 * @param stopKeyGroupPrefixBytes the max+1/end key of the key-groups range as bytes
 * @throws Exception on any restore error
 */
public void mergeStateHandlesWithClipAndIngest(
        List<StateHandleTransferSpec> keyedStateHandles,
        byte[] startKeyGroupPrefixBytes,
        byte[] stopKeyGroupPrefixBytes)
        throws Exception {
    Path exportCfBasePath = new Path(forstBasePath, "export-cfs");
    getFileSystem(forstBasePath).mkdirs(exportCfBasePath);
    final Map<RegisteredStateMetaInfoBase.Key, List<ExportImportFilesMetaData>>
            exportedColumnFamilyMetaData = new HashMap<>(keyedStateHandles.size());
    final List<StateHandleTransferSpec> notImportableHandles =
            new ArrayList<>(keyedStateHandles.size());
    try {
        KeyGroupRange exportedSstKeyGroupsRange =
                exportColumnFamiliesWithSstDataInKeyGroupsRange(
                        exportCfBasePath,
                        keyedStateHandles,
                        exportedColumnFamilyMetaData,
                        notImportableHandles);
        if (exportedColumnFamilyMetaData.isEmpty()) {
            // Nothing could be exported, so we fall back to
            // #mergeStateHandlesWithCopyFromTemporaryInstance
            int bestStateHandleForInit =
                    findTheBestStateHandleForInitial(
                            restoreStateHandles, keyGroupRange, overlapFractionThreshold);
            notImportableHandles.remove(bestStateHandleForInit);
            StateHandleTransferSpec baseSpec =
                    new StateHandleTransferSpec(
                            restoreStateHandles.get(bestStateHandleForInit),
                            new Path(forstBasePath, DB_DIR_STRING));
            transferAllStateHandles(Collections.singletonList(baseSpec));
            mergeStateHandlesWithCopyFromTemporaryInstance(
                    baseSpec,
                    notImportableHandles,
                    startKeyGroupPrefixBytes,
                    stopKeyGroupPrefixBytes);
        } else {
            // We initialize the base DB by importing all the exported data.
            initBaseDBFromColumnFamilyImports(
                    exportedColumnFamilyMetaData, exportedSstKeyGroupsRange);
            // Copy data from handles that we couldn't directly import using temporary
            // instances.
            copyToBaseDBUsingTempDBs(
                    notImportableHandles, startKeyGroupPrefixBytes, stopKeyGroupPrefixBytes);
        }
    } finally {
        // Close native RocksDB objects
        exportedColumnFamilyMetaData.values().forEach(IOUtils::closeAllQuietly);
        // Cleanup export base directory
        cleanUpPathQuietly(exportCfBasePath);
    }
}
Restores the base DB by merging multiple state handles into one. This method first checks if all data to import is in the expected key-groups range and then uses import/export. Otherwise, this method falls back to copying the data using a temporary DB. @param keyedStateHandles the list of state handles to restore the base DB from. @param startKeyGroupPrefixBytes the min/start key of the key groups range as bytes. @param stopKeyGroupPrefixBytes the max+1/end key of the key groups range as bytes. @throws Exception on any restore error.
mergeStateHandlesWithClipAndIngest
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/restore/ForStIncrementalRestoreOperation.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/restore/ForStIncrementalRestoreOperation.java
Apache-2.0
/**
 * Performs the incremental snapshot: materializes the metadata, transfers sst/misc files
 * (possibly reusing previously uploaded handles), and assembles the resulting
 * {@link IncrementalRemoteKeyedStateHandle}. On failure, temporary resources are cleaned
 * up; on success, reused handles are reported back to the stream factory.
 */
@Override
public SnapshotResult<KeyedStateHandle> get(CloseableRegistry snapshotCloseableRegistry)
        throws Exception {
    boolean completed = false;
    final List<StreamStateHandle> reusedHandle = new ArrayList<>();
    try {
        // Handle to the meta data file
        SnapshotResult<StreamStateHandle> metaStateHandle =
                materializeMetaData(
                        snapshotCloseableRegistry,
                        tmpResourcesRegistry,
                        snapshotResources.stateMetaInfoSnapshots,
                        checkpointId,
                        checkpointStreamFactory);
        final List<HandleAndLocalPath> sstFiles = new ArrayList<>();
        final List<HandleAndLocalPath> miscFiles = new ArrayList<>();
        // Total checkpointed size = metadata + all transferred files.
        long checkpointedSize = metaStateHandle.getStateSize();
        checkpointedSize +=
                transferSnapshotFiles(
                        sstFiles,
                        miscFiles,
                        snapshotCloseableRegistry,
                        tmpResourcesRegistry,
                        reusedHandle);
        final IncrementalRemoteKeyedStateHandle jmIncrementalKeyedStateHandle =
                new IncrementalRemoteKeyedStateHandle(
                        backendUID,
                        keyGroupRange,
                        checkpointId,
                        sstFiles,
                        miscFiles,
                        metaStateHandle.getJobManagerOwnedSnapshot(),
                        checkpointedSize);
        completed = true;
        return SnapshotResult.of(jmIncrementalKeyedStateHandle);
    } finally {
        snapshotResources.release();
        if (!completed) {
            // Snapshot failed part-way; discard anything uploaded so far.
            try {
                tmpResourcesRegistry.close();
            } catch (Exception e) {
                LOG.warn("Could not properly clean tmp resources.", e);
            }
        } else {
            // Report the reuse of state handle to stream factory, which is essential for
            // file merging mechanism.
            checkpointStreamFactory.reusePreviousStateHandle(reusedHandle);
        }
    }
}
Encapsulates the process to perform an incremental snapshot of a ForStKeyedStateBackend.
get
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/snapshot/ForStIncrementalSnapshotStrategy.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/snapshot/ForStIncrementalSnapshotStrategy.java
Apache-2.0
/**
 * Performs the full snapshot: materializes the metadata, uploads all snapshot files as
 * private state, and assembles the resulting {@link IncrementalRemoteKeyedStateHandle}
 * (with an empty shared-file list, since nothing is incremental). On failure, temporary
 * resources are cleaned up.
 */
@Override
public SnapshotResult<KeyedStateHandle> get(CloseableRegistry snapshotCloseableRegistry)
        throws Exception {
    boolean completed = false;
    try {
        // Handle to the meta data file
        SnapshotResult<StreamStateHandle> metaStateHandle =
                materializeMetaData(
                        snapshotCloseableRegistry,
                        tmpResourcesRegistry,
                        snapshotResources.stateMetaInfoSnapshots,
                        checkpointId,
                        checkpointStreamFactory);
        final List<IncrementalKeyedStateHandle.HandleAndLocalPath> privateFiles =
                new ArrayList<>();
        // Total checkpointed size = metadata + all uploaded files.
        long checkpointedSize = metaStateHandle.getStateSize();
        checkpointedSize +=
                uploadSnapshotFiles(
                        privateFiles, snapshotCloseableRegistry, tmpResourcesRegistry);
        final IncrementalRemoteKeyedStateHandle jmIncrementalKeyedStateHandle =
                new IncrementalRemoteKeyedStateHandle(
                        backendUID,
                        keyGroupRange,
                        checkpointId,
                        Collections.emptyList(),
                        privateFiles,
                        metaStateHandle.getJobManagerOwnedSnapshot(),
                        checkpointedSize);
        completed = true;
        return SnapshotResult.of(jmIncrementalKeyedStateHandle);
    } finally {
        snapshotResources.release();
        if (!completed) {
            // Snapshot failed part-way; discard anything uploaded so far.
            try {
                tmpResourcesRegistry.close();
            } catch (Exception e) {
                LOG.warn("Could not properly clean tmp resources.", e);
            }
        }
    }
}
Encapsulates the process to perform a full snapshot of a ForStKeyedStateBackend.
get
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/snapshot/ForStNativeFullSnapshotStrategy.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/snapshot/ForStNativeFullSnapshotStrategy.java
Apache-2.0
/**
 * Validates that the ForSt JNI library can be loaded from two independent child-first
 * class loaders (as happens when ForSt is part of a user-code JAR). Uses reflection so
 * each class loader resolves its own copy of ForStStateBackend.
 */
@Test
public void testTwoSeparateClassLoaders() throws Exception {
    // collect the libraries / class folders with ForSt related code: the state backend and
    // ForSt itself
    final URL codePath1 =
            ForStStateBackend.class.getProtectionDomain().getCodeSource().getLocation();
    final URL codePath2 = RocksDB.class.getProtectionDomain().getCodeSource().getLocation();
    final ClassLoader parent = getClass().getClassLoader();
    final ClassLoader loader1 =
            FlinkUserCodeClassLoaders.childFirst(
                    new URL[] {codePath1, codePath2},
                    parent,
                    new String[0],
                    NOOP_EXCEPTION_HANDLER,
                    true);
    final ClassLoader loader2 =
            FlinkUserCodeClassLoaders.childFirst(
                    new URL[] {codePath1, codePath2},
                    parent,
                    new String[0],
                    NOOP_EXCEPTION_HANDLER,
                    true);
    final String className = ForStStateBackend.class.getName();
    // initialize=false: we only want distinct Class objects, not static initialization.
    final Class<?> clazz1 = Class.forName(className, false, loader1);
    final Class<?> clazz2 = Class.forName(className, false, loader2);
    assertNotEquals(
            "Test broken - the two reflectively loaded classes are equal", clazz1, clazz2);
    final Object instance1 = clazz1.getConstructor().newInstance();
    final Object instance2 = clazz2.getConstructor().newInstance();
    final String tempDir = tmp.newFolder().getAbsolutePath();
    final Method meth1 =
            clazz1.getDeclaredMethod("ensureForStIsLoaded", String.class, Executor.class);
    final Method meth2 =
            clazz2.getDeclaredMethod("ensureForStIsLoaded", String.class, Executor.class);
    meth1.setAccessible(true);
    meth2.setAccessible(true);
    // if all is well, these methods can both complete successfully
    meth1.invoke(instance1, tempDir, Executors.directExecutor());
    meth2.invoke(instance2, tempDir, Executors.directExecutor());
}
This test validates that the ForSt JNI library loading works properly in the presence of the ForSt code being loaded dynamically via reflection. That can happen when ForSt is in the user code JAR, or in certain test setups. TODO: test working with both ForSt and RocksDB
testTwoSeparateClassLoaders
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/ForStMultiClassLoaderTest.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/ForStMultiClassLoaderTest.java
Apache-2.0
/**
 * Guards that the shared resources are released after {@code ForStResourceContainer#close()}
 * when the container was created with an {@code OpaqueMemoryResource}.
 */
@Test
public void testSharedResourcesAfterClose() throws Exception {
    OpaqueMemoryResource<ForStSharedResources> memoryResource = getSharedResources();
    ForStResourceContainer resourceContainer =
            new ForStResourceContainer(null, memoryResource);
    resourceContainer.close();
    // After closing the container, the shared native handles must have been disposed.
    ForStSharedResources handle = memoryResource.getResourceHandle();
    assertThat(handle.getCache().isOwningHandle(), is(false));
    assertThat(handle.getWriteBufferManager().isOwningHandle(), is(false));
}
Guard the shared resources will be released after {@link ForStResourceContainer#close()} when the {@link ForStResourceContainer} instance is initiated with {@link OpaqueMemoryResource}. @throws Exception if unexpected error happened.
testSharedResourcesAfterClose
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/ForStResourceContainerTest.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/ForStResourceContainerTest.java
Apache-2.0
@Test
public void testGetDbOptionsWithSharedResources() throws Exception {
    final int numOptionsToCreate = 20;
    OpaqueMemoryResource<ForStSharedResources> sharedResources = getSharedResources();
    ForStResourceContainer resourceContainer =
            new ForStResourceContainer(null, sharedResources);

    // Collect the write buffer managers of many independently created DBOptions.
    HashSet<WriteBufferManager> observedManagers = new HashSet<>();
    for (int created = 0; created < numOptionsToCreate; created++) {
        DBOptions dbOptions = resourceContainer.getDbOptions();
        observedManagers.add(getWriteBufferManager(dbOptions));
    }

    // Every DBOptions instance must share the single manager owned by the shared resources.
    assertThat(observedManagers.size(), is(1));
    assertThat(
            observedManagers.iterator().next(),
            is(sharedResources.getResourceHandle().getWriteBufferManager()));

    resourceContainer.close();
}
Guard that {@link ForStResourceContainer#getDbOptions()} shares the same {@link WriteBufferManager} instance if the {@link ForStResourceContainer} instance is initiated with {@link OpaqueMemoryResource}. @throws Exception if unexpected error happened.
testGetDbOptionsWithSharedResources
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/ForStResourceContainerTest.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/ForStResourceContainerTest.java
Apache-2.0
@Test public void testDefaultDbLogDir() throws Exception { final ForStStateBackend backend = new ForStStateBackend(); final File logFile = File.createTempFile(getClass().getSimpleName() + "-", ".log"); // set the environment variable 'log.file' with the Flink log file location System.setProperty("log.file", logFile.getPath()); try (ForStResourceContainer container = backend.createOptionsAndResourceContainer(new Path(tempFolder.toString()))) { assertEquals( ForStConfigurableOptions.LOG_LEVEL.defaultValue(), container.getDbOptions().infoLogLevel()); assertEquals(logFile.getParent(), container.getDbOptions().dbLogDir()); } finally { logFile.delete(); } StringBuilder longInstanceBasePath = new StringBuilder(tempFolder.newFolder().getAbsolutePath()); while (longInstanceBasePath.length() < 255) { longInstanceBasePath.append("/append-for-long-path"); } try (ForStResourceContainer container = backend.createOptionsAndResourceContainer( new Path(longInstanceBasePath.toString()))) { assertTrue(container.getDbOptions().dbLogDir().isEmpty()); } finally { logFile.delete(); } }
Tests that the DB log directory defaults to the parent directory of the Flink log file (taken from the 'log.file' system property), and that an overly long instance base path falls back to an empty DB log dir.
testDefaultDbLogDir
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/ForStStateBackendConfigTest.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/ForStStateBackendConfigTest.java
Apache-2.0
@Test(expected = IllegalArgumentException.class)
public void testSetEmptyPaths() throws Exception {
    // Zero storage paths are invalid and must be rejected immediately.
    ForStStateBackend stateBackend = new ForStStateBackend();
    stateBackend.setLocalDbStoragePaths();
}
Validates that empty arguments for the local DB path are invalid.
testSetEmptyPaths
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/ForStStateBackendConfigTest.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/ForStStateBackendConfigTest.java
Apache-2.0
/**
 * Validates loading the ForSt state backend — both via its shortcut name ("forst") and via
 * its factory class name — with additional parameters from the cluster configuration.
 */
@Test
public void testLoadForStStateBackend() throws Exception {
    final String localDir1 = tmp.newFolder().getAbsolutePath();
    final String localDir2 = tmp.newFolder().getAbsolutePath();
    final String localDirs = localDir1 + File.pathSeparator + localDir2;
    final boolean incremental = !CheckpointingOptions.INCREMENTAL_CHECKPOINTS.defaultValue();

    // config1: select the backend via its shortcut name
    final Configuration config1 = new Configuration();
    config1.setString(backendKey, "forst");
    config1.set(ForStOptions.LOCAL_DIRECTORIES, localDirs);
    config1.set(CheckpointingOptions.INCREMENTAL_CHECKPOINTS, incremental);

    // config2: select the backend via its factory class name
    final Configuration config2 = new Configuration();
    config2.setString(backendKey, ForStStateBackendFactory.class.getName());
    config2.set(ForStOptions.LOCAL_DIRECTORIES, localDirs);
    config2.set(CheckpointingOptions.INCREMENTAL_CHECKPOINTS, incremental);

    StateBackend backend1 = StateBackendLoader.loadStateBackendFromConfig(config1, cl, null);
    StateBackend backend2 = StateBackendLoader.loadStateBackendFromConfig(config2, cl, null);

    assertTrue(backend1 instanceof ForStStateBackend);
    assertTrue(backend2 instanceof ForStStateBackend);

    ForStStateBackend fs1 = (ForStStateBackend) backend1;
    // Fixed copy-paste bug: this previously cast backend1 a second time, so the
    // factory-class-name configuration path (backend2) was never actually verified.
    ForStStateBackend fs2 = (ForStStateBackend) backend2;

    checkPaths(fs1.getLocalDbStoragePaths(), localDir1, localDir2);
    checkPaths(fs2.getLocalDbStoragePaths(), localDir1, localDir2);
}
Validates loading the ForSt state backend — both by its shortcut name and by its factory class name — with additional parameters from the cluster configuration.
testLoadForStStateBackend
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/ForStStateBackendFactoryTest.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/ForStStateBackendFactoryTest.java
Apache-2.0
@Test void testTransferHeadPartCorrectly() throws Exception { File checkpointPrivateFolder = TempDirUtils.newFolder(temporaryFolder, "private"); Path checkpointPrivateDirectory = Path.fromLocalFile(checkpointPrivateFolder); File checkpointSharedFolder = TempDirUtils.newFolder(temporaryFolder, "shared"); Path checkpointSharedDirectory = Path.fromLocalFile(checkpointSharedFolder); FileSystem fileSystem = checkpointPrivateDirectory.getFileSystem(); int fileStateSizeThreshold = 1024; int headBytes = 512; // make sure just a part of origin state file int writeBufferSize = 4096; FsCheckpointStreamFactory checkpointStreamFactory = new FsCheckpointStreamFactory( fileSystem, checkpointPrivateDirectory, checkpointSharedDirectory, fileStateSizeThreshold, writeBufferSize); String localFolder = "local"; TempDirUtils.newFolder(temporaryFolder, localFolder); Path sstFile = generateRandomSstFile(localFolder, 1, fileStateSizeThreshold); try (ForStStateDataTransfer stateTransfer = new ForStStateDataTransfer(5)) { HandleAndLocalPath handleAndLocalPath = stateTransfer.transferFileToCheckpointFs( SnapshotType.SharingFilesStrategy.FORWARD_BACKWARD, sstFile, headBytes, checkpointStreamFactory, CheckpointedStateScope.SHARED, new CloseableRegistry(), new CloseableRegistry(), false); assertStateContentEqual( sstFile, headBytes, handleAndLocalPath.getHandle().openInputStream()); } }
Test that transfer file head part correctly.
testTransferHeadPartCorrectly
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/datatransfer/ForStStateDataTransferTest.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/datatransfer/ForStStateDataTransferTest.java
Apache-2.0
@Test
public void testMultiThreadRestoreCorrectly() throws Exception {
    final int numRemoteHandles = 3;
    final int numSubHandles = 6;

    byte[][][] contents = createContents(numRemoteHandles, numSubHandles);

    // Build one transfer request per remote handle, each with its own destination folder.
    List<StateHandleTransferSpec> transferSpecs = new ArrayList<>(numRemoteHandles);
    for (int handleIdx = 0; handleIdx < numRemoteHandles; ++handleIdx) {
        transferSpecs.add(
                createTransferRequestForContent(
                        Path.fromLocalFile(TempDirUtils.newFolder(temporaryFolder)),
                        contents[handleIdx],
                        handleIdx));
    }

    // Transfer all requests concurrently on 4 threads.
    try (ForStStateDataTransfer stateTransfer = new ForStStateDataTransfer(4)) {
        stateTransfer.transferAllStateDataToDirectory(
                transferSpecs, new CloseableRegistry(), RecoveryClaimMode.DEFAULT);
    }

    // Every destination must exist and every sub-handle must have been written verbatim.
    for (int handleIdx = 0; handleIdx < numRemoteHandles; ++handleIdx) {
        Path destination = transferSpecs.get(handleIdx).getTransferDestination();
        assertTrue(destination.getFileSystem().exists(destination));
        for (int subIdx = 0; subIdx < numSubHandles; ++subIdx) {
            assertStateContentEqual(
                    contents[handleIdx][subIdx],
                    new Path(
                            destination,
                            String.format("sharedState-%d-%d", handleIdx, subIdx)));
        }
    }
}
Tests that transfer files to forst working dir with multi-thread correctly.
testMultiThreadRestoreCorrectly
java
apache/flink
flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/datatransfer/ForStStateDataTransferTest.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-forst/src/test/java/org/apache/flink/state/forst/datatransfer/ForStStateDataTransferTest.java
Apache-2.0
/**
 * Returns the number of logically present entries in this map: the total node count minus
 * the nodes that have been logically removed (still physically present, presumably retained
 * for in-flight snapshots — confirm against the removal logic elsewhere in this class).
 */
@Override public int size() { return totalSize - logicallyRemovedNodes.size(); }
Returns the number of logically present entries in the state map: the total number of nodes minus those that have been logically removed.
size
java
apache/flink
flink-state-backends/flink-statebackend-heap-spillable/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteSkipListStateMap.java
https://github.com/apache/flink/blob/master/flink-state-backends/flink-statebackend-heap-spillable/src/main/java/org/apache/flink/runtime/state/heap/CopyOnWriteSkipListStateMap.java
Apache-2.0