code
stringlengths 25
201k
| docstring
stringlengths 19
96.2k
| func_name
stringlengths 0
235
| language
stringclasses 1
value | repo
stringlengths 8
51
| path
stringlengths 11
314
| url
stringlengths 62
377
| license
stringclasses 7
values |
|---|---|---|---|---|---|---|---|
/**
 * Translates the given {@link Pipeline} to a {@link JobGraph} while the user class loader is
 * installed as the thread's context class loader.
 *
 * @param userClassloader class loader holding the user code
 * @param pipeline the pipeline to translate
 * @param configuration configuration used during translation
 * @param defaultParallelism parallelism applied where none is set explicitly
 * @return the translated job graph
 */
public static JobGraph getJobGraphUnderUserClassLoader(
        final ClassLoader userClassloader,
        final Pipeline pipeline,
        final Configuration configuration,
        final int defaultParallelism) {
    final Thread currentThread = Thread.currentThread();
    final ClassLoader previousContextClassLoader = currentThread.getContextClassLoader();
    try {
        // Swap in the user class loader for the duration of the translation.
        currentThread.setContextClassLoader(userClassloader);
        return FlinkPipelineTranslationUtil.getJobGraph(
                userClassloader, pipeline, configuration, defaultParallelism);
    } finally {
        // Always restore the previous context class loader.
        currentThread.setContextClassLoader(previousContextClassLoader);
    }
}
|
Transmogrifies the given {@link Pipeline} under the userClassloader to a {@link JobGraph}.
|
getJobGraphUnderUserClassLoader
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/FlinkPipelineTranslationUtil.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/FlinkPipelineTranslationUtil.java
|
Apache-2.0
|
/**
 * Executes the "info" action: builds the packaged program from the given arguments and prints
 * its JSON execution plan and optional description to stdout.
 *
 * @param args command line arguments for the info action
 * @throws Exception if parsing the command line, building the program, or extracting the
 *     pipeline fails
 */
protected void info(String[] args) throws Exception {
LOG.info("Running 'info' command.");
final Options commandOptions = CliFrontendParser.getInfoCommandOptions();
final CommandLine commandLine = getCommandLine(commandOptions, args, true);
final ProgramOptions programOptions = ProgramOptions.create(commandLine);
// evaluate help flag
if (commandLine.hasOption(HELP_OPTION.getOpt())) {
CliFrontendParser.printHelpForInfo();
return;
}
// -------- build the packaged program -------------
LOG.info("Building program from JAR file");
// Declared outside the try so the finally block can close it.
PackagedProgram program = null;
try {
LOG.info("Creating program plan dump");
final CustomCommandLine activeCommandLine =
validateAndGetActiveCommandLine(checkNotNull(commandLine));
final Configuration effectiveConfiguration =
getEffectiveConfiguration(activeCommandLine, commandLine);
updateEffectiveConfiguration(
effectiveConfiguration,
programOptions,
getJobJarAndDependencies(programOptions));
program = buildProgram(programOptions, effectiveConfiguration);
// Fall back to the configured default parallelism when none was given on the command line.
int parallelism = programOptions.getParallelism();
if (ExecutionConfig.PARALLELISM_DEFAULT == parallelism) {
parallelism = getDefaultParallelism(effectiveConfiguration);
}
// Extract the pipeline without executing the job (last argument suppresses execution).
Pipeline pipeline =
PackagedProgramUtils.getPipelineFromProgram(
program, effectiveConfiguration, parallelism, true);
String jsonPlan =
FlinkPipelineTranslationUtil.translateToJSONExecutionPlan(
program.getUserCodeClassLoader(), pipeline);
if (jsonPlan != null) {
System.out.println(
"----------------------- Execution Plan -----------------------");
System.out.println(jsonPlan);
System.out.println(
"--------------------------------------------------------------");
} else {
System.out.println("JSON plan could not be generated.");
}
String description = program.getDescription();
System.out.println();
if (description != null) {
System.out.println(description);
} else {
System.out.println("No description provided.");
}
} finally {
// Release the program's resources (e.g. extracted jar files) in every case.
if (program != null) {
program.close();
}
}
}
|
Executes the info action.
@param args Command line arguments for the info action.
|
info
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
Apache-2.0
|
/**
 * Executes the "stop" action: stops a running job with a savepoint, either synchronously or in
 * detached mode (returning only the trigger id).
 *
 * @param args command line arguments for the stop action
 * @throws Exception if parsing the command line or triggering the stop-with-savepoint fails
 */
protected void stop(String[] args) throws Exception {
LOG.info("Running 'stop-with-savepoint' command.");
final Options commandOptions = CliFrontendParser.getStopCommandOptions();
final CommandLine commandLine = getCommandLine(commandOptions, args, false);
final StopOptions stopOptions = new StopOptions(commandLine);
if (stopOptions.isPrintHelp()) {
CliFrontendParser.printHelpForStop(customCommandLines);
return;
}
final String[] cleanedArgs = stopOptions.getArgs();
// Only honor an explicit target directory when positional args are present; otherwise the
// cluster-side default savepoint location is used.
final String targetDirectory =
stopOptions.hasSavepointFlag() && cleanedArgs.length > 0
? stopOptions.getTargetDirectory()
: null; // the default savepoint location is going to be used in this case.
// With no positional args, the value given to the savepoint flag is interpreted as the job id.
final JobID jobId =
cleanedArgs.length != 0
? parseJobId(cleanedArgs[0])
: parseJobId(stopOptions.getTargetDirectory());
final boolean advanceToEndOfEventTime = stopOptions.shouldAdvanceToEndOfEventTime();
final SavepointFormatType formatType = stopOptions.getFormatType();
logAndSysout(
(advanceToEndOfEventTime ? "Draining job " : "Suspending job ")
+ "\""
+ jobId
+ "\" with a "
+ formatType
+ " savepoint.");
final CustomCommandLine activeCommandLine = validateAndGetActiveCommandLine(commandLine);
runClusterAction(
activeCommandLine,
commandLine,
(clusterClient, effectiveConfiguration) -> {
// Trigger savepoint in detached mode
if (stopOptions.isDetached()) {
// trigger stop-with-savepoint in detached mode and
// return the trigger id immediately
stopWithDetachedSavepoint(
clusterClient,
jobId,
advanceToEndOfEventTime,
targetDirectory,
formatType,
getClientTimeout(effectiveConfiguration));
} else {
stopWithSavepoint(
clusterClient,
jobId,
advanceToEndOfEventTime,
targetDirectory,
formatType,
getClientTimeout(effectiveConfiguration));
}
});
}
|
Executes the STOP action.
@param args Command line arguments for the stop action.
|
stop
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
Apache-2.0
|
/**
 * Executes the "savepoint" action: either disposes an existing savepoint (with {@code -d}) or
 * triggers a new one for the given job, optionally in detached mode.
 *
 * @param args command line arguments for the savepoint action
 * @throws Exception if parsing the command line or the cluster action fails
 */
protected void savepoint(String[] args) throws Exception {
LOG.info("Running 'savepoint' command.");
final Options commandOptions = CliFrontendParser.getSavepointCommandOptions();
final CommandLine commandLine = getCommandLine(commandOptions, args, false);
final SavepointOptions savepointOptions = new SavepointOptions(commandLine);
// evaluate help flag
if (savepointOptions.isPrintHelp()) {
CliFrontendParser.printHelpForSavepoint(customCommandLines);
return;
}
final CustomCommandLine activeCommandLine = validateAndGetActiveCommandLine(commandLine);
// Disposal mode: delete an existing savepoint instead of triggering a new one.
if (savepointOptions.isDispose()) {
runClusterAction(
activeCommandLine,
commandLine,
(clusterClient, effectiveConfiguration) ->
disposeSavepoint(
clusterClient,
savepointOptions.getSavepointPath(),
getClientTimeout(effectiveConfiguration)));
} else {
// Trigger mode: first positional argument is the job id, second (optional) the target
// directory.
String[] cleanedArgs = savepointOptions.getArgs();
final JobID jobId;
if (cleanedArgs.length >= 1) {
String jobIdString = cleanedArgs[0];
jobId = parseJobId(jobIdString);
} else {
throw new CliArgsException(
"Missing JobID. " + "Specify a Job ID to trigger a savepoint.");
}
final String savepointDirectory;
if (cleanedArgs.length >= 2) {
savepointDirectory = cleanedArgs[1];
} else {
savepointDirectory = null;
}
// Print superfluous arguments
if (cleanedArgs.length >= 3) {
logAndSysout(
"Provided more arguments than required. Ignoring not needed arguments.");
}
runClusterAction(
activeCommandLine,
commandLine,
(clusterClient, effectiveConfiguration) -> {
// Trigger savepoint in detached mode
if (savepointOptions.isDetached()) {
// trigger savepoint in detached mode and
// return the trigger id immediately
triggerDetachedSavepoint(
clusterClient,
jobId,
savepointDirectory,
savepointOptions.getFormatType(),
getClientTimeout(effectiveConfiguration));
} else {
triggerSavepoint(
clusterClient,
jobId,
savepointDirectory,
savepointOptions.getFormatType(),
getClientTimeout(effectiveConfiguration));
}
});
}
}
|
Executes the SAVEPOINT action.
@param args Command line arguments for the savepoint action.
|
savepoint
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
Apache-2.0
|
/**
 * Sends a savepoint-disposal request for the given path to the job manager and waits for the
 * acknowledgement.
 *
 * @param clusterClient client connected to the target cluster
 * @param savepointPath path of the savepoint to dispose; must not be null
 * @param clientTimeout maximum time to wait for the acknowledgement
 * @throws FlinkException if the disposal fails or times out
 */
private void disposeSavepoint(
        ClusterClient<?> clusterClient, String savepointPath, Duration clientTimeout)
        throws FlinkException {
    // A missing savepoint path is a usage error; fail fast with a usage hint.
    checkNotNull(
            savepointPath,
            "Missing required argument: savepoint path. "
                    + "Usage: bin/flink savepoint -d <savepoint-path>");

    logAndSysout("Disposing savepoint '" + savepointPath + "'.");
    final CompletableFuture<Acknowledge> disposalFuture =
            clusterClient.disposeSavepoint(savepointPath);
    logAndSysout("Waiting for response...");

    try {
        // Block until the cluster acknowledges, bounded by the configured client timeout.
        disposalFuture.get(clientTimeout.toMillis(), TimeUnit.MILLISECONDS);
    } catch (Exception e) {
        throw new FlinkException("Failed to dispose the savepoint '" + savepointPath + "'.", e);
    }

    logAndSysout("Savepoint '" + savepointPath + "' disposed.");
}
|
Sends a SavepointDisposalRequest to the job manager.
|
disposeSavepoint
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
Apache-2.0
|
/**
 * Executes the "checkpoint" action: triggers a checkpoint of the requested type for the given
 * job.
 *
 * @param args command line arguments for the checkpoint action
 * @throws Exception if parsing the command line or triggering the checkpoint fails
 */
protected void checkpoint(String[] args) throws Exception {
LOG.info("Running 'checkpoint' command.");
final Options commandOptions = CliFrontendParser.getCheckpointCommandOptions();
final CommandLine commandLine = getCommandLine(commandOptions, args, false);
final CheckpointOptions checkpointOptions = new CheckpointOptions(commandLine);
// evaluate help flag
if (checkpointOptions.isPrintHelp()) {
CliFrontendParser.printHelpForCheckpoint(customCommandLines);
return;
}
final CustomCommandLine activeCommandLine = validateAndGetActiveCommandLine(commandLine);
// The first positional argument is the job id; it is mandatory.
String[] cleanedArgs = checkpointOptions.getArgs();
final JobID jobId;
if (cleanedArgs.length >= 1) {
String jobIdString = cleanedArgs[0];
jobId = parseJobId(jobIdString);
} else {
throw new CliArgsException(
"Missing JobID. " + "Specify a Job ID to manipulate a checkpoint.");
}
runClusterAction(
activeCommandLine,
commandLine,
(clusterClient, effectiveConfiguration) ->
triggerCheckpoint(
clusterClient,
jobId,
checkpointOptions.getCheckpointType(),
getClientTimeout(effectiveConfiguration)));
}
|
Executes the CHECKPOINT action.
@param args Command line arguments for the checkpoint action.
|
checkpoint
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
Apache-2.0
|
/**
 * Creates a {@link PackagedProgram} from the given command line options, using this frontend's
 * default configuration.
 *
 * @param runOptions the parsed program options
 * @return the packaged program (upon success)
 */
PackagedProgram buildProgram(final ProgramOptions runOptions)
throws FileNotFoundException, ProgramInvocationException, CliArgsException {
return buildProgram(runOptions, configuration);
}
|
Creates a Packaged program from the given command line options.
@return A PackagedProgram (upon success)
|
buildProgram
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
Apache-2.0
|
/**
 * Creates a {@link PackagedProgram} from the given command line options and the provided
 * (effective) configuration.
 *
 * @param runOptions the parsed program options; validated before use
 * @param configuration configuration to attach to the program
 * @return the packaged program (upon success)
 * @throws FileNotFoundException if the given jar path does not point to an existing file
 * @throws ProgramInvocationException if the program cannot be instantiated
 * @throws CliArgsException if the options are inconsistent
 */
PackagedProgram buildProgram(final ProgramOptions runOptions, final Configuration configuration)
        throws FileNotFoundException, ProgramInvocationException, CliArgsException {
    runOptions.validate();

    // A jar file is optional: the entry point class may also be resolved from the classpath.
    final String jarFilePath = runOptions.getJarFilePath();
    final File jarFile = jarFilePath == null ? null : getJarFile(jarFilePath);

    return PackagedProgram.newBuilder()
            .setJarFile(jarFile)
            .setUserClassPaths(runOptions.getClasspaths())
            .setEntryPointClassName(runOptions.getEntryPointClassName())
            .setConfiguration(configuration)
            .setSavepointRestoreSettings(runOptions.getSavepointRestoreSettings())
            .setArguments(runOptions.getProgramArgs())
            .build();
}
|
Creates a Packaged program from the given command line options and the
effectiveConfiguration.
@return A PackagedProgram (upon success)
|
buildProgram
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
Apache-2.0
|
/**
 * Resolves the given path to an existing, regular jar file.
 *
 * @param jarFilePath the path of the jar file
 * @return the jar file
 * @throws FileNotFoundException if the path does not exist or is not a regular file
 */
private File getJarFile(String jarFilePath) throws FileNotFoundException {
    final File jarFile = new File(jarFilePath);
    // Guard clauses: reject missing paths and non-regular files (e.g. directories).
    if (!jarFile.exists()) {
        throw new FileNotFoundException("JAR file does not exist: " + jarFile);
    }
    if (!jarFile.isFile()) {
        throw new FileNotFoundException("JAR file is not a file: " + jarFile);
    }
    return jarFile;
}
|
Gets the JAR file from the path.
@param jarFilePath The path of JAR file
@return The JAR file
@throws FileNotFoundException The JAR file does not exist.
|
getJarFile
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
Apache-2.0
|
/**
 * Logs and prints an error message for invalid command line arguments.
 *
 * @param e the exception to display
 * @return the return code for the process (always 1)
 */
private static int handleArgException(CliArgsException e) {
LOG.error("Invalid command line arguments.", e);
System.out.println(e.getMessage());
System.out.println();
System.out.println("Use the help option (-h or --help) to get help on the command.");
return 1;
}
|
Displays an exception message for incorrect command line arguments.
@param e The exception to display.
@return The return code for the process.
|
handleArgException
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
Apache-2.0
|
/**
 * Logs and prints an error message for an incorrectly parametrized program.
 *
 * @param e the exception to display
 * @return the return code for the process (always 1)
 */
private static int handleParametrizationException(ProgramParametrizationException e) {
LOG.error("Program has not been parametrized properly.", e);
System.err.println(e.getMessage());
return 1;
}
|
Displays an optional exception message for incorrect program parametrization.
@param e The exception to display.
@return The return code for the process.
|
handleParametrizationException
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
Apache-2.0
|
/**
 * Prints a message for a program that did not contain a Flink job to execute.
 *
 * @return the return code for the process (always 1)
 */
private static int handleMissingJobException() {
System.err.println();
System.err.println(
"The program didn't contain a Flink job. "
+ "Perhaps you forgot to call execute() on the execution environment.");
return 1;
}
|
Displays a message for a program without a job to execute.
@return The return code for the process.
|
handleMissingJobException
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
Apache-2.0
|
/**
 * Retrieves a {@link ClusterClient} for the cluster identified by the effective configuration
 * and runs the given {@link ClusterAction} against it, closing client and descriptor afterwards.
 *
 * @param activeCommandLine the command line used to derive the effective configuration
 * @param commandLine the parsed command line options
 * @param clusterAction the action to run against the retrieved cluster client
 * @param <ClusterID> type of the cluster id
 * @throws FlinkException if no cluster id is configured or the action fails
 */
private <ClusterID> void runClusterAction(
CustomCommandLine activeCommandLine,
CommandLine commandLine,
ClusterAction<ClusterID> clusterAction)
throws FlinkException {
final Configuration effectiveConfiguration =
getEffectiveConfiguration(activeCommandLine, commandLine);
LOG.debug(
"Effective configuration after Flink conf, and custom commandline: {}",
effectiveConfiguration);
final ClusterClientFactory<ClusterID> clusterClientFactory =
clusterClientServiceLoader.getClusterClientFactory(effectiveConfiguration);
// Without a cluster id we cannot know which cluster to talk to.
final ClusterID clusterId = clusterClientFactory.getClusterId(effectiveConfiguration);
if (clusterId == null) {
throw new FlinkException(
"No cluster id was specified. Please specify a cluster to which you would like to connect.");
}
// Nested try-with-resources: the client must be closed before its descriptor.
try (final ClusterDescriptor<ClusterID> clusterDescriptor =
clusterClientFactory.createClusterDescriptor(effectiveConfiguration)) {
try (final ClusterClient<ClusterID> clusterClient =
clusterDescriptor.retrieve(clusterId).getClusterClient()) {
clusterAction.runAction(clusterClient, effectiveConfiguration);
}
}
}
|
Retrieves the {@link ClusterClient} from the given {@link CustomCommandLine} and runs the
given {@link ClusterAction} against it.
@param activeCommandLine to create the {@link ClusterDescriptor} from
@param commandLine containing the parsed command line options
@param clusterAction the cluster action to run against the retrieved {@link ClusterClient}.
@param <ClusterID> type of the cluster id
@throws FlinkException if something goes wrong
|
runClusterAction
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
Apache-2.0
|
/**
 * Parses the command line arguments and starts the requested action.
 *
 * @param args command line arguments of the client; the first entry selects the action
 * @return the return code of the program (0 on success, 1 on failure)
 */
public int parseAndRun(String[] args) {
    // check for action
    if (args.length < 1) {
        CliFrontendParser.printHelp(customCommandLines);
        System.out.println("Please specify an action.");
        return 1;
    }

    // get action
    String action = args[0];

    // remove action from parameters
    final String[] params = Arrays.copyOfRange(args, 1, args.length);

    try {
        // do action
        switch (action) {
            case ACTION_RUN:
                run(params);
                return 0;
            case ACTION_LIST:
                list(params);
                return 0;
            case ACTION_INFO:
                info(params);
                return 0;
            case ACTION_CANCEL:
                cancel(params);
                return 0;
            case ACTION_STOP:
                stop(params);
                return 0;
            case ACTION_SAVEPOINT:
                savepoint(params);
                return 0;
            case ACTION_CHECKPOINT:
                checkpoint(params);
                return 0;
            case "-h":
            case "--help":
                CliFrontendParser.printHelp(customCommandLines);
                return 0;
            case "-v":
            case "--version":
                String version = EnvironmentInformation.getVersion();
                String commitID = EnvironmentInformation.getRevisionInformation().commitId;
                System.out.print("Version: " + version);
                System.out.println(
                        commitID.equals(EnvironmentInformation.UNKNOWN)
                                ? ""
                                : ", Commit ID: " + commitID);
                return 0;
            default:
                System.out.printf("\"%s\" is not a valid action.\n", action);
                System.out.println();
                // Fix: "checkpoint" was missing from this list even though
                // ACTION_CHECKPOINT is a handled action above.
                System.out.println(
                        "Valid actions are \"run\", \"list\", \"info\", \"savepoint\", \"stop\", \"checkpoint\", or \"cancel\".");
                System.out.println();
                System.out.println(
                        "Specify the version option (-v or --version) to print Flink version.");
                System.out.println();
                System.out.println(
                        "Specify the help option (-h or --help) to get help on the command.");
                return 1;
        }
    } catch (CliArgsException ce) {
        return handleArgException(ce);
    } catch (ProgramParametrizationException ppe) {
        return handleParametrizationException(ppe);
    } catch (ProgramMissingJobException pmje) {
        return handleMissingJobException();
    } catch (Exception e) {
        // Catch-all boundary: any unexpected failure is reported via handleError.
        return handleError(e);
    }
}
|
Parses the command line arguments and starts the requested action.
@param args command line arguments of the client.
@return The return code of the program
|
parseAndRun
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
Apache-2.0
|
/**
 * Finds the active custom command line for the given parsed arguments.
 *
 * @param commandLine the parsed command line input
 * @return the first custom command line that reports itself active (at most one should be)
 * @throws IllegalStateException if none of the registered command lines is active
 */
public CustomCommandLine validateAndGetActiveCommandLine(CommandLine commandLine) {
    LOG.debug("Custom commandlines: {}", customCommandLines);
    // Return the first registered command line that is active for this input.
    for (CustomCommandLine candidate : customCommandLines) {
        LOG.debug(
                "Checking custom commandline {}, isActive: {}",
                candidate,
                candidate.isActive(commandLine));
        if (candidate.isActive(commandLine)) {
            return candidate;
        }
    }
    throw new IllegalStateException("No valid command-line found.");
}
|
Gets the custom command-line for the arguments.
@param commandLine The input to the command-line.
@return custom command-line which is active (may only be one at a time)
|
validateAndGetActiveCommandLine
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
Apache-2.0
|
/**
 * Loads and instantiates a {@link CustomCommandLine} implementation by reflection.
 *
 * @param className the fully-qualified class name to load
 * @param params the constructor parameters; none may be null
 * @return the instantiated custom command line
 * @throws Exception if the class cannot be loaded, is not a {@code CustomCommandLine}, or no
 *     matching constructor exists
 */
private static CustomCommandLine loadCustomCommandLine(String className, Object... params)
        throws Exception {
    final Class<? extends CustomCommandLine> commandLineClass =
            Class.forName(className).asSubclass(CustomCommandLine.class);

    // Derive the constructor signature from the runtime types of the given parameters.
    final Class<?>[] parameterTypes = new Class<?>[params.length];
    for (int i = 0; i < params.length; i++) {
        checkNotNull(params[i], "Parameters for custom command-lines may not be null.");
        parameterTypes[i] = params[i].getClass();
    }

    final Constructor<? extends CustomCommandLine> constructor =
            commandLineClass.getConstructor(parameterTypes);
    return constructor.newInstance(params);
}
|
Loads a class from the classpath that implements the CustomCommandLine interface.
@param className The fully-qualified class name to load.
@param params The constructor parameters
|
loadCustomCommandLine
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
Apache-2.0
|
/**
 * Reads the client timeout from the effective configuration.
 *
 * @param effectiveConfiguration the effective Flink configuration
 * @return the configured client timeout
 */
private Duration getClientTimeout(Configuration effectiveConfiguration) {
return effectiveConfiguration.get(ClientOptions.CLIENT_TIMEOUT);
}
|
Get client timeout from command line via effective configuration.
@param effectiveConfiguration Flink effective configuration.
@return client timeout with Duration type
|
getClientTimeout
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
Apache-2.0
|
/**
 * Reads the default parallelism from the effective configuration.
 *
 * @param effectiveConfiguration the effective Flink configuration
 * @return the configured default parallelism
 */
private int getDefaultParallelism(Configuration effectiveConfiguration) {
return effectiveConfiguration.get(CoreOptions.DEFAULT_PARALLELISM);
}
|
Get default parallelism from command line via effective configuration.
@param effectiveConfiguration Flink effective configuration.
@return default parallelism.
|
getDefaultParallelism
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontend.java
|
Apache-2.0
|
/**
 * Merges the given {@link Options} into a new {@code Options} object.
 *
 * @param optionsA options to merge, may be null if none
 * @param optionsB options to merge, may be null if none
 * @return a new {@code Options} instance containing the options of both inputs
 */
public static Options mergeOptions(@Nullable Options optionsA, @Nullable Options optionsB) {
    final Options merged = new Options();
    // Copy the options of each non-null input, A first, then B.
    for (Options source : new Options[] {optionsA, optionsB}) {
        if (source == null) {
            continue;
        }
        for (Option option : source.getOptions()) {
            merged.addOption(option);
        }
    }
    return merged;
}
|
Merges the given {@link Options} into a new Options object.
@param optionsA options to merge, can be null if none
@param optionsB options to merge, can be null if none
@return a new {@code Options} instance containing the options of both inputs
|
mergeOptions
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontendParser.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CliFrontendParser.java
|
Apache-2.0
|
/**
 * Returns whether the help flag was set on the command line.
 *
 * @return true if help should be printed
 */
public boolean isPrintHelp() {
return printHelp;
}
|
Base class for all options parsed from the command line. Contains options for printing help and
the JobManager address.
|
isPrintHelp
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CommandLineOptions.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CommandLineOptions.java
|
Apache-2.0
|
/**
 * Parses the given arguments against this command line's general and run options.
 *
 * @param args the raw command line arguments
 * @param stopAtNonOptions whether parsing stops at the first unrecognized token
 * @return the parsed {@link CommandLine}
 * @throws CliArgsException if the arguments cannot be parsed
 */
default CommandLine parseCommandLineOptions(String[] args, boolean stopAtNonOptions)
throws CliArgsException {
final Options options = new Options();
addGeneralOptions(options);
addRunOptions(options);
return CliFrontendParser.parse(options, args, stopAtNonOptions);
}
|
Parses the given arguments against this command line's general and run options and returns
the resulting {@link CommandLine}.
|
parseCommandLineOptions
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/CustomCommandLine.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/CustomCommandLine.java
|
Apache-2.0
|
/**
 * Copies the dynamic properties ({@code -D} style options) from the given {@link CommandLine}
 * into the given {@link Configuration}.
 *
 * @param commandLine the parsed command line holding the dynamic properties
 * @param effectiveConfiguration the configuration to write the properties into
 */
public static void encodeDynamicProperties(
        final CommandLine commandLine, final Configuration effectiveConfiguration) {
    final Properties dynamicProperties =
            commandLine.getOptionProperties(DYNAMIC_PROPERTIES.getOpt());
    for (String key : dynamicProperties.stringPropertyNames()) {
        final String value = dynamicProperties.getProperty(key);
        // A property given without a value (e.g. a bare flag) is encoded as "true".
        effectiveConfiguration.setString(key, value == null ? "true" : value);
    }
}
|
Parses dynamic properties from the given {@link CommandLine} and sets them on the {@link
Configuration}.
|
encodeDynamicProperties
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/DynamicPropertiesUtil.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/DynamicPropertiesUtil.java
|
Apache-2.0
|
/**
 * Creates an {@link ExecutionConfigAccessor} based on the provided {@link Configuration}.
 *
 * @param configuration the configuration to wrap; must not be null
 * @return the accessor wrapping the given configuration
 */
public static ExecutionConfigAccessor fromConfiguration(final Configuration configuration) {
return new ExecutionConfigAccessor(checkNotNull(configuration));
}
|
Creates an {@link ExecutionConfigAccessor} based on the provided {@link Configuration}.
|
fromConfiguration
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/ExecutionConfigAccessor.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/ExecutionConfigAccessor.java
|
Apache-2.0
|
/**
 * Checks whether the command line describes a Python job.
 *
 * @param line the parsed command line
 * @return true if it contains the "-py" or "-pym" options, or if the entry class is the
 *     PyFlink gateway server (i.e. submission comes from the PyFlink shell)
 */
public static boolean isPythonEntryPoint(CommandLine line) {
    if (line.hasOption(PY_OPTION.getOpt()) || line.hasOption(PYMODULE_OPTION.getOpt())) {
        return true;
    }
    // The PyFlink shell submits jobs through the Python gateway server entry class.
    return "org.apache.flink.client.python.PythonGatewayServer"
            .equals(line.getOptionValue(CLASS_OPTION.getOpt()));
}
|
@return True if the commandline contains "-py" or "-pym" options or comes from PyFlink shell,
false otherwise.
|
isPythonEntryPoint
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/cli/ProgramOptionsUtils.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/cli/ProgramOptionsUtils.java
|
Apache-2.0
|
/**
 * Derives a {@link ClusterSpecification} from the given configuration: total process memory for
 * job manager and task managers plus the configured number of task slots.
 *
 * @param configuration the configuration to read from; must not be null
 * @return the derived cluster specification
 */
@Override
public ClusterSpecification getClusterSpecification(Configuration configuration) {
    checkNotNull(configuration);

    final int masterMemoryMB =
            JobManagerProcessUtils.processSpecFromConfig(configuration)
                    .getTotalProcessMemorySize()
                    .getMebiBytes();
    final int workerMemoryMB =
            TaskExecutorProcessUtils.processSpecFromConfig(configuration)
                    .getTotalProcessMemorySize()
                    .getMebiBytes();
    final int numSlotsPerWorker = configuration.get(TaskManagerOptions.NUM_TASK_SLOTS);

    return new ClusterSpecification.ClusterSpecificationBuilder()
            .setMasterMemoryMB(masterMemoryMB)
            .setTaskManagerMemoryMB(workerMemoryMB)
            .setSlotsPerTaskManager(numSlotsPerWorker)
            .createClusterSpecification();
}
|
An abstract {@link ClusterClientFactory} containing some common implementations for different
containerized deployment clusters.
|
getClusterSpecification
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/AbstractContainerizedClusterClientFactory.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/deployment/AbstractContainerizedClusterClientFactory.java
|
Apache-2.0
|
/**
 * Returns the execution target name for Application Mode, or {@link Optional#empty()} if this
 * cluster client factory does not support Application Mode (the default).
 *
 * @return the application-mode target name, if supported
 */
default Optional<String> getApplicationTargetName() {
return Optional.empty();
}
|
Returns the option to be used when trying to execute an application in Application Mode using
this cluster client factory, or an {@link Optional#empty()} if the environment of this
cluster client factory does not support Application Mode.
|
getApplicationTargetName
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/ClusterClientFactory.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/deployment/ClusterClientFactory.java
|
Apache-2.0
|
/**
 * Runs the user program entry point by scheduling a task on the given scheduled executor and
 * composes the resulting job ids into the overall application result.
 *
 * @param dispatcherGateway gateway used for job submission and result retrieval
 * @param scheduledExecutor executor on which the user program is run
 * @param enforceSingleJobExecution whether only a single job execution is allowed
 * @param submitFailedJobOnApplicationError whether a failed-job marker is submitted on error
 * @return a future that completes when all jobs of the application have produced a result
 */
private CompletableFuture<Void> runApplicationAsync(
final DispatcherGateway dispatcherGateway,
final ScheduledExecutor scheduledExecutor,
final boolean enforceSingleJobExecution,
final boolean submitFailedJobOnApplicationError) {
final CompletableFuture<List<JobID>> applicationExecutionFuture = new CompletableFuture<>();
final Set<JobID> tolerateMissingResult = Collections.synchronizedSet(new HashSet<>());
// we need to hand in a future as return value because we need to get those JobIds out
// from the scheduled task that executes the user program
applicationExecutionTask =
scheduledExecutor.schedule(
() ->
runApplicationEntryPoint(
applicationExecutionFuture,
tolerateMissingResult,
dispatcherGateway,
scheduledExecutor,
enforceSingleJobExecution,
submitFailedJobOnApplicationError),
0L,
TimeUnit.MILLISECONDS);
// Once the job ids are known, wait for the results of all submitted jobs.
return applicationExecutionFuture.thenCompose(
jobIds ->
getApplicationResult(
dispatcherGateway,
jobIds,
tolerateMissingResult,
scheduledExecutor));
}
|
Runs the user program entrypoint by scheduling a task on the given {@code scheduledExecutor}.
The returned {@link CompletableFuture} completes when all jobs of the user application
succeed, and completes exceptionally if any of them fails or if job submission fails.
|
runApplicationAsync
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/application/ApplicationDispatcherBootstrap.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/deployment/application/ApplicationDispatcherBootstrap.java
|
Apache-2.0
|
/**
 * Passes a successful {@link JobResult} through unchanged; an unsuccessful result is converted
 * into a future completed exceptionally with the job's failure cause.
 *
 * @param jobResult the future holding the job result to inspect
 * @return a future that fails when the job result indicates failure
 */
private CompletableFuture<JobResult> unwrapJobResultException(
        final CompletableFuture<JobResult> jobResult) {
    return jobResult.thenApply(
            result -> {
                if (!result.isSuccess()) {
                    // Surface the job's failure as an exception on the returned future.
                    throw new CompletionException(
                            UnsuccessfulExecutionException.fromJobResult(
                                    result, application.getUserCodeClassLoader()));
                }
                return result;
            });
}
|
If the given {@link JobResult} indicates success, this passes through the {@link JobResult}.
Otherwise, this returns a future that is finished exceptionally (potentially with an
exception from the {@link JobResult}).
|
unwrapJobResultException
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/application/ApplicationDispatcherBootstrap.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/deployment/application/ApplicationDispatcherBootstrap.java
|
Apache-2.0
|
/**
 * Creates a {@code FromClasspathEntryClassInformationProvider} for the given job class.
 *
 * @param jobClassName the job's entry class name; must not be null
 * @param classpath the classpath the job class should be part of; must not be null
 * @return the provider carrying the given job class name
 */
public static FromClasspathEntryClassInformationProvider create(
String jobClassName, Iterable<URL> classpath) throws IOException, FlinkException {
Preconditions.checkNotNull(jobClassName, "No job class name passed.");
Preconditions.checkNotNull(classpath, "No classpath passed.");
// NOTE(review): the classpath argument is only null-checked here, not scanned; the
// declared IOException/FlinkException are never thrown by this body — confirm whether
// classpath validation was intended.
return new FromClasspathEntryClassInformationProvider(jobClassName);
}
|
Creates a {@code FromClasspathEntryClassInformationProvider} based on the passed job class
and classpath.
@param jobClassName The job's class name.
@param classpath The classpath the job class should be part of.
@return The {@code FromClasspathEntryClassInformationProvider} instances collecting the
necessary information.
@throws IOException If some Jar listed on the classpath wasn't accessible.
@throws FlinkException If the passed job class is not present on the passed classpath.
|
create
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromClasspathEntryClassInformationProvider.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromClasspathEntryClassInformationProvider.java
|
Apache-2.0
|
/**
 * Creates a {@code FromClasspathEntryClassInformationProvider} by scanning the passed classpath
 * for the entry class providing the main method.
 *
 * @param classpath the classpath the job class is expected to be part of
 * @return the provider carrying the job class found on the passed classpath
 * @throws IOException if a jar listed on the classpath was not accessible
 * @throws FlinkException if no or multiple main-method classes were found
 */
public static FromClasspathEntryClassInformationProvider createFromClasspath(
Iterable<URL> classpath) throws IOException, FlinkException {
return new FromClasspathEntryClassInformationProvider(
extractJobClassFromUrlClasspath(classpath));
}
|
Creates a {@code FromClasspathEntryClassInformationProvider} looking for the entry class
providing the main method on the passed classpath.
@param classpath The classpath the job class is expected to be part of.
@return The {@code FromClasspathEntryClassInformationProvider} providing the job class found
on the passed classpath.
@throws IOException If some Jar listed on the classpath wasn't accessible.
@throws FlinkException Either no or too many main methods were found on the classpath.
|
createFromClasspath
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromClasspathEntryClassInformationProvider.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromClasspathEntryClassInformationProvider.java
|
Apache-2.0
|
/**
 * Builds a {@code FromClasspathEntryClassInformationProvider} that locates the class providing
 * the main method on the system classpath.
 *
 * @return A provider exposing the job class found on the system classpath.
 * @throws IOException If a Jar listed on the system classpath could not be accessed.
 * @throws FlinkException If zero or more than one main method was found.
 */
public static FromClasspathEntryClassInformationProvider createFromSystemClasspath()
        throws IOException, FlinkException {
    final String jobClass = extractJobClassFromSystemClasspath();
    return new FromClasspathEntryClassInformationProvider(jobClass);
}
|
Creates a {@code FromClasspathEntryClassInformationProvider} looking for the entry class
providing the main method on the system classpath.
@return The {@code FromClasspathEntryClassInformationProvider} providing the job class found
on the system classpath.
@throws IOException If some Jar listed on the system classpath wasn't accessible.
@throws FlinkException Either no or too many main methods were found on the system classpath.
|
createFromSystemClasspath
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromClasspathEntryClassInformationProvider.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromClasspathEntryClassInformationProvider.java
|
Apache-2.0
|
/**
 * Creates a {@code FromClasspathEntryClassInformationProvider} assuming that the passed job
 * class is available on the system classpath. Note that the presence of the class on the
 * system classpath is not verified here.
 *
 * @param jobClassName The job class name working as the entry point.
 * @return The {@code FromClasspathEntryClassInformationProvider} wrapping the given class name.
 */
public static FromClasspathEntryClassInformationProvider
        createWithJobClassAssumingOnSystemClasspath(String jobClassName) {
    return new FromClasspathEntryClassInformationProvider(jobClassName);
}
|
Creates a {@code FromClasspathEntryClassInformationProvider} assuming that the passed job
class is available on the system classpath.
@param jobClassName The job class name working as the entry point.
@return The {@code FromClasspathEntryClassInformationProvider} providing the job class found.
|
createWithJobClassAssumingOnSystemClasspath
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromClasspathEntryClassInformationProvider.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromClasspathEntryClassInformationProvider.java
|
Apache-2.0
|
/**
 * Always returns an empty {@code Optional}: this provider resolves the entry class from the
 * user or system classpath, so no dedicated JAR file is involved.
 *
 * @return An empty {@code Optional}.
 */
@Override
public Optional<File> getJarFile() {
    return Optional.empty();
}
|
Always returns an empty {@code Optional} because this implementation relies on the JAR
archive being available on either the user or the system classpath.
@return An empty {@code Optional}.
|
getJarFile
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromClasspathEntryClassInformationProvider.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromClasspathEntryClassInformationProvider.java
|
Apache-2.0
|
/**
 * Creates a {@code FromJarEntryClassInformationProvider} for a custom Jar archive. At least
 * the {@code jarFile} or the {@code jobClassName} has to be set.
 *
 * @param jarFile The Jar archive.
 * @param jobClassName The name of the job class; may be {@code null}.
 * @return The {@code FromJarEntryClassInformationProvider} referring to the passed information.
 */
// NOTE(review): the "at least one must be set" precondition is not validated in this factory —
// presumably the constructor enforces it; confirm.
public static FromJarEntryClassInformationProvider createFromCustomJar(
        File jarFile, @Nullable String jobClassName) {
    return new FromJarEntryClassInformationProvider(jarFile, jobClassName);
}
|
Creates a {@code FromJarEntryClassInformationProvider} for a custom Jar archive. At least the
{@code jarFile} or the {@code jobClassName} has to be set.
@param jarFile The Jar archive.
@param jobClassName The name of the job class.
@return The {@code FromJarEntryClassInformationProvider} referring to the passed information.
|
createFromCustomJar
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromJarEntryClassInformationProvider.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromJarEntryClassInformationProvider.java
|
Apache-2.0
|
/**
 * Creates a {@code FromJarEntryClassInformationProvider} for a job implemented in Python,
 * pointing at the bundled Python driver jar and the Python driver class.
 *
 * @return A {@code FromJarEntryClassInformationProvider} for a job implemented in Python
 */
public static FromJarEntryClassInformationProvider createFromPythonJar() {
    final File pythonJar = new File(PackagedProgramUtils.getPythonJar().getPath());
    final String driverClass = PackagedProgramUtils.getPythonDriverClassName();
    return new FromJarEntryClassInformationProvider(pythonJar, driverClass);
}
|
Creates a {@code FromJarEntryClassInformationProvider} for a job implemented in Python.
@return A {@code FromJarEntryClassInformationProvider} for a job implemented in Python
|
createFromPythonJar
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromJarEntryClassInformationProvider.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromJarEntryClassInformationProvider.java
|
Apache-2.0
|
/**
 * Returns the jar file this provider was created with.
 *
 * @return The specified {@code jarFile}.
 * @see #getJobClassName()
 */
@Override
public Optional<File> getJarFile() {
    return Optional.of(jarFile);
}
|
Returns the specified {@code jarFile}.
@return The specified {@code jarFile}.
@see #getJobClassName()
|
getJarFile
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromJarEntryClassInformationProvider.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromJarEntryClassInformationProvider.java
|
Apache-2.0
|
/**
 * Returns the configured job class name, expected to be present in the corresponding
 * {@code jarFile}. The result is empty when no explicit job class was set (i.e. the jar's
 * entry class is to be used).
 *
 * @return The job class name, or an empty {@code Optional} if the entry class of the jar
 *     should be used.
 * @see #getJarFile()
 */
@Override
public Optional<String> getJobClassName() {
    return Optional.ofNullable(jobClassName);
}
|
Returns the specified job class name that is expected to be available in the corresponding
{@code jarFile}. It returns an empty {@code Optional} if the job class is the entry class of
the jar.
@return The job class that can be found in the respective {@code jarFile}, or an empty
{@code Optional} if the job class is the entry class of the jar.
@see #getJarFile()
|
getJobClassName
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromJarEntryClassInformationProvider.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromJarEntryClassInformationProvider.java
|
Apache-2.0
|
/**
 * Returns the single JAR file (with its entry class) whose manifest declares an entry class.
 *
 * @param jarFiles JAR files to parse
 * @return the unique JAR/entry-class pair
 * @throws IOException if a JAR could not be read
 * @throws NoSuchElementException if no JAR file contains an entry class attribute
 * @throws IllegalArgumentException if multiple JAR files contain an entry class manifest
 *     attribute
 */
static JarFileWithEntryClass findOnlyEntryClass(Iterable<File> jarFiles) throws IOException {
    final List<JarFileWithEntryClass> candidates = new ArrayList<>();
    for (File jar : jarFiles) {
        Optional<String> entryClass = findEntryClass(jar);
        if (entryClass.isPresent()) {
            candidates.add(new JarFileWithEntryClass(jar, entryClass.get()));
        }
    }
    switch (candidates.size()) {
        case 0:
            throw new NoSuchElementException("No JAR with manifest attribute for entry class");
        case 1:
            return candidates.get(0);
        default:
            throw new IllegalArgumentException(
                    "Multiple JARs with manifest attribute for entry class: " + candidates);
    }
}
|
Returns a JAR file with its entry class as specified in the manifest.
@param jarFiles JAR files to parse
@throws NoSuchElementException if no JAR file contains an entry class attribute
@throws IllegalArgumentException if multiple JAR files contain an entry class manifest
attribute
|
findOnlyEntryClass
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/application/JarManifestParser.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/deployment/application/JarManifestParser.java
|
Apache-2.0
|
/**
 * Returns the entry class as specified in the manifest of the provided JAR file.
 *
 * <p>The manifest attributes are checked in order: first {@link
 * PackagedProgram#MANIFEST_ATTRIBUTE_ASSEMBLER_CLASS}, then {@link
 * PackagedProgram#MANIFEST_ATTRIBUTE_MAIN_CLASS}; the first one present wins.
 *
 * @param jarFile JAR file to parse
 * @return Optional holding the entry class name, or empty if neither attribute is present
 * @throws IOException If there is an error accessing the JAR
 */
@VisibleForTesting
static Optional<String> findEntryClass(File jarFile) throws IOException {
    return findFirstManifestAttribute(
            jarFile,
            PackagedProgram.MANIFEST_ATTRIBUTE_ASSEMBLER_CLASS,
            PackagedProgram.MANIFEST_ATTRIBUTE_MAIN_CLASS);
}
|
Returns the entry class as specified in the manifest of the provided JAR file.
<p>The following manifest attributes are checked in order to find the entry class:
<ol>
<li>{@link PackagedProgram#MANIFEST_ATTRIBUTE_ASSEMBLER_CLASS}
<li>{@link PackagedProgram#MANIFEST_ATTRIBUTE_MAIN_CLASS}
</ol>
@param jarFile JAR file to parse
@return Optional holding entry class
@throws IOException If there is an error accessing the JAR
|
findEntryClass
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/application/JarManifestParser.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/deployment/application/JarManifestParser.java
|
Apache-2.0
|
/**
 * Creates the {@link JobGraph} corresponding to the provided {@link Pipeline}.
 *
 * @param pipeline the pipeline whose job graph we are computing.
 * @param configuration the configuration with the necessary information such as jars and
 *     classpaths to be included, the parallelism of the job and potential savepoint settings
 *     used to bootstrap its state.
 * @param userClassloader the classloader which can load user classes.
 * @return the corresponding {@link JobGraph}.
 */
public static JobGraph getJobGraph(
        @Nonnull final Pipeline pipeline,
        @Nonnull final Configuration configuration,
        @Nonnull ClassLoader userClassloader)
        throws MalformedURLException {
    checkNotNull(pipeline);
    checkNotNull(configuration);
    // Enforce the @Nonnull contract for all three parameters, not just the first two.
    checkNotNull(userClassloader);

    final ExecutionConfigAccessor executionConfigAccessor =
            ExecutionConfigAccessor.fromConfiguration(configuration);
    final JobGraph jobGraph =
            FlinkPipelineTranslationUtil.getJobGraph(
                    userClassloader,
                    pipeline,
                    configuration,
                    executionConfigAccessor.getParallelism());

    // An explicitly configured (fixed) job id overrides the generated one.
    configuration
            .getOptional(PipelineOptionsInternal.PIPELINE_FIXED_JOB_ID)
            .ifPresent(strJobID -> jobGraph.setJobID(JobID.fromHexString(strJobID)));

    if (configuration.get(DeploymentOptions.ATTACHED)
            && configuration.get(DeploymentOptions.SHUTDOWN_IF_ATTACHED)) {
        // Attached mode with shutdown-if-attached relies on client heartbeats.
        jobGraph.setInitialClientHeartbeatTimeout(
                configuration.get(ClientOptions.CLIENT_HEARTBEAT_TIMEOUT).toMillis());
    }

    jobGraph.addJars(executionConfigAccessor.getJars());
    jobGraph.setClasspaths(executionConfigAccessor.getClasspaths());
    jobGraph.setSavepointRestoreSettings(executionConfigAccessor.getSavepointRestoreSettings());
    return jobGraph;
}
|
Creates the {@link JobGraph} corresponding to the provided {@link Pipeline}.
@param pipeline the pipeline whose job graph we are computing.
@param configuration the configuration with the necessary information such as jars and
classpaths to be included, the parallelism of the job and potential savepoint settings
used to bootstrap its state.
@param userClassloader the classloader which can load user classes.
@return the corresponding {@link JobGraph}.
|
getJobGraph
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/deployment/executors/PipelineExecutorUtils.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/deployment/executors/PipelineExecutorUtils.java
|
Apache-2.0
|
/**
 * Requests the accumulators for the given job, deserializing the results with the system
 * class loader.
 *
 * @param jobID The job identifier of a job.
 * @return A Map containing each accumulator's name and value.
 */
default CompletableFuture<Map<String, Object>> getAccumulators(JobID jobID) {
    final ClassLoader defaultLoader = ClassLoader.getSystemClassLoader();
    return getAccumulators(jobID, defaultLoader);
}
|
Requests and returns the accumulators for the given job identifier. Accumulators can be
requested while a job is running or after it has finished. The default class loader is used
to deserialize the incoming accumulator results.
@param jobID The job identifier of a job.
@return A Map containing the accumulator's name and its value.
|
getAccumulators
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/program/ClusterClient.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/program/ClusterClient.java
|
Apache-2.0
|
/**
 * Returns the ids of the completed cluster datasets. The default implementation reports no
 * cached datasets.
 *
 * @return A future completed with an empty set of dataset ids.
 */
default CompletableFuture<Set<AbstractID>> listCompletedClusterDatasetIds() {
    final Set<AbstractID> noDatasets = Collections.emptySet();
    return CompletableFuture.completedFuture(noDatasets);
}
|
Return a set of ids of the completed cluster datasets.
@return A set of ids of the completely cached intermediate dataset.
|
listCompletedClusterDatasetIds
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/program/ClusterClient.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/program/ClusterClient.java
|
Apache-2.0
|
/**
 * Invalidates the cached intermediate dataset with the given id. The default implementation
 * is a no-op that completes immediately.
 *
 * @param clusterDatasetId id of the cluster dataset to be invalidated.
 * @return Future which will be completed when the cached dataset is invalidated.
 */
default CompletableFuture<Void> invalidateClusterDataset(AbstractID clusterDatasetId) {
    final CompletableFuture<Void> done = CompletableFuture.completedFuture(null);
    return done;
}
|
Invalidate the cached intermediate dataset with the given id.
@param clusterDatasetId id of the cluster dataset to be invalidated.
@return Future which will be completed when the cached dataset is invalidated.
|
invalidateClusterDataset
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/program/ClusterClient.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/program/ClusterClient.java
|
Apache-2.0
|
/**
 * Reports a client heartbeat to the dispatcher, signalling that the client is still alive.
 * The default implementation is a no-op that completes immediately.
 *
 * @param jobId The job the heartbeat is reported for.
 * @param expiredTimestamp Timestamp after which the heartbeat is considered expired —
 *     TODO(review): confirm units/semantics against the dispatcher contract.
 * @return A future that completes once the heartbeat has been reported.
 */
default CompletableFuture<Void> reportHeartbeat(JobID jobId, long expiredTimestamp) {
    return FutureUtils.completedVoidFuture();
}
|
The client reports the heartbeat to the dispatcher to signal that it is still alive.
@param jobId The job identifier of the job the heartbeat is reported for.
@param expiredTimestamp The timestamp after which the reported heartbeat expires.
@return A future that completes once the heartbeat has been reported.
|
reportHeartbeat
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/program/ClusterClient.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/program/ClusterClient.java
|
Apache-2.0
|
/**
 * Formats the detected configuration-change error messages into a single human-readable,
 * multi-line message (one indented line per error).
 *
 * @param errorMessages the individual error descriptions to include
 * @return the combined message
 */
private static String prettyFormat(Collection<String> errorMessages) {
    StringBuilder builder =
            new StringBuilder("Not allowed configuration change(s) were detected:");
    for (String error : errorMessages) {
        // Chain appends instead of concatenating a temporary String per iteration.
        builder.append("\n - ").append(error);
    }
    return builder.toString();
}
|
Formats the detected invalid configuration changes into a single human-readable, multi-line
error message.
|
prettyFormat
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/program/MutatedConfigurationException.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/program/MutatedConfigurationException.java
|
Apache-2.0
|
/**
 * Returns the human-readable description supplied by the program, provided its main class
 * implements {@link ProgramDescription}.
 *
 * @return The program's description, or {@code null} if none is available (the main class
 *     does not implement {@code ProgramDescription}, or it could not be instantiated).
 * @throws ProgramInvocationException If querying the description itself fails.
 */
@Nullable
public String getDescription() throws ProgramInvocationException {
    if (!ProgramDescription.class.isAssignableFrom(this.mainClass)) {
        // The program does not advertise a description at all.
        return null;
    }
    final ProgramDescription descriptionProvider;
    try {
        descriptionProvider =
                InstantiationUtil.instantiate(
                        this.mainClass.asSubclass(ProgramDescription.class),
                        ProgramDescription.class);
    } catch (Throwable t) {
        // Instantiation problems are treated as "no description available".
        return null;
    }
    try {
        return descriptionProvider.getDescription();
    } catch (Throwable t) {
        throw new ProgramInvocationException(
                "Error while getting the program description"
                        + (t.getMessage() == null ? "." : ": " + t.getMessage()),
                t);
    }
}
|
Returns the description provided by the Program class. This may contain a description of the
plan itself and its arguments.
@return The description of the PactProgram's input parameters.
@throws ProgramInvocationException This invocation is thrown if the Program can't be properly
loaded. Causes may be a missing / wrong class or manifest files.
|
getDescription
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/program/PackagedProgram.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/program/PackagedProgram.java
|
Apache-2.0
|
/**
 * Invokes the program's main method with the configured arguments. This method assumes that
 * the context environment is prepared, or the execution will be a local execution by default.
 *
 * <p>User calls to {@code System.exit()} are monitored for the duration of the invocation.
 */
public void invokeInteractiveModeForExecution() throws ProgramInvocationException {
    FlinkSecurityManager.monitorUserSystemExitForCurrentThread();
    try {
        callMainMethod(mainClass, args);
    } finally {
        // Always unregister the exit monitor, even if the main method throws.
        FlinkSecurityManager.unmonitorUserSystemExitForCurrentThread();
    }
}
|
This method assumes that the context environment is prepared, or the execution will be a
local execution by default.
|
invokeInteractiveModeForExecution
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/program/PackagedProgram.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/program/PackagedProgram.java
|
Apache-2.0
|
/**
 * Returns the classpaths that are required by the program.
 *
 * <p>NOTE(review): the internal list is exposed without a defensive copy; callers should not
 * mutate it.
 *
 * @return List of {@link java.net.URL}s.
 */
public List<URL> getClasspaths() {
    return this.classpaths;
}
|
Returns the classpaths that are required by the program.
@return List of {@link java.net.URL}s.
|
getClasspaths
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/program/PackagedProgram.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/program/PackagedProgram.java
|
Apache-2.0
|
/**
 * Gets the {@link java.lang.ClassLoader} that must be used to load user code classes.
 *
 * @return The user code ClassLoader.
 */
public ClassLoader getUserCodeClassLoader() {
    return this.userCodeClassLoader;
}
|
Gets the {@link java.lang.ClassLoader} that must be used to load user code classes.
@return The user code ClassLoader.
|
getUserCodeClassLoader
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/program/PackagedProgram.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/program/PackagedProgram.java
|
Apache-2.0
|
/**
 * Creates a {@link JobGraph} with a specified {@link JobID} from the given {@link
 * PackagedProgram}.
 *
 * @param packagedProgram to extract the JobGraph from
 * @param configuration to use for the optimizer and job graph generator
 * @param defaultParallelism for the JobGraph
 * @param jobID the pre-generated job id; may be {@code null}, in which case the generated id
 *     is kept
 * @param suppressOutput whether to suppress output while extracting the pipeline
 * @return JobGraph extracted from the PackagedProgram
 * @throws ProgramInvocationException if the JobGraph generation failed
 */
public static JobGraph createJobGraph(
        PackagedProgram packagedProgram,
        Configuration configuration,
        int defaultParallelism,
        @Nullable JobID jobID,
        boolean suppressOutput)
        throws ProgramInvocationException {
    // First extract the pipeline from the program, then translate it under the program's
    // user-code classloader.
    final Pipeline pipeline =
            getPipelineFromProgram(
                    packagedProgram, configuration, defaultParallelism, suppressOutput);
    final JobGraph graph =
            FlinkPipelineTranslationUtil.getJobGraphUnderUserClassLoader(
                    packagedProgram.getUserCodeClassLoader(),
                    pipeline,
                    configuration,
                    defaultParallelism);

    if (jobID != null) {
        graph.setJobID(jobID);
    }
    graph.addJars(packagedProgram.getJobJarAndDependencies());
    graph.setClasspaths(packagedProgram.getClasspaths());
    graph.setSavepointRestoreSettings(packagedProgram.getSavepointSettings());

    return graph;
}
|
Creates a {@link JobGraph} with a specified {@link JobID} from the given {@link
PackagedProgram}.
@param packagedProgram to extract the JobGraph from
@param configuration to use for the optimizer and job graph generator
@param defaultParallelism for the JobGraph
@param jobID the pre-generated job id
@return JobGraph extracted from the PackagedProgram
@throws ProgramInvocationException if the JobGraph generation failed
|
createJobGraph
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/program/PackagedProgramUtils.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/program/PackagedProgramUtils.java
|
Apache-2.0
|
/**
 * Fetches artifacts from a given URI string array. The job jar and any additional artifacts
 * are mixed; with multiple artifacts the {@link DefaultPackagedProgramRetriever} logic will
 * be used to find the job jar.
 *
 * @param uris URIs to fetch
 * @return result with the fetched artifacts
 */
public Result fetchArtifacts(String[] uris) {
    checkArgument(uris != null && uris.length > 0, "At least one URI is required.");
    final List<File> fetched =
            Arrays.stream(uris)
                    .map(FunctionUtils.uncheckedFunction(this::fetchArtifact))
                    .collect(Collectors.toList());
    if (fetched.size() == 1) {
        // Single artifact: it is the job jar itself.
        return new Result(fetched.get(0), null);
    } else if (fetched.size() > 1) {
        return new Result(null, fetched);
    }
    // Unreachable: uris is non-empty and every fetch either succeeds or throws.
    throw new IllegalStateException("Corrupt artifact fetching state.");
}
|
Fetches artifacts from a given URI string array. The job jar and any additional artifacts are
mixed, in case of multiple artifacts the {@link DefaultPackagedProgramRetriever} logic will
be used to find the job jar.
@param uris URIs to fetch
@return result with the fetched artifacts
|
fetchArtifacts
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/program/artifact/ArtifactFetchManager.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/program/artifact/ArtifactFetchManager.java
|
Apache-2.0
|
/**
 * Creates missing parent directories for the given {@link File} if there are any; does
 * nothing when the directory already exists.
 *
 * @param baseDir base dir to create parents for
 */
public static synchronized void createMissingParents(File baseDir) {
    checkNotNull(baseDir, "Base dir has to be provided.");
    if (baseDir.exists()) {
        // Nothing to do — the directory (and thus its parents) already exists.
        return;
    }
    try {
        FileUtils.forceMkdirParent(baseDir);
        LOG.info("Created parents for base dir: {}", baseDir);
    } catch (Exception e) {
        throw new FlinkRuntimeException(
                String.format("Failed to create parent(s) for given base dir: %s", baseDir),
                e);
    }
}
|
Creates missing parent directories for the given {@link File} if there are any. Does nothing
otherwise.
@param baseDir base dir to create parents for
|
createMissingParents
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/program/artifact/ArtifactUtils.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/program/artifact/ArtifactUtils.java
|
Apache-2.0
|
/**
 * Copies the artifact into the target directory via the Flink filesystem abstraction.
 */
@Override
File fetch(String uri, Configuration flinkConf, File targetDir) throws Exception {
    ArtifactUtils.createMissingParents(targetDir);
    final long startMillis = System.currentTimeMillis();

    final Path sourcePath = new Path(uri);
    final FileSystem fs = sourcePath.getFileSystem();
    final File destination = new File(targetDir, sourcePath.getName());
    try (FSDataInputStream in = fs.open(sourcePath)) {
        FileUtils.copyToFile(in, destination);
    }

    LOG.debug(
            "Copied file from {} to {}, cost {} ms",
            sourcePath,
            destination,
            System.currentTimeMillis() - startMillis);
    return destination;
}
|
Copies artifact via the Flink filesystem plugin.
|
fetch
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/program/artifact/FsArtifactFetcher.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/program/artifact/FsArtifactFetcher.java
|
Apache-2.0
|
/**
 * Downloads an artifact from an HTTP(S) resource into the given target directory.
 *
 * @param uri the HTTP URL of the artifact to download
 * @param flinkConf configuration, read for optional HTTP request headers
 * @param targetDir directory the artifact is copied into (missing parents are created)
 * @return the downloaded artifact file
 * @throws IOException if the connection or the copy fails
 */
@Override
File fetch(String uri, Configuration flinkConf, File targetDir) throws IOException {
    ArtifactUtils.createMissingParents(targetDir);
    long start = System.currentTimeMillis();
    URL url = new URL(uri);
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try {
        Map<String, String> headers = flinkConf.get(ArtifactFetchOptions.HTTP_HEADERS);
        if (headers != null) {
            headers.forEach(conn::setRequestProperty);
        }
        conn.setRequestMethod("GET");
        String fileName = FilenameUtils.getName(url.getPath());
        File targetFile = new File(targetDir, fileName);
        try (InputStream inputStream = conn.getInputStream()) {
            FileUtils.copyToFile(inputStream, targetFile);
        }
        LOG.debug(
                "Copied file from {} to {}, cost {} ms",
                uri,
                targetFile,
                System.currentTimeMillis() - start);
        return targetFile;
    } finally {
        // Release the connection's resources; the original code never disconnected,
        // potentially keeping the underlying socket open.
        conn.disconnect();
    }
}
|
Downloads artifact from an HTTP resource.
|
fetch
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/program/artifact/HttpArtifactFetcher.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/program/artifact/HttpArtifactFetcher.java
|
Apache-2.0
|
/** Resolves a local artifact URI into a valid {@link File} without copying anything. */
@Override
File fetch(String uri, Configuration flinkConf, File targetDir) throws Exception {
    final URI localUri = PackagedProgramUtils.resolveURI(uri);
    final File localFile = new File(localUri.getPath());
    LOG.debug("Retrieved local file from {} as {}", uri, localFile);
    return localFile;
}
|
Retrieves a local artifact as a valid {@link File}.
|
fetch
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/program/artifact/LocalArtifactFetcher.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/program/artifact/LocalArtifactFetcher.java
|
Apache-2.0
|
/**
 * Computes the sleep time for the given retry attempt using exponential backoff:
 * {@code initialWait * 2^attempt}, capped at {@code maxWait}.
 *
 * @param attempt the zero-based retry attempt; must not be negative
 * @return the sleep duration (same unit as {@code initialWait}/{@code maxWait})
 */
@Override
public long sleepTime(final long attempt) {
    checkArgument(attempt >= 0, "attempt must not be negative (%s)", attempt);
    // pow/round compute in double/long space; long overflow surfaces as a negative value,
    // which the guard below maps to maxWait.
    final long exponentialSleepTime = initialWait * Math.round(Math.pow(2, attempt));
    return exponentialSleepTime >= 0 && exponentialSleepTime < maxWait
            ? exponentialSleepTime
            : maxWait;
}
|
{@link WaitStrategy} with exponentially increasing sleep time.
|
sleepTime
|
java
|
apache/flink
|
flink-clients/src/main/java/org/apache/flink/client/program/rest/retry/ExponentialWaitStrategy.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/main/java/org/apache/flink/client/program/rest/retry/ExponentialWaitStrategy.java
|
Apache-2.0
|
/**
 * Ensures that waitUntilJobInitializationFinished() rethrows a {@code
 * JobInitializationException} produced during job initialization.
 */
@Test
void testWaitUntilJobInitializationFinished_throwsInitializationException() {
    // Job stays INITIALIZING for two polls, then transitions to FAILED.
    Iterator<JobStatus> statusSequenceIterator =
            Arrays.asList(JobStatus.INITIALIZING, JobStatus.INITIALIZING, JobStatus.FAILED)
                    .iterator();
    assertThatThrownBy(
                    () ->
                            ClientUtils.waitUntilJobInitializationFinished(
                                    statusSequenceIterator::next,
                                    () -> {
                                        Throwable throwable =
                                                new JobInitializationException(
                                                        TESTING_JOB_ID,
                                                        "Something is wrong",
                                                        new RuntimeException("Err"));
                                        return buildJobResult(throwable);
                                    },
                                    ClassLoader.getSystemClassLoader()))
            .isInstanceOf(JobInitializationException.class)
            .hasMessage("Something is wrong");
}
|
Ensure that the waitUntilJobInitializationFinished() method throws
JobInitializationException.
|
testWaitUntilJobInitializationFinished_throwsInitializationException
|
java
|
apache/flink
|
flink-clients/src/test/java/org/apache/flink/client/ClientUtilsTest.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/test/java/org/apache/flink/client/ClientUtilsTest.java
|
Apache-2.0
|
/**
 * Ensures that waitUntilJobInitializationFinished() does not rethrow failures that are not
 * initialization-related (a plain {@code RuntimeException} must not propagate).
 */
@Test
void testWaitUntilJobInitializationFinished_doesNotThrowRuntimeException() throws Exception {
    final Iterator<JobStatus> statusSequence =
            Arrays.asList(JobStatus.INITIALIZING, JobStatus.INITIALIZING, JobStatus.FAILED)
                    .iterator();
    ClientUtils.waitUntilJobInitializationFinished(
            statusSequence::next,
            () -> buildJobResult(new RuntimeException("Err")),
            ClassLoader.getSystemClassLoader());
}
|
Ensure that waitUntilJobInitializationFinished() does not throw non-initialization
exceptions.
|
testWaitUntilJobInitializationFinished_doesNotThrowRuntimeException
|
java
|
apache/flink
|
flink-clients/src/test/java/org/apache/flink/client/ClientUtilsTest.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/test/java/org/apache/flink/client/ClientUtilsTest.java
|
Apache-2.0
|
/** Verifies that a translated job graph can be submitted through the cluster client. */
@Test
void shouldSubmitToJobClient() {
    final ClusterClient<?> client =
            new MiniClusterClient(new Configuration(), MINI_CLUSTER_RESOURCE.getMiniCluster());
    final JobGraph graph = streamGraph.getJobGraph();
    graph.addJars(Collections.emptyList());
    graph.setClasspaths(Collections.emptyList());
    assertThatFuture(client.submitJob(graph)).eventuallySucceeds().isNotNull();
}
|
This test verifies correct job submission messaging logic and plan translation calls.
|
shouldSubmitToJobClient
|
java
|
apache/flink
|
flink-clients/src/test/java/org/apache/flink/client/program/ClientTest.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/test/java/org/apache/flink/client/program/ClientTest.java
|
Apache-2.0
|
/** Test job that executes eagerly and reads an accumulator from the result. */
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env =
            StreamExecutionEnvironment.getExecutionEnvironment();
    env.fromData(1, 2).sinkTo(new DiscardingSink<>());
    final JobExecutionResult executionResult = env.execute();
    executionResult.getAccumulatorResult("dummy");
}
|
Test job that uses an eager sink.
|
main
|
java
|
apache/flink
|
flink-clients/src/test/java/org/apache/flink/client/program/ClientTest.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/test/java/org/apache/flink/client/program/ClientTest.java
|
Apache-2.0
|
/** Test job that triggers execution twice on the same environment. */
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env =
            StreamExecutionEnvironment.getExecutionEnvironment();
    int remainingRuns = 2;
    while (remainingRuns-- > 0) {
        env.fromData(1, 2).sinkTo(new DiscardingSink<>());
        final JobClient client = env.executeAsync();
        client.getJobExecutionResult();
    }
}
|
Test job with multiple execute() calls.
|
main
|
java
|
apache/flink
|
flink-clients/src/test/java/org/apache/flink/client/program/ClientTest.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/test/java/org/apache/flink/client/program/ClientTest.java
|
Apache-2.0
|
/** Test job that retrieves the net runtime from the {@link JobExecutionResult}. */
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env =
            StreamExecutionEnvironment.getExecutionEnvironment();
    env.fromData(1, 2).sinkTo(new DiscardingSink<>());
    final JobExecutionResult executionResult = env.execute();
    executionResult.getNetRuntime();
}
|
Test job that retrieves the net runtime from the {@link JobExecutionResult}.
|
main
|
java
|
apache/flink
|
flink-clients/src/test/java/org/apache/flink/client/program/ClientTest.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/test/java/org/apache/flink/client/program/ClientTest.java
|
Apache-2.0
|
/** Test job that retrieves the job ID from the {@link JobExecutionResult}. */
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env =
            StreamExecutionEnvironment.getExecutionEnvironment();
    env.fromData(1, 2).sinkTo(new DiscardingSink<>());
    final JobExecutionResult executionResult = env.execute();
    executionResult.getJobID();
}
|
Test job that retrieves the job ID from the {@link JobExecutionResult}.
|
main
|
java
|
apache/flink
|
flink-clients/src/test/java/org/apache/flink/client/program/ClientTest.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/test/java/org/apache/flink/client/program/ClientTest.java
|
Apache-2.0
|
/** Test job that retrieves an accumulator from the {@link JobExecutionResult}. */
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env =
            StreamExecutionEnvironment.getExecutionEnvironment();
    env.fromData(1, 2).sinkTo(new DiscardingSink<>());
    final JobExecutionResult executionResult = env.execute();
    executionResult.getAccumulatorResult(ACCUMULATOR_NAME);
}
|
Test job that retrieves an accumulator from the {@link JobExecutionResult}.
|
main
|
java
|
apache/flink
|
flink-clients/src/test/java/org/apache/flink/client/program/ClientTest.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/test/java/org/apache/flink/client/program/ClientTest.java
|
Apache-2.0
|
/**
 * Supplies the parameterized test cases: currently a single DataStream-based program with an
 * accessor for its {@code ExecutionConfig}.
 */
@Parameters(name = "testParameter-{0}")
public static Collection<TestParameter> parameters() {
    final TestParameter dataStreamCase =
            TestParameter.of(
                    DataStreamTestProgram.class,
                    pipeline -> ((StreamGraph) pipeline).getExecutionConfig());
    return Arrays.asList(dataStreamCase);
}
|
Tests for {@link PackagedProgramUtils} methods that should be executed for {@link
StreamExecutionEnvironment} and {@link Environment}.
|
parameters
|
java
|
apache/flink
|
flink-clients/src/test/java/org/apache/flink/client/program/PackagedProgramUtilsPipelineTest.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/test/java/org/apache/flink/client/program/PackagedProgramUtilsPipelineTest.java
|
Apache-2.0
|
/** Minimal DataStream test program: prints a single element and executes. */
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.fromData("hello").print();
    env.execute();
}
|
Test Program for the DataStream API.
|
main
|
java
|
apache/flink
|
flink-clients/src/test/java/org/apache/flink/client/program/PackagedProgramUtilsPipelineTest.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/test/java/org/apache/flink/client/program/PackagedProgramUtilsPipelineTest.java
|
Apache-2.0
|
/** Checks resolveURI() for relative, absolute, and explicitly-schemed paths. */
@Test
void testResolveURI() throws URISyntaxException {
    // Relative paths are resolved against the current working directory.
    final String relativeFile = "path/of/user.jar";
    final String expectedAbsolutePath =
            new File(System.getProperty("user.dir"), relativeFile).getAbsolutePath();
    assertThat(resolveURI(relativeFile)).hasScheme("file").hasPath(expectedAbsolutePath);

    // Absolute paths keep their path and receive the "file" scheme.
    final String absoluteFile = "/path/of/user.jar";
    assertThat(resolveURI(absoluteFile)).hasScheme("file").hasPath(absoluteFile);

    // An explicit "file" scheme is preserved verbatim.
    final String fileSchemaFile = "file:///path/of/user.jar";
    assertThat(resolveURI(fileSchemaFile).getScheme()).isEqualTo("file");
    assertThat(resolveURI(fileSchemaFile)).hasToString(fileSchemaFile);

    // An explicit "local" scheme is preserved verbatim as well.
    final String localSchemaFile = "local:///path/of/user.jar";
    assertThat(resolveURI(localSchemaFile).getScheme()).isEqualTo("local");
    assertThat(resolveURI(localSchemaFile)).hasToString(localSchemaFile);
}
|
Tests {@link PackagedProgramUtils}.
<p>See also {@link PackagedProgramUtilsPipelineTest} for tests that need to test behaviour of
{@link DataStream} programs.
|
testResolveURI
|
java
|
apache/flink
|
flink-clients/src/test/java/org/apache/flink/client/program/PackagedProgramUtilsTest.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/test/java/org/apache/flink/client/program/PackagedProgramUtilsTest.java
|
Apache-2.0
|
public static void main(String[] args) throws ClassNotFoundException {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
Class.forName(EXTERNAL_CLASS, false, cl);
}
|
Simulate a class that requires an external dependency.
|
main
|
java
|
apache/flink
|
flink-clients/src/test/java/org/apache/flink/client/testjar/JobWithExternalDependency.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/test/java/org/apache/flink/client/testjar/JobWithExternalDependency.java
|
Apache-2.0
|
/** Test job which is used for {@link JarManifestParserTest}. */
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    // Source -> doubling map -> discarding sink, built as a single chain.
    env.fromData(1, 2, 3, 4)
            .map(value -> 2 * value)
            .sinkTo(new DiscardingSink<>());
    final ParameterTool params = ParameterTool.fromArgs(args);
    env.execute(TestJob.class.getCanonicalName() + "-" + params.getRequired("arg"));
}
|
Test job which is used for {@link JarManifestParserTest}.
|
main
|
java
|
apache/flink
|
flink-clients/src/test/java/org/apache/flink/client/testjar/TestJob.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/test/java/org/apache/flink/client/testjar/TestJob.java
|
Apache-2.0
|
/**
 * Returns the test jar including {@link TestJob} (see pom.xml and assembly/test-assembly.xml).
 *
 * @return Test jar file
 * @throws FileNotFoundException If the test jar cannot be found
 */
public static File getTestJobJar() throws FileNotFoundException {
    // The jar is produced by the Maven build; see this module's pom.xml.
    final File jar = new File("target/maven-test-jar.jar");
    if (jar.exists()) {
        return jar;
    }
    throw new FileNotFoundException(
            "Test jar not present. Invoke tests using Maven "
                    + "or build the jar using 'mvn process-test-classes' in flink-clients");
}
|
Returns the test jar including {@link TestJob} (see pom.xml and assembly/test-assembly.xml).
@return Test jar file
@throws FileNotFoundException If test-jar can not be found
|
getTestJobJar
|
java
|
apache/flink
|
flink-clients/src/test/java/org/apache/flink/client/testjar/TestJob.java
|
https://github.com/apache/flink/blob/master/flink-clients/src/test/java/org/apache/flink/client/testjar/TestJob.java
|
Apache-2.0
|
/**
 * Sets the byte-size threshold that triggers a flush attempt.
 *
 * @param maxBatchSizeInBytes a flush will be attempted once the buffered elements' total size
 *     reaches this threshold
 * @return this builder, typed as {@link ConcreteBuilderT}
 */
public ConcreteBuilderT setMaxBatchSizeInBytes(long maxBatchSizeInBytes) {
    this.maxBatchSizeInBytes = maxBatchSizeInBytes;
    @SuppressWarnings("unchecked")
    final ConcreteBuilderT self = (ConcreteBuilderT) this;
    return self;
}
|
@param maxBatchSizeInBytes a flush will be attempted if the most recent call to write
introduces an element to the buffer such that the total size of the buffer is greater
than or equal to this threshold value. If this happens, the maximum number of elements
from the head of the buffer whose combined size is smaller than {@code
maxBatchSizeInBytes} will be flushed.
@return {@link ConcreteBuilderT} itself
|
setMaxBatchSizeInBytes
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/AsyncSinkBaseBuilder.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/AsyncSinkBaseBuilder.java
|
Apache-2.0
|
/**
 * Sets the maximum allowed size of a single record in bytes; larger records are rejected by
 * the sink with an {@code IllegalArgumentException}.
 *
 * @param maxRecordSizeInBytes maximum size of an individual record in bytes
 * @return this builder, for call chaining
 */
public ConcreteBuilderT setMaxRecordSizeInBytes(long maxRecordSizeInBytes) {
    this.maxRecordSizeInBytes = maxRecordSizeInBytes;
    // Safe: ConcreteBuilderT is declared as the concrete subtype of this builder.
    @SuppressWarnings("unchecked")
    final ConcreteBuilderT self = (ConcreteBuilderT) this;
    return self;
}
|
@param maxRecordSizeInBytes the maximum size of each record in bytes. If a record larger
than this is passed to the sink, it will throw an {@code IllegalArgumentException}.
@return {@link ConcreteBuilderT} itself
|
setMaxRecordSizeInBytes
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/AsyncSinkBaseBuilder.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/AsyncSinkBaseBuilder.java
|
Apache-2.0
|
/**
 * Returns a classifier that treats an {@link InterruptedException} root cause as fatal,
 * surfacing it wrapped in a {@link FlinkException}.
 */
public static FatalExceptionClassifier getInterruptedExceptionClassifier() {
    return FatalExceptionClassifier.withRootCauseOfType(
            InterruptedException.class,
            cause -> new FlinkException("Thread was interrupted", cause));
}
|
Common retry exception classifiers needed for common errors.
|
getInterruptedExceptionClassifier
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/writer/AsyncSinkFatalExceptionClassifiers.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/writer/AsyncSinkFatalExceptionClassifiers.java
|
Apache-2.0
|
/** Returns the request entries that make up this batch. */
public List<RequestEntryT> getBatchEntries() {
    return this.batchEntries;
}
|
Returns the list of request entries in this batch.
@return a list of request entries for the batch
|
getBatchEntries
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/writer/Batch.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/writer/Batch.java
|
Apache-2.0
|
/** Returns the cumulative size of this batch in bytes. */
public long getSizeInBytes() {
    return this.sizeInBytes;
}
|
Returns the total size in bytes of the batch.
@return the batch's cumulative byte size
|
getSizeInBytes
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/writer/Batch.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/writer/Batch.java
|
Apache-2.0
|
/** Returns the number of entries contained in this batch. */
public int getRecordCount() {
    return this.recordCount;
}
|
Returns the total number of entries in the batch.
@return the record count in the batch
|
getRecordCount
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/writer/Batch.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/writer/Batch.java
|
Apache-2.0
|
/**
 * Adds a request entry to the buffer and grows the tracked total byte size.
 *
 * <p>Prioritized entries (typically retries) go to the head of the deque so they are sent
 * again first; all other entries are appended at the tail, preserving FIFO order.
 *
 * @param entry the request entry to buffer
 * @param prioritize whether to insert at the head instead of the tail
 */
@Override
public void add(RequestEntryWrapper<RequestEntryT> entry, boolean prioritize) {
    if (prioritize) {
        buffer.addFirst(entry);
    } else {
        // Deque.addLast is equivalent to add; used here for symmetry with addFirst.
        buffer.addLast(entry);
    }
    totalSizeInBytes += entry.getSize();
}
|
Adds a request entry to the buffer. If {@code prioritize} is true, the entry is inserted at
the front (for retries). Otherwise, it is added at the end following FIFO order.
@param entry The request entry to add.
@param prioritize If true, insert at the front; otherwise, add at the end.
|
add
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/writer/DequeRequestBuffer.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/writer/DequeRequestBuffer.java
|
Apache-2.0
|
/**
 * Initialization hook invoked before the converter is used; the default implementation does
 * nothing so existing converters need not override it.
 *
 * @param context the writer initialization context supplied by the sink
 */
default void open(WriterInitContext context) {
    // No-op default implementation
}
|
This interface specifies the mapping between elements of a stream to request entries that can be
sent to the destination. The mapping is provided by the end-user of a sink, not the sink creator.
<p>The request entries contain all relevant information required to create and send the actual
request. E.g., for Kinesis Data Streams, the request entry includes the payload and the partition
key.
|
open
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/writer/ElementConverter.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/writer/ElementConverter.java
|
Apache-2.0
|
/**
 * Drains buffered entries into a new batch, stopping at the requested entry count or as soon
 * as admitting the next entry would push the batch past {@code maxBatchSizeInBytes}.
 *
 * @param requestInfo carries the maximum number of entries allowed in this batch
 * @param bufferedRequestEntries the buffer to drain from (consumed head-first)
 * @return the assembled batch together with its cumulative byte size
 */
@Override
public Batch<RequestEntryT> createNextBatch(
        RequestInfo requestInfo, RequestBuffer<RequestEntryT> bufferedRequestEntries) {
    final int maxRecords = requestInfo.getBatchSize();
    final List<RequestEntryT> collected = new ArrayList<>(maxRecords);
    long collectedBytes = 0L;
    int taken = 0;
    while (taken < maxRecords && !bufferedRequestEntries.isEmpty()) {
        final long nextEntrySize = bufferedRequestEntries.peek().getSize();
        if (collectedBytes + nextEntrySize > maxBatchSizeInBytes) {
            // Admitting this entry would exceed the byte budget; leave it buffered.
            break;
        }
        collected.add(bufferedRequestEntries.poll().getRequestEntry());
        collectedBytes += nextEntrySize;
        taken++;
    }
    return new Batch<>(collected, collectedBytes);
}
|
Creates the next batch of request entries based on the provided {@code requestInfo} and the
currently buffered entries.
|
createNextBatch
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/writer/SimpleBatchCreator.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/writer/SimpleBatchCreator.java
|
Apache-2.0
|
/** Returns the maximum number of entries to include in the next request batch. */
public int getBatchSize() {
    return this.batchSize;
}
|
Dataclass to encapsulate information about starting requests.
|
getBatchSize
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/writer/strategy/BasicRequestInfo.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/writer/strategy/BasicRequestInfo.java
|
Apache-2.0
|
/**
 * Creates a list entry pairing a source factory with the boundedness of the source it will
 * produce.
 *
 * @param configurer factory that instantiates the underlying source
 * @param boundedness whether the produced source is bounded or continuous
 */
static SourceListEntry of(SourceFactory configurer, Boundedness boundedness) {
    return new SourceListEntry(configurer, boundedness);
}
|
Entry for list of underlying sources.
|
of
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/hybrid/HybridSource.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/hybrid/HybridSource.java
|
Apache-2.0
|
/**
 * Adds a pre-configured source that is used as-is (no switch-time modification), deriving
 * its boundedness from the source itself.
 *
 * @param source the next source in the chain
 * @return this builder, re-typed to the new source's enumerator type
 */
public <ToEnumT extends SplitEnumerator, NextSourceT extends Source<T, ?, ?>>
        HybridSourceBuilder<T, ToEnumT> addSource(NextSourceT source) {
    return addSource(new PassthroughSourceFactory<>(source), source.getBoundedness());
}
|
Add pre-configured source (without switch time modification).
|
addSource
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/hybrid/HybridSource.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/hybrid/HybridSource.java
|
Apache-2.0
|
/**
 * Adds a source whose instantiation is deferred until the previous source's enumerator is
 * available at switch time.
 *
 * <p>All sources added before the final one must be bounded: only the last source in the
 * chain may run continuously.
 *
 * @param sourceFactory factory invoked with the previous enumerator to build the next source
 * @param boundedness boundedness of the source the factory will create
 * @return this builder, re-typed to the new source's enumerator type
 */
public <ToEnumT extends SplitEnumerator, NextSourceT extends Source<T, ?, ?>>
        HybridSourceBuilder<T, ToEnumT> addSource(
                SourceFactory<T, NextSourceT, ? super EnumT> sourceFactory,
                Boundedness boundedness) {
    if (!sources.isEmpty()) {
        Preconditions.checkArgument(
                Boundedness.BOUNDED.equals(sources.get(sources.size() - 1).boundedness),
                "All sources except the final source need to be bounded.");
    }
    // The factory may capture outer state; clean it so the source stays serializable.
    ClosureCleaner.clean(
            sourceFactory, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
    sources.add(SourceListEntry.of(sourceFactory, boundedness));
    // Only the enumerator type parameter changes, no state does, so the unchecked cast is
    // safe. Prefer a narrowly scoped suppression over the previous raw-type cast.
    @SuppressWarnings("unchecked")
    final HybridSourceBuilder<T, ToEnumT> self = (HybridSourceBuilder<T, ToEnumT>) this;
    return self;
}
|
Add source with deferred instantiation based on previous enumerator.
|
addSource
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/hybrid/HybridSource.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/hybrid/HybridSource.java
|
Apache-2.0
|
/**
 * Records a single element read from the split with the given ID, lazily creating the
 * per-split record list on first use.
 *
 * @param splitId ID of the split the record came from
 * @param record the record to store
 */
public void add(String splitId, E record) {
    final Collection<E> splitRecords =
            recordsBySplits.computeIfAbsent(splitId, ignored -> new ArrayList<>());
    splitRecords.add(record);
}
|
Add the record from the given split ID.
@param splitId the split ID the record was from.
@param record the record to add.
|
add
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/RecordsBySplits.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/RecordsBySplits.java
|
Apache-2.0
|
/**
 * Records a single element read from the given source split.
 *
 * @param split the source split the record came from
 * @param record the record to store
 */
public void add(SourceSplit split, E record) {
    this.add(split.splitId(), record);
}
|
Add the record from the given source split.
@param split the source split the record was from.
@param record the record to add.
|
add
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/RecordsBySplits.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/RecordsBySplits.java
|
Apache-2.0
|
/**
 * Records a collection of elements read from the split with the given ID.
 *
 * <p>NOTE(review): when the split has no entry yet, the given collection is stored directly
 * (not copied) and later calls mutate it via {@code addAll} — callers appear expected to
 * hand over ownership; confirm before reusing a passed-in collection.
 *
 * @param splitId ID of the split the records came from
 * @param records the records to store
 */
public void addAll(String splitId, Collection<E> records) {
    this.recordsBySplits.compute(
            splitId,
            (key, existing) -> {
                if (existing == null) {
                    // First records for this split: adopt the caller's collection.
                    return records;
                }
                existing.addAll(records);
                return existing;
            });
}
|
Add multiple records from the given split ID.
@param splitId the split ID given the records were from.
@param records the records to add.
|
addAll
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/RecordsBySplits.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/RecordsBySplits.java
|
Apache-2.0
|
/**
 * Records a collection of elements read from the given source split.
 *
 * @param split the source split the records came from
 * @param records the records to store
 */
public void addAll(SourceSplit split, Collection<E> records) {
    this.addAll(split.splitId(), records);
}
|
Add multiple records from the given source split.
@param split the source split the records were from.
@param records the records to add.
|
addAll
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/RecordsBySplits.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/RecordsBySplits.java
|
Apache-2.0
|
/**
 * Builds the {@link RecordsBySplits} from the records and finished splits collected so far,
 * substituting shared immutable empty collections when nothing was added.
 */
public RecordsBySplits<E> build() {
    return new RecordsBySplits<>(
            !recordsBySplits.isEmpty() ? recordsBySplits : Collections.emptyMap(),
            !finishedSplits.isEmpty() ? finishedSplits : Collections.emptySet());
}
|
Builds the {@link RecordsBySplits} from the records and finished splits collected so far,
using shared empty collections when nothing was added.
|
build
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/RecordsBySplits.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/RecordsBySplits.java
|
Apache-2.0
|
/**
 * Returns how many splits this reader currently has assigned, i.e. splits that were added
 * via {@code addSplits} and have not yet been reported finished.
 */
public int getNumberOfCurrentlyAssignedSplits() {
    return this.splitStates.size();
}
|
Gets the number of splits the reader currently has assigned.
<p>These are the splits that have been added via {@link #addSplits(List)} and have not yet
been finished by returning them from the {@link SplitReader#fetch()} as part of {@link
RecordsWithSplitIds#finishedSplits()}.
|
getNumberOfCurrentlyAssignedSplits
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/SourceReaderBase.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/SourceReaderBase.java
|
Apache-2.0
|
/**
 * Checks whether the given record marks the end of stream, latching the end-of-stream flag
 * once the user-provided EOF handler reports a match.
 *
 * @param record the record to inspect
 * @return true if the stream has ended (with this record or previously)
 */
private boolean isEndOfStreamReached(T record) {
    // Once latched, skip the handler entirely; otherwise consult it and latch on a match.
    if (!isStreamEnd && eofRecordHandler.apply(record)) {
        isStreamEnd = true;
    }
    return isStreamEnd;
}
|
Judge and handle the eof record.
@return whether the record is the eof record.
|
isEndOfStreamReached
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/SourceReaderBase.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/SourceReaderBase.java
|
Apache-2.0
|
/**
 * Asynchronously hands the given splits to the split reader: enqueues an {@code
 * AddSplitsTask} under the fetcher lock, then wakes the fetcher so the task is picked up
 * promptly.
 *
 * @param splitsToAdd the splits to add
 */
public void addSplits(List<SplitT> splitsToAdd) {
    lock.lock();
    try {
        // Enqueue under the lock, then wake a potentially blocked fetcher thread.
        enqueueTaskUnsafe(new AddSplitsTask<>(splitReader, splitsToAdd, assignedSplits));
        wakeUpUnsafe(true);
    } finally {
        lock.unlock();
    }
}
|
Add splits to the split fetcher. This operation is asynchronous.
@param splitsToAdd the splits to add.
|
addSplits
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcher.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcher.java
|
Apache-2.0
|
/**
 * Asynchronously schedules removal of the given splits: enqueues a {@code RemoveSplitsTask}
 * under the fetcher lock, then wakes the fetcher so the task runs promptly. The
 * split-finished hook is handed to the task — presumably invoked once the reader has dropped
 * the splits (confirm in RemoveSplitsTask).
 *
 * @param splitsToRemove the splits that need to be removed
 */
public void removeSplits(List<SplitT> splitsToRemove) {
    lock.lock();
    try {
        // Enqueue under the lock, then wake a potentially blocked fetcher thread.
        enqueueTaskUnsafe(
                new RemoveSplitsTask<>(
                        splitReader, splitsToRemove, assignedSplits, splitFinishedHook));
        wakeUpUnsafe(true);
    } finally {
        lock.unlock();
    }
}
|
Notifies the split fetcher that some splits have finished. This operation is asynchronous.
@param splitsToRemove the splits that need to be removed.
|
removeSplits
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcher.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcher.java
|
Apache-2.0
|
/** Exposes the currently assigned splits; package-private for unit tests. */
Map<String, SplitT> assignedSplits() {
    return this.assignedSplits;
}
|
Package private for unit test.
@return the assigned splits.
|
assignedSplits
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcher.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcher.java
|
Apache-2.0
|
/**
 * Reports whether this fetcher has nothing left to do: no assigned splits, no queued tasks,
 * and no task currently running. Package-private for unit tests.
 *
 * @return true if the fetcher is completely idle, false otherwise
 */
boolean isIdle() {
    lock.lock();
    try {
        // All three checks are side-effect free and evaluated under the lock.
        final boolean noPendingWork = taskQueue.isEmpty() && runningTask == null;
        return noPendingWork && assignedSplits.isEmpty();
    } finally {
        lock.unlock();
    }
}
|
Package private for unit test.
@return true if the fetcher has no assigned splits, no queued tasks, and no running task; false otherwise.
|
isIdle
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcher.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcher.java
|
Apache-2.0
|
/**
 * Creates and registers a new split fetcher backed by a freshly created split reader.
 *
 * <p>Synchronized so that no fetcher can be created after the manager has closed.
 *
 * @return the created split fetcher.
 * @throws IllegalStateException if the split fetcher manager has closed.
 */
protected synchronized SplitFetcher<E, SplitT> createSplitFetcher() {
    if (closed) {
        throw new IllegalStateException("The split fetcher manager has closed.");
    }
    // Create SplitReader.
    SplitReader<E, SplitT> splitReader = splitReaderFactory.get();
    int fetcherId = fetcherIdGenerator.getAndIncrement();
    // Counter is decremented again by the shutdown hook below.
    fetchersToShutDown.incrementAndGet();
    SplitFetcher<E, SplitT> splitFetcher =
            new SplitFetcher<>(
                    fetcherId,
                    elementsQueue,
                    splitReader,
                    errorHandler,
                    // Shutdown hook: deregister this fetcher and notify queue consumers so
                    // they re-check the fetcher status. ConcurrentHashMap's aggregate
                    // methods (size/isEmpty/containsValue) are not designed for program
                    // control, hence the explicit queue notification.
                    () -> {
                        fetchers.remove(fetcherId);
                        fetchersToShutDown.decrementAndGet();
                        elementsQueue.notifyAvailable();
                    },
                    this.splitFinishedHook,
                    allowUnalignedSourceSplits);
    fetchers.put(fetcherId, splitFetcher);
    return splitFetcher;
}
|
Synchronize method to ensure no fetcher is created after the split fetcher manager has
closed.
@return the created split fetcher.
@throws IllegalStateException if the split fetcher manager has closed.
|
createSplitFetcher
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherManager.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherManager.java
|
Apache-2.0
|
/**
 * Shuts down and deregisters every fetcher that has become idle (no splits, no tasks).
 *
 * @return true if no fetchers remain afterwards, false otherwise
 */
public boolean maybeShutdownFinishedFetchers() {
    final Iterator<Map.Entry<Integer, SplitFetcher<E, SplitT>>> it =
            fetchers.entrySet().iterator();
    while (it.hasNext()) {
        final Map.Entry<Integer, SplitFetcher<E, SplitT>> next = it.next();
        final SplitFetcher<E, SplitT> candidate = next.getValue();
        if (!candidate.isIdle()) {
            continue;
        }
        LOG.info("Closing splitFetcher {} because it is idle.", next.getKey());
        candidate.shutdown(true);
        it.remove();
    }
    return fetchers.isEmpty();
}
|
Check and shutdown the fetchers that have completed their work.
@return true if all the fetchers have completed the work, false otherwise.
|
maybeShutdownFinishedFetchers
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherManager.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherManager.java
|
Apache-2.0
|
/**
 * Returns the queue that split fetchers feed records into. Internal; used only by {@link
 * SourceReaderBase}.
 */
@Internal
public FutureCompletingBlockingQueue<RecordsWithSplitIds<E>> getQueue() {
    return this.elementsQueue;
}
|
Returns the queue that contains data produced by the split fetchers. This method is internal and
only used in {@link SourceReaderBase}.
|
getQueue
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherManager.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherManager.java
|
Apache-2.0
|
/**
 * Default implementation for readers that cannot pause or resume individual splits: fails
 * fast with guidance instead of silently allowing unaligned splits.
 *
 * @param splitsToPause the splits to pause
 * @param splitsToResume the splits to resume
 */
default void pauseOrResumeSplits(
        Collection<SplitT> splitsToPause, Collection<SplitT> splitsToResume) {
    // The message is intentionally verbose: it documents the escape hatch
    // (pipeline.watermark-alignment.allow-unaligned-source-splits) and its deprecation.
    throw new UnsupportedOperationException(
            "This split reader does not support pausing or resuming splits which can lead to unaligned splits.\n"
                    + "Unaligned splits are splits where the output watermarks of the splits have diverged more than the allowed limit.\n"
                    + "It is highly discouraged to use unaligned source splits, as this leads to unpredictable\n"
                    + "watermark alignment if there is more than a single split per reader. It is recommended to implement pausing splits\n"
                    + "for this source. At your own risk, you can allow unaligned source splits by setting the\n"
                    + "configuration parameter `pipeline.watermark-alignment.allow-unaligned-source-splits' to true.\n"
                    + "Beware that this configuration parameter will be dropped in a future Flink release.");
}
|
Pauses or resumes reading of individual splits readers.
<p>Note that no other methods can be called in parallel, so it's fine to non-atomically
update subscriptions. This method is simply providing connectors with more expressive APIs
the opportunity to update all subscriptions at once.
<p>This is currently used to align the watermarks of splits, if watermark alignment is used
and the source reads from more than one split.
<p>The default implementation throws an {@link UnsupportedOperationException} where the
default implementation will be removed in future releases. To be compatible with future
releases, it is recommended to implement this method and override the default implementation.
@param splitsToPause the splits to pause
@param splitsToResume the splits to resume
|
pauseOrResumeSplits
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/splitreader/SplitReader.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/splitreader/SplitReader.java
|
Apache-2.0
|
/** Renders this change as {@code SplitAddition:[<splits>]} for logs and debugging. */
@Override
public String toString() {
    return "SplitAddition:[" + splits() + "]";
}
|
A change to add splits.
@param <SplitT> the split type.
|
toString
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/splitreader/SplitsAddition.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/splitreader/SplitsAddition.java
|
Apache-2.0
|
/**
 * Returns the current availability future; already complete when the queue is non-empty,
 * otherwise completed on the next insertion or explicit notification. Note that completion
 * does not guarantee the next poll returns data (see class-level contract).
 */
public CompletableFuture<Void> getAvailabilityFuture() {
    return this.currentFuture;
}
|
Returns the availability future. If the queue is non-empty, then this future will already be
complete. Otherwise the obtained future is guaranteed to get completed the next time the
queue becomes non-empty, or a notification happens via {@link #notifyAvailable()}.
<p>It is important that a completed future is no guarantee that the next call to {@link
#poll()} will return a non-null element. If there are concurrent consumer, another consumer
may have taken the available element. Or there was no element in the first place, because the
future was completed through a call to {@link #notifyAvailable()}.
<p>For that reason, it is important to call this method (to obtain a new future) every time
again after {@link #poll()} returned null and you want to wait for data.
|
getAvailabilityFuture
|
java
|
apache/flink
|
flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
|
https://github.com/apache/flink/blob/master/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueue.java
|
Apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.