code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
/**
 * Converts a Flink Gauge into an OpenTelemetry {@link MetricData} gauge.
 *
 * <p>Long/Integer values become a long gauge, all other numeric types a double gauge;
 * non-numeric gauges are skipped.
 *
 * @param collectionMetadata the common collection metadata
 * @param gauge the Flink Gauge to convert
 * @param metricMetadata the metric metadata
 * @return the converted {@link MetricData}, or empty if the gauge value is not numeric
 */
public static Optional<MetricData> convertGauge(
        CollectionMetadata collectionMetadata, Gauge<?> gauge, MetricMetadata metricMetadata) {
    if (!(gauge.getValue() instanceof Number)) {
        // Only numeric gauges can be represented; log and drop everything else.
        LOG.debug(
                "Couldn't adapt gauge {} with value {} and type {}",
                metricMetadata.getName(),
                gauge.getValue(),
                gauge.getValue().getClass().getName());
        return Optional.empty();
    }
    final Number value = (Number) gauge.getValue();
    if (value instanceof Long || value instanceof Integer) {
        // Integral values keep full precision as a long gauge.
        final ImmutableLongPointData point =
                ImmutableLongPointData.create(
                        collectionMetadata.getStartEpochNanos(),
                        collectionMetadata.getEpochNanos(),
                        convertVariables(metricMetadata.getVariables()),
                        value.longValue());
        return Optional.of(
                ImmutableMetricData.createLongGauge(
                        collectionMetadata.getOtelResource(),
                        INSTRUMENTATION_SCOPE_INFO,
                        metricMetadata.getName(),
                        "",
                        "",
                        ImmutableGaugeData.create(Collections.singleton(point))));
    }
    // All other Number types (Double, Float, Short, ...) are reported as doubles.
    final ImmutableDoublePointData point =
            ImmutableDoublePointData.create(
                    collectionMetadata.getStartEpochNanos(),
                    collectionMetadata.getEpochNanos(),
                    convertVariables(metricMetadata.getVariables()),
                    value.doubleValue());
    return Optional.of(
            ImmutableMetricData.createDoubleGauge(
                    collectionMetadata.getOtelResource(),
                    INSTRUMENTATION_SCOPE_INFO,
                    metricMetadata.getName(),
                    "",
                    "",
                    ImmutableGaugeData.create(Collections.singleton(point))));
}
Converts a Flink Gauge to a {@link MetricData}. @param collectionMetadata The common collection metadata @param gauge The Flink Gauge to convert @param metricMetadata The metric metadata @return A {@link MetricData} if it's able to convert successfully
convertGauge
java
apache/flink
flink-metrics/flink-metrics-otel/src/main/java/org/apache/flink/metrics/otel/OpenTelemetryMetricAdapter.java
https://github.com/apache/flink/blob/master/flink-metrics/flink-metrics-otel/src/main/java/org/apache/flink/metrics/otel/OpenTelemetryMetricAdapter.java
Apache-2.0
/**
 * Converts a Flink {@link Meter} into metrics: a "count" counter sub-metric and a
 * "rate" gauge sub-metric.
 *
 * @param collectionMetadata the common collection metadata
 * @param meter the Flink Meter to convert
 * @param count the current count
 * @param previousCount the count observed at the previous collection
 * @param metricMetadata the metric metadata
 * @return the successfully converted {@link MetricData} entries (possibly empty)
 */
public static List<MetricData> convertMeter(
        CollectionMetadata collectionMetadata,
        Meter meter,
        Long count,
        Long previousCount,
        MetricMetadata metricMetadata) {
    final List<MetricData> converted = new ArrayList<>();
    final Optional<MetricData> countData =
            convertCounter(
                    collectionMetadata,
                    count,
                    previousCount,
                    metricMetadata.subMetric("count"));
    countData.ifPresent(converted::add);
    final Optional<MetricData> rateData =
            convertGauge(collectionMetadata, meter::getRate, metricMetadata.subMetric("rate"));
    rateData.ifPresent(converted::add);
    return converted;
}
Converts a Flink Meter to {@link MetricData} entries. @param collectionMetadata The common collection metadata @param meter The Flink Meter to convert @param metricMetadata The metric metadata @return A list containing the count and rate {@link MetricData} entries that were converted successfully
convertMeter
java
apache/flink
flink-metrics/flink-metrics-otel/src/main/java/org/apache/flink/metrics/otel/OpenTelemetryMetricAdapter.java
https://github.com/apache/flink/blob/master/flink-metrics/flink-metrics-otel/src/main/java/org/apache/flink/metrics/otel/OpenTelemetryMetricAdapter.java
Apache-2.0
/**
 * Merges the optionally configured service name and service version from the reporter
 * configuration into the OpenTelemetry {@code Resource}.
 *
 * @param metricConfig the reporter configuration to read the options from
 */
protected void open(MetricConfig metricConfig) {
    final String serviceNameKey = OpenTelemetryReporterOptions.SERVICE_NAME.key();
    if (metricConfig.containsKey(serviceNameKey)) {
        final String serviceName =
                metricConfig.getString(
                        serviceNameKey,
                        OpenTelemetryReporterOptions.SERVICE_NAME.defaultValue());
        resource =
                resource.merge(
                        Resource.create(
                                Attributes.of(ResourceAttributes.SERVICE_NAME, serviceName)));
    }
    final String serviceVersionKey = OpenTelemetryReporterOptions.SERVICE_VERSION.key();
    if (metricConfig.containsKey(serviceVersionKey)) {
        final String serviceVersion =
                metricConfig.getString(
                        serviceVersionKey,
                        OpenTelemetryReporterOptions.SERVICE_VERSION.defaultValue());
        resource =
                resource.merge(
                        Resource.create(
                                Attributes.of(
                                        ResourceAttributes.SERVICE_VERSION, serviceVersion)));
    }
}
A Flink {@link MetricReporter} which is made to export metrics using Open Telemetry's {@link MetricExporter}.
open
java
apache/flink
flink-metrics/flink-metrics-otel/src/main/java/org/apache/flink/metrics/otel/OpenTelemetryReporterBase.java
https://github.com/apache/flink/blob/master/flink-metrics/flink-metrics-otel/src/main/java/org/apache/flink/metrics/otel/OpenTelemetryReporterBase.java
Apache-2.0
/**
 * Removes leading and trailing angle brackets from a scope variable name, e.g.
 * {@code "<host>"} becomes {@code "host"}. Strings not wrapped in brackets are
 * returned unchanged. See ScopeFormat::SCOPE_VARIABLE_PREFIX.
 *
 * @param str the (possibly bracketed) variable name
 * @return the name without surrounding angle brackets
 */
public static String getVariableName(String str) {
    // Both brackets must be present; the length check guards the single-char "<" case.
    final boolean wrapped =
            str.length() >= 2 && str.startsWith("<") && str.endsWith(">");
    return wrapped ? str.substring(1, str.length() - 1) : str;
}
Removes leading and trailing angle brackets. See ScopeFormat::SCOPE_VARIABLE_PREFIX.
getVariableName
java
apache/flink
flink-metrics/flink-metrics-otel/src/main/java/org/apache/flink/metrics/otel/VariableNameUtil.java
https://github.com/apache/flink/blob/master/flink-metrics/flink-metrics-otel/src/main/java/org/apache/flink/metrics/otel/VariableNameUtil.java
Apache-2.0
/**
 * Logs a container output frame, choosing the log level from keywords found in the
 * line: error/exception -> ERROR, warn/fail -> WARN, otherwise INFO.
 */
@Override
public void accept(OutputFrame outputFrame) {
    final OutputFrame.OutputType outputType = outputFrame.getType();
    final String utf8String = outputFrame.getUtf8StringWithoutLineEnding();
    final String normalized = utf8String.toLowerCase();
    // Keyword matching on the lower-cased line; error keywords take precedence.
    final boolean looksLikeError =
            normalized.contains("error") || normalized.contains("exception");
    final boolean looksLikeWarning =
            normalized.contains("warn") || normalized.contains("fail");
    if (looksLikeError) {
        logger.error("{}: {}", outputType, utf8String);
    } else if (looksLikeWarning) {
        logger.warn("{}: {}", outputType, utf8String);
    } else {
        logger.info("{}: {}", outputType, utf8String);
    }
}
Similar to {@link Slf4jLogConsumer} but parses output lines and tries to log them with appropriate levels.
accept
java
apache/flink
flink-metrics/flink-metrics-otel/src/test/java/org/apache/flink/metrics/otel/OpenTelemetryTestBase.java
https://github.com/apache/flink/blob/master/flink-metrics/flink-metrics-otel/src/test/java/org/apache/flink/metrics/otel/OpenTelemetryTestBase.java
Apache-2.0
/**
 * Creates a new {@link StatsDReporter}.
 *
 * <p>The given properties are not read here; presumably configuration happens later via
 * the reporter lifecycle (e.g. {@code open}) — TODO confirm against StatsDReporter.
 */
@Override
public MetricReporter createMetricReporter(Properties properties) {
    return new StatsDReporter();
}
A {@link MetricReporterFactory} implementation that creates a {@link StatsDReporter} instance.
createMetricReporter
java
apache/flink
flink-metrics/flink-metrics-statsd/src/main/java/org/apache/flink/metrics/statsd/StatsDReporterFactory.java
https://github.com/apache/flink/blob/master/flink-metrics/flink-metrics-statsd/src/main/java/org/apache/flink/metrics/statsd/StatsDReporterFactory.java
Apache-2.0
/**
 * Creates an {@code InetSocketAddressServerFactory} that uses the given url factory.
 *
 * @param urlFactory factory used to construct the client-visible URL of the server
 */
public static ServerFactory createWithUrlFactory(UrlFactory urlFactory) {
    return new InetSocketAddressServerFactory(urlFactory);
}
Create a {@link InetSocketAddressServerFactory} that uses the given url factory.
createWithUrlFactory
java
apache/flink
flink-python/src/main/java/org/apache/beam/sdk/fn/server/ServerFactory.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/beam/sdk/fn/server/ServerFactory.java
Apache-2.0
/**
 * Creates an {@code InetSocketAddressServerFactory} that uses ports from the given
 * supplier and the default url factory.
 *
 * @param portSupplier supplies the port for each created server
 */
public static ServerFactory createWithPortSupplier(Supplier<Integer> portSupplier) {
    return new InetSocketAddressServerFactory(UrlFactory.createDefault(), portSupplier);
}
Create a {@link InetSocketAddressServerFactory} that uses ports from a supplier.
createWithPortSupplier
java
apache/flink
flink-python/src/main/java/org/apache/beam/sdk/fn/server/ServerFactory.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/beam/sdk/fn/server/ServerFactory.java
Apache-2.0
/**
 * Creates an {@code InetSocketAddressServerFactory} that uses the given url factory and
 * ports from the given supplier.
 *
 * @param urlFactory factory used to construct the client-visible URL of the server
 * @param portSupplier supplies the port for each created server
 */
public static ServerFactory createWithUrlFactoryAndPortSupplier(
        UrlFactory urlFactory, Supplier<Integer> portSupplier) {
    return new InetSocketAddressServerFactory(urlFactory, portSupplier);
}
Create a {@link InetSocketAddressServerFactory} that uses the given url factory and ports from a supplier.
createWithUrlFactoryAndPortSupplier
java
apache/flink
flink-python/src/main/java/org/apache/beam/sdk/fn/server/ServerFactory.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/beam/sdk/fn/server/ServerFactory.java
Apache-2.0
/**
 * Returns the default url factory, which renders the address as "host:port" via
 * {@code HostAndPort}.
 */
static UrlFactory createDefault() {
    return (host, port) -> HostAndPort.fromParts(host, port).toString();
}
Factory that constructs client-accessible URLs from a local server address and port. Necessary when clients access server from a different networking context.
createDefault
java
apache/flink
flink-python/src/main/java/org/apache/beam/sdk/fn/server/ServerFactory.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/beam/sdk/fn/server/ServerFactory.java
Apache-2.0
/**
 * Creates the single-threaded scheduler used to run delayed destruction of shared
 * resources. The thread is named "grpc-shared-destroyer-%d"; the {@code true} flag
 * presumably marks it as a daemon thread — confirm against GrpcUtil.getThreadFactory.
 */
@Override
public ScheduledExecutorService createScheduledExecutor() {
    return Executors.newSingleThreadScheduledExecutor(
            GrpcUtil.getThreadFactory("grpc-shared-destroyer-%d", true));
}
A holder for shared resource singletons. <p>Components like client channels and servers need certain resources, e.g. a thread pool, to run. If the user has not provided such resources, these components will use a default one, which is shared as a static resource. This class holds these default resources and manages their life-cycles. <p>A resource is identified by the reference of a {@link Resource} object, which is typically a singleton, provided to the get() and release() methods. Each Resource object (not its class) maps to an object cached in the holder. <p>Resources are ref-counted and shut down after a delay when the ref-count reaches zero.
createScheduledExecutor
java
apache/flink
flink-python/src/main/java/org/apache/beam/vendor/grpc/v1p60p1/io/grpc/internal/SharedResourceHolder.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/beam/vendor/grpc/v1p60p1/io/grpc/internal/SharedResourceHolder.java
Apache-2.0
/**
 * Try to get an existing instance of the given resource; if none exists, one is created
 * via the resource's factory. Delegates to the singleton holder.
 *
 * @param resource the singleton object that identifies the requested static resource
 */
public static <T> T get(Resource<T> resource) {
    return holder.getInternal(resource);
}
Try to get an existing instance of the given resource. If an instance does not exist, create a new one with the given factory. @param resource the singleton object that identifies the requested static resource
get
java
apache/flink
flink-python/src/main/java/org/apache/beam/vendor/grpc/v1p60p1/io/grpc/internal/SharedResourceHolder.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/beam/vendor/grpc/v1p60p1/io/grpc/internal/SharedResourceHolder.java
Apache-2.0
/**
 * Releases an instance previously obtained from {@link #get(Resource)}. Delegates to
 * the singleton holder.
 *
 * @param resource the singleton Resource object that identifies the released resource
 * @param instance the released static resource
 * @return always null, so callers can clear their reference in one statement
 */
public static <T> T release(final Resource<T> resource, final T instance) {
    return holder.releaseInternal(resource, instance);
}
Releases an instance of the given resource. <p>The instance must have been obtained from {@link #get(Resource)}. Otherwise will throw IllegalArgumentException. <p>Caller must not release a reference more than once. It's advisory that you clear the reference to the instance with the null returned by this method. @param resource the singleton Resource object that identifies the released static resource @param instance the released static resource @return a null which the caller can use to clear the reference to that instance.
release
java
apache/flink
flink-python/src/main/java/org/apache/beam/vendor/grpc/v1p60p1/io/grpc/internal/SharedResourceHolder.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/beam/vendor/grpc/v1p60p1/io/grpc/internal/SharedResourceHolder.java
Apache-2.0
/**
 * Returns the cached payload for {@code resource}, creating it on first request,
 * cancelling any pending delayed destruction, and bumping the ref-count.
 * Visible to unit tests.
 *
 * @see #get(Resource)
 */
@SuppressWarnings("unchecked")
synchronized <T> T getInternal(Resource<T> resource) {
    // Lazily create the shared instance on first access.
    final Instance cached =
            instances.computeIfAbsent(resource, r -> new Instance(r.create()));
    // The resource is in use again, so abort any scheduled destruction.
    if (cached.destroyTask != null) {
        cached.destroyTask.cancel(false);
        cached.destroyTask = null;
    }
    cached.refcount++;
    return (T) cached.payload;
}
Visible to unit tests. @see #get(Resource)
getInternal
java
apache/flink
flink-python/src/main/java/org/apache/beam/vendor/grpc/v1p60p1/io/grpc/internal/SharedResourceHolder.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/beam/vendor/grpc/v1p60p1/io/grpc/internal/SharedResourceHolder.java
Apache-2.0
/**
 * Creates byte arrays (byte[]). Python 3 pickles an empty byte array with zero
 * constructor arguments, which the superclass does not handle, so that case is
 * short-circuited to a shared empty array.
 */
@Override
public Object construct(Object[] args) {
    return args.length == 0 ? ArrayUtils.EMPTY_BYTE_ARRAY : super.construct(args);
}
Creates byte arrays (byte[]). Deal with an empty byte array pickled by Python 3.
construct
java
apache/flink
flink-python/src/main/java/org/apache/flink/api/common/python/pickle/ByteArrayConstructor.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/api/common/python/pickle/ByteArrayConstructor.java
Apache-2.0
/**
 * Entry point for launching a Python job: parses arguments, starts a py4j gateway
 * server, launches the Python client process, and streams its output until it exits.
 *
 * @param args at least two arguments: "py &lt;file&gt;" or "pym &lt;module&gt;" plus user args
 * @throws Throwable a Java-side exception captured during execution, or a
 *     ProgramAbortException wrapping the Python failure
 */
public static void main(String[] args) throws Throwable {
    // The python job needs at least 2 args.
    // e.g. py a.py [user args]
    // e.g. pym a.b [user args]
    if (args.length < 2) {
        LOG.error(
                "Required at least two arguments, only python file or python module is available.");
        System.exit(1);
    }

    // parse args
    final CommandLineParser<PythonDriverOptions> commandLineParser =
            new CommandLineParser<>(new PythonDriverOptionsParserFactory());
    PythonDriverOptions pythonDriverOptions = null;
    try {
        pythonDriverOptions = commandLineParser.parse(args);
    } catch (Exception e) {
        LOG.error("Could not parse command line arguments {}.", args, e);
        commandLineParser.printHelp(PythonDriver.class.getSimpleName());
        System.exit(1);
    }

    // Get configuration from ContextEnvironment/OptimizerPlanEnvironment. As the
    // configurations of streaming and batch environments are always set at the same
    // time, for streaming jobs we can also get its configuration from batch
    // environments.
    Configuration config =
            Configuration.fromMap(
                    StreamExecutionEnvironment.getExecutionEnvironment()
                            .getConfiguration()
                            .toMap());

    // start gateway server
    GatewayServer gatewayServer = PythonEnvUtils.startGatewayServer();
    PythonEnvUtils.setGatewayServer(gatewayServer);
    PythonEnvUtils.PythonProcessShutdownHook shutdownHook = null;

    // commands which will be executed in the python process.
    final List<String> commands = constructPythonCommands(pythonDriverOptions);
    try {
        // prepare the exec environment of the python process: a unique per-run temp dir.
        String tmpDir =
                System.getProperty("java.io.tmpdir")
                        + File.separator
                        + "pyflink"
                        + File.separator
                        + UUID.randomUUID();
        // start the python process.
        Process pythonProcess =
                PythonEnvUtils.launchPy4jPythonClient(
                        gatewayServer,
                        config,
                        commands,
                        pythonDriverOptions.getEntryPointScript().orElse(null),
                        tmpDir,
                        true);
        // The shutdown hook kills the python process and the gateway if the JVM dies.
        shutdownHook =
                new PythonEnvUtils.PythonProcessShutdownHook(
                        pythonProcess, gatewayServer, tmpDir);
        Runtime.getRuntime().addShutdownHook(shutdownHook);
        BufferedReader in =
                new BufferedReader(
                        new InputStreamReader(
                                pythonProcess.getInputStream(), StandardCharsets.UTF_8));
        LOG.info(
                "--------------------------- Python Process Started --------------------------");
        // print the python process output to stdout and log file
        while (true) {
            String line = in.readLine();
            if (line == null) {
                break;
            } else {
                System.out.println(line);
                LOG.info(line);
            }
        }
        int exitCode = pythonProcess.waitFor();
        LOG.info(
                "--------------------------- Python Process Exited ---------------------------");
        if (exitCode != 0) {
            throw new RuntimeException("Python process exits with code: " + exitCode);
        }
    } catch (Throwable e) {
        LOG.error("Run python process failed", e);

        // Prefer surfacing an exception captured on the Java side over the local one.
        if (PythonEnvUtils.capturedJavaException != null) {
            throw PythonEnvUtils.capturedJavaException;
        } else {
            // throw ProgramAbortException if the caller is interested in the program plan,
            // there is no harm to throw ProgramAbortException even if it is not the case.
            throw new ProgramAbortException(e);
        }
    } finally {
        PythonEnvUtils.setGatewayServer(null);
        // If the hook is still registered (normal exit path), run its cleanup now.
        if (shutdownHook != null && Runtime.getRuntime().removeShutdownHook(shutdownHook)) {
            shutdownHook.run();
        }
    }
}
A main class used to launch Python applications. It executes python as a subprocess and then has it connect back to the JVM to access system properties, etc.
main
java
apache/flink
flink-python/src/main/java/org/apache/flink/client/python/PythonDriver.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/client/python/PythonDriver.java
Apache-2.0
static List<String> constructPythonCommands(final PythonDriverOptions pythonDriverOptions) { final List<String> commands = new ArrayList<>(); // disable output buffer commands.add("-u"); if (pythonDriverOptions.getEntryPointScript().isPresent()) { commands.add(pythonDriverOptions.getEntryPointScript().get()); } else { commands.add("-m"); commands.add(pythonDriverOptions.getEntryPointModule()); } commands.addAll(pythonDriverOptions.getProgramArgs()); return commands; }
Constructs the Python commands which will be executed in python process. @param pythonDriverOptions parsed Python command options
constructPythonCommands
java
apache/flink
flink-python/src/main/java/org/apache/flink/client/python/PythonDriver.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/client/python/PythonDriver.java
Apache-2.0
/**
 * Prepares the {@link PythonEnvironment} used to start the python process.
 *
 * @param config the Python configurations
 * @param entryPointScript the entry point script, may be null
 * @param tmpDir the temporary directory which files will be copied to
 * @return the Python environment which will be executed in the Python process
 * @throws IOException if directory creation, file copy, or archive extraction fails
 */
static PythonEnvironment preparePythonEnvironment(
        ReadableConfig config, String entryPointScript, String tmpDir) throws IOException {
    PythonEnvironment env = new PythonEnvironment();

    // 1. set the path of python interpreter: the config option takes precedence over
    // the PYFLINK_CLIENT_EXECUTABLE environment variable.
    String pythonExec =
            config.getOptional(PYTHON_CLIENT_EXECUTABLE)
                    .orElse(System.getenv(PYFLINK_CLIENT_EXECUTABLE));
    if (pythonExec != null) {
        env.pythonExec = pythonExec;
    }

    // 2. setup temporary local directory for the user files
    tmpDir = new File(tmpDir).getAbsolutePath();
    Path tmpDirPath = new Path(tmpDir);
    tmpDirPath.getFileSystem().mkdirs(tmpDirPath);
    env.tempDirectory = tmpDir;

    // 3. append the internal lib files to PYTHONPATH.
    if (System.getenv(ConfigConstants.ENV_FLINK_OPT_DIR) != null) {
        String pythonLibDir =
                System.getenv(ConfigConstants.ENV_FLINK_OPT_DIR) + File.separator + "python";
        env.pythonPath =
                getLibFiles(pythonLibDir).stream()
                        .map(p -> p.toFile().getAbsolutePath())
                        .collect(Collectors.joining(File.pathSeparator));
    }

    // 4. copy relevant python files to tmp dir and set them in PYTHONPATH.
    if (config.getOptional(PYTHON_FILES).isPresent()) {
        List<Path> pythonFiles =
                Arrays.stream(config.get(PYTHON_FILES).split(FILE_DELIMITER))
                        .map(Path::new)
                        .collect(Collectors.toList());
        addToPythonPath(env, pythonFiles);
    }

    // 5. set the archives directory as the working directory, then the user can access
    // the content of the archives via relative path. Only done when a custom python
    // executable is configured (config option or PYFLINK_CLIENT_EXECUTABLE env var).
    if (config.getOptional(PYTHON_ARCHIVES).isPresent()
            && (config.getOptional(PYTHON_CLIENT_EXECUTABLE).isPresent()
                    || !StringUtils.isNullOrWhitespaceOnly(
                            System.getenv(PYFLINK_CLIENT_EXECUTABLE)))) {
        env.archivesDirectory = String.join(File.separator, tmpDir, PYTHON_ARCHIVES_DIR);

        // extract archives to archives directory
        config.getOptional(PYTHON_ARCHIVES)
                .ifPresent(
                        pyArchives -> {
                            for (String archive : pyArchives.split(FILE_DELIMITER)) {
                                final Path archivePath;
                                final String targetDirName;
                                final String originalFileName;
                                // An archive entry may carry an explicit target directory,
                                // separated by PARAM_DELIMITER ("path#targetDir").
                                if (archive.contains(PythonDependencyUtils.PARAM_DELIMITER)) {
                                    String[] filePathAndTargetDir =
                                            archive.split(
                                                    PythonDependencyUtils.PARAM_DELIMITER, 2);
                                    archivePath = new Path(filePathAndTargetDir[0]);
                                    targetDirName = filePathAndTargetDir[1];
                                    originalFileName = archivePath.getName();
                                } else {
                                    archivePath = new Path(archive);
                                    originalFileName = archivePath.getName();
                                    targetDirName = originalFileName;
                                }

                                Path localArchivePath = archivePath;
                                try {
                                    // Remote archives are copied to the local temp
                                    // directory before extraction.
                                    if (archivePath.getFileSystem().isDistributedFS()) {
                                        localArchivePath =
                                                new Path(
                                                        env.tempDirectory,
                                                        String.join(
                                                                File.separator,
                                                                UUID.randomUUID().toString(),
                                                                originalFileName));
                                        FileUtils.copy(archivePath, localArchivePath, false);
                                    }
                                } catch (IOException e) {
                                    String msg =
                                            String.format(
                                                    "Error occurred when copying %s to %s.",
                                                    archivePath, localArchivePath);
                                    throw new RuntimeException(msg, e);
                                }

                                try {
                                    CompressionUtils.extractFile(
                                            localArchivePath.getPath(),
                                            String.join(
                                                    File.separator,
                                                    env.archivesDirectory,
                                                    targetDirName),
                                            originalFileName);
                                } catch (IOException e) {
                                    throw new RuntimeException(
                                            "Extract archives to archives directory failed.",
                                            e);
                                }
                            }
                        });
    }

    // 6. append configured python.pythonpath to the PYTHONPATH.
    // (this step was mislabeled "4." in the original comments)
    if (config.getOptional(PYTHON_PATH).isPresent()) {
        env.pythonPath =
                String.join(
                        File.pathSeparator,
                        config.getOptional(PYTHON_PATH).get(),
                        env.pythonPath);
    }

    // Finally make the entry point script itself importable.
    if (entryPointScript != null) {
        addToPythonPath(env, Collections.singletonList(new Path(entryPointScript)));
    }
    return env;
}
Prepares PythonEnvironment to start python process. @param config The Python configurations. @param entryPointScript The entry point script, optional. @param tmpDir The temporary directory which files will be copied to. @return PythonEnvironment the Python environment which will be executed in Python process.
preparePythonEnvironment
java
apache/flink
flink-python/src/main/java/org/apache/flink/client/python/PythonEnvUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/client/python/PythonEnvUtils.java
Apache-2.0
/**
 * Creates a symbolic link in the working directory for the pyflink lib, falling back to
 * a plain file copy if link creation fails (e.g. on file systems or platforms without
 * symlink support).
 *
 * @param libPath the pyflink lib file path (link target)
 * @param symbolicLinkPath the symbolic link to create
 * @throws IOException if the fallback copy also fails
 */
private static void createSymbolicLink(
        java.nio.file.Path libPath, java.nio.file.Path symbolicLinkPath) throws IOException {
    try {
        Files.createSymbolicLink(symbolicLinkPath, libPath);
    } catch (IOException e) {
        LOG.warn(
                "Create symbol link from {} to {} failed and copy instead.",
                symbolicLinkPath,
                libPath,
                e);
        Files.copy(libPath, symbolicLinkPath);
    }
}
Creates a symbolic link in the working directory for the pyflink lib. @param libPath the pyflink lib file path. @param symbolicLinkPath the symbolic link to the pyflink lib.
createSymbolicLink
java
apache/flink
flink-python/src/main/java/org/apache/flink/client/python/PythonEnvUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/client/python/PythonEnvUtils.java
Apache-2.0
/**
 * Starts the python process.
 *
 * @param pythonEnv the python environment the process will run in
 * @param commands the commands the python process will execute; the interpreter path is
 *     prepended to this list (note: the list is mutated)
 * @param redirectToPipe whether the child's stdout is piped back to the caller instead
 *     of inherited from the parent process
 * @return the started python process
 * @throws IOException if the process could not be started
 */
static Process startPythonProcess(
        PythonEnvironment pythonEnv, List<String> commands, boolean redirectToPipe)
        throws IOException {
    ProcessBuilder pythonProcessBuilder = new ProcessBuilder();
    Map<String, String> env = pythonProcessBuilder.environment();
    // Put the configured PYTHONPATH entries before any inherited ones so they win.
    if (pythonEnv.pythonPath != null) {
        String defaultPythonPath = env.get("PYTHONPATH");
        if (Strings.isNullOrEmpty(defaultPythonPath)) {
            env.put("PYTHONPATH", pythonEnv.pythonPath);
        } else {
            env.put(
                    "PYTHONPATH",
                    String.join(File.pathSeparator, pythonEnv.pythonPath, defaultPythonPath));
        }
    }
    // Run inside the extracted archives directory so archive content is reachable via
    // relative paths.
    if (pythonEnv.archivesDirectory != null) {
        pythonProcessBuilder.directory(new File(pythonEnv.archivesDirectory));
    }
    pythonEnv.systemEnv.forEach(env::put);
    commands.add(0, pythonEnv.pythonExec);
    pythonProcessBuilder.command(commands);
    // redirect the stderr to stdout
    pythonProcessBuilder.redirectErrorStream(true);
    if (redirectToPipe) {
        pythonProcessBuilder.redirectOutput(ProcessBuilder.Redirect.PIPE);
    } else {
        // set the child process the output same as the parent process.
        pythonProcessBuilder.redirectOutput(ProcessBuilder.Redirect.INHERIT);
    }
    LOG.info(
            "Starting Python process with environment variables: {{}}, command: {}",
            env.entrySet().stream()
                    .map(e -> e.getKey() + "=" + e.getValue())
                    .collect(Collectors.joining(", ")),
            String.join(" ", commands));
    Process process = pythonProcessBuilder.start();
    if (!process.isAlive()) {
        throw new RuntimeException("Failed to start Python process. ");
    }
    return process;
}
Starts the python process. @param pythonEnv the python environment which the process will run in. @param commands the commands that the python process will execute. @return the process representing the python process. @throws IOException Thrown if an error occurs when the python process starts.
startPythonProcess
java
apache/flink
flink-python/src/main/java/org/apache/flink/client/python/PythonEnvUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/client/python/PythonEnvUtils.java
Apache-2.0
/**
 * Creates a GatewayServer run in a daemon thread.
 *
 * <p>The server is constructed on a background daemon thread; the builder result (or a
 * failure) is communicated back through a future. {@code javaPort(0)} lets the server
 * pick an ephemeral port; the callback client targets a free port reserved via
 * {@code NetUtils}. NOTE(review): {@code server.start(true)} presumably forks its own
 * accept thread so the builder thread exits quickly — confirm against py4j docs.
 *
 * @return the created GatewayServer
 * @throws ExecutionException if building or starting the server failed
 * @throws InterruptedException if interrupted while waiting for the server thread
 */
static GatewayServer startGatewayServer() throws ExecutionException, InterruptedException {
    CompletableFuture<GatewayServer> gatewayServerFuture = new CompletableFuture<>();
    Thread thread =
            new Thread(
                    () -> {
                        try (NetUtils.Port port = NetUtils.getAvailablePort()) {
                            int freePort = port.getPort();
                            GatewayServer server =
                                    new GatewayServer.GatewayServerBuilder()
                                            .gateway(
                                                    new Gateway(
                                                            new ConcurrentHashMap<
                                                                    String, Object>(),
                                                            new CallbackClient(freePort)))
                                            .javaPort(0)
                                            .build();
                            // Swap in a daemonized executor before the server is handed out.
                            resetCallbackClientExecutorService(server);
                            gatewayServerFuture.complete(server);
                            server.start(true);
                        } catch (Throwable e) {
                            gatewayServerFuture.completeExceptionally(e);
                        }
                    });
    thread.setName("py4j-gateway");
    thread.setDaemon(true);
    thread.start();
    thread.join();
    // Rethrows (wrapped) any failure captured in the builder thread.
    return gatewayServerFuture.get();
}
Creates a GatewayServer run in a daemon thread. @return The created GatewayServer
startGatewayServer
java
apache/flink
flink-python/src/main/java/org/apache/flink/client/python/PythonEnvUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/client/python/PythonEnvUtils.java
Apache-2.0
/**
 * Replaces the callback client's thread pool so the callback server can be terminated
 * when the gateway server shuts down: the existing (non-daemon) executor is shut down
 * first, then a fresh executor is installed via reflection.
 *
 * <p>NOTE(review): {@code Thread::new} does not set the daemon flag itself; daemon
 * status would only be inherited if the pool threads are created from a daemon thread
 * (as when called from the "py4j-gateway" daemon thread) — confirm this holds for all
 * callers.
 *
 * @param gatewayServer the gateway which creates the callback server
 */
private static void resetCallbackClientExecutorService(GatewayServer gatewayServer)
        throws NoSuchFieldException, IllegalAccessException, NoSuchMethodException,
                InvocationTargetException {
    CallbackClient callbackClient = (CallbackClient) gatewayServer.getCallbackClient();
    // The Java API of py4j does not provide approach to set "daemonize_connections"
    // parameter. Use reflect to daemonize the connection thread.
    Field executor = CallbackClient.class.getDeclaredField("executor");
    executor.setAccessible(true);
    ((ScheduledExecutorService) executor.get(callbackClient)).shutdown();
    executor.set(callbackClient, Executors.newScheduledThreadPool(1, Thread::new));
    // Re-run py4j's private cleanup scheduling against the new executor.
    Method setupCleaner = CallbackClient.class.getDeclaredMethod("setupCleaner");
    setupCleaner.setAccessible(true);
    setupCleaner.invoke(callbackClient);
}
Resets a daemon thread for the callback client thread pool so that the callback server can be terminated when the gateway server is shutting down. We need to shut down the non-daemon thread first, then set a new thread created in a daemon thread to the ExecutorService. @param gatewayServer the gateway which creates the callback server.
resetCallbackClientExecutorService
java
apache/flink
flink-python/src/main/java/org/apache/flink/client/python/PythonEnvUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/client/python/PythonEnvUtils.java
Apache-2.0
/**
 * Resets the callback client of the gatewayServer with the given listening address and
 * port after the callback server has started, then re-installs the daemonized executor
 * (see {@code resetCallbackClientExecutorService}).
 *
 * @param callbackServerListeningAddress the listening address of the callback server
 * @param callbackServerListeningPort the listening port of the callback server
 */
public static void resetCallbackClient(
        GatewayServer gatewayServer,
        String callbackServerListeningAddress,
        int callbackServerListeningPort)
        throws UnknownHostException, InvocationTargetException, NoSuchMethodException,
                IllegalAccessException, NoSuchFieldException {

    gatewayServer.resetCallbackClient(
            InetAddress.getByName(callbackServerListeningAddress),
            callbackServerListeningPort);
    resetCallbackClientExecutorService(gatewayServer);
}
Reset the callback client of gatewayServer with the given callbackListeningAddress and callbackListeningPort after the callback server started. @param callbackServerListeningAddress the listening address of the callback server. @param callbackServerListeningPort the listening port of the callback server.
resetCallbackClient
java
apache/flink
flink-python/src/main/java/org/apache/flink/client/python/PythonEnvUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/client/python/PythonEnvUtils.java
Apache-2.0
/**
 * Shutdown hook body: deletes the temporary working directory, kills the Python
 * process, and always shuts the gateway server down afterwards.
 */
@Override
public void run() {
    // Best-effort cleanup of the temp dir; failures are ignored by design.
    if (tmpDir != null) {
        FileUtils.deleteDirectoryQuietly(new File(tmpDir));
    }
    try {
        shutdownPythonProcess(process, TIMEOUT_MILLIS);
    } finally {
        // Shut down the gateway even if terminating the process failed.
        gatewayServer.shutdown();
    }
}
The shutdown hook used to destroy the Python process.
run
java
apache/flink
flink-python/src/main/java/org/apache/flink/client/python/PythonEnvUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/client/python/PythonEnvUtils.java
Apache-2.0
/**
 * Cache-loader callback: builds a new PythonFunctionFactory for the given cache key's
 * configuration. Any failure is wrapped as an unchecked exception so the cache lookup
 * signature stays free of checked exceptions.
 */
@Override
public PythonFunctionFactory load(CacheKey cacheKey) {
    try {
        return createPythonFunctionFactory(cacheKey.config);
    } catch (Throwable t) {
        throw new RuntimeException(t);
    }
}
The factory which creates the PythonFunction objects from given module name and object name.
load
java
apache/flink
flink-python/src/main/java/org/apache/flink/client/python/PythonFunctionFactory.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/client/python/PythonFunctionFactory.java
Apache-2.0
/**
 * Main method to start a local GatewayServer on an ephemeral port. The chosen java port
 * and callback port are handed to the python side via a handshake file, after which the
 * server stays alive as long as the python-side "Watchdog" responds to pings.
 *
 * <p>See: py4j.GatewayServer.main()
 */
public static void main(String[] args)
        throws IOException, ExecutionException, InterruptedException {
    GatewayServer gatewayServer = PythonEnvUtils.startGatewayServer();
    PythonEnvUtils.setGatewayServer(gatewayServer);
    int boundPort = gatewayServer.getListeningPort();
    Py4JPythonClient callbackClient = gatewayServer.getCallbackClient();
    int callbackPort = callbackClient.getPort();

    if (boundPort == -1) {
        System.out.println("GatewayServer failed to bind; exiting");
        System.exit(1);
    }

    // Tells python side the port of our java rpc server.
    // Written to a temp file and renamed so the python side never observes a
    // partially written handshake file.
    String handshakeFilePath = System.getenv("_PYFLINK_CONN_INFO_PATH");
    File handshakeFile = new File(handshakeFilePath);
    File tmpPath =
            Files.createTempFile(
                            handshakeFile.getParentFile().toPath(), "connection", ".info")
                    .toFile();
    FileOutputStream fileOutputStream = new FileOutputStream(tmpPath);
    DataOutputStream stream = new DataOutputStream(fileOutputStream);
    stream.writeInt(boundPort);
    stream.writeInt(callbackPort);
    stream.close();
    fileOutputStream.close();

    if (!tmpPath.renameTo(handshakeFile)) {
        System.out.println(
                "Unable to write connection information to handshake file: "
                        + handshakeFilePath
                        + ", now exit...");
        System.exit(1);
    }

    try {
        // This ensures that the server dies if its parent program dies.
        Map<String, Object> entryPoint =
                (Map<String, Object>) gatewayServer.getGateway().getEntryPoint();
        // Wait (bounded by TIMEOUT_MILLIS) for the python side to register "Watchdog".
        for (int i = 0; i < TIMEOUT_MILLIS / CHECK_INTERVAL; i++) {
            if (entryPoint.containsKey("Watchdog")) {
                break;
            }
            Thread.sleep(CHECK_INTERVAL);
        }
        if (!entryPoint.containsKey("Watchdog")) {
            System.out.println("Unable to get the Python watchdog object, now exit.");
            System.exit(1);
        }
        Watchdog watchdog = (Watchdog) entryPoint.get("Watchdog");
        // Poll the watchdog; once it stops answering, the parent is assumed dead.
        while (watchdog.ping()) {
            Thread.sleep(CHECK_INTERVAL);
        }
        gatewayServer.shutdown();
        System.exit(0);
    } finally {
        // System.exit never returns, so this is reached only when the try block exits
        // via an exception (e.g. InterruptedException / a ping failure).
        System.exit(1);
    }
}
Main method to start a local GatewayServer on an ephemeral port. It tells the python side via a file. <p>See: py4j.GatewayServer.main()
main
java
apache/flink
flink-python/src/main/java/org/apache/flink/client/python/PythonGatewayServer.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/client/python/PythonGatewayServer.java
Apache-2.0
/**
 * Prints the error message followed by usage hints for the Python shell client.
 *
 * @param msg error message
 */
private static void printError(String msg) {
    System.err.println(msg);
    System.err.println(
            "Valid cluster type are \"local\", \"remote <hostname> <portnumber>\", \"yarn\".");
    System.err.println();
    System.err.println("Specify the help option (-h or --help) to get help on the command.");
}
Prints the error message and help for the client. @param msg error message
printError
java
apache/flink
flink-python/src/main/java/org/apache/flink/client/python/PythonShellParser.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/client/python/PythonShellParser.java
Apache-2.0
/**
 * Constructs yarn options. The python shell option gains a 'y' prefix so it aligns with
 * the yarn options understood by `flink run`.
 *
 * @param options options that will be used in `flink run` (appended to in place)
 * @param yarnOption python shell yarn option to translate
 * @param commandLine parsed python shell options
 */
private static void constructYarnOption(
        List<String> options, Option yarnOption, CommandLine commandLine) {
    final String opt = yarnOption.getOpt();
    // Nothing to translate when the option was not given on the command line.
    if (!commandLine.hasOption(opt)) {
        return;
    }
    options.add("-y" + opt);
    options.add(commandLine.getOptionValue(opt));
}
Constructs yarn options. The python shell option will add prefix 'y' to align yarn options in `flink run`. @param options Options that will be used in `flink run`. @param yarnOption Python shell yarn options. @param commandLine Parsed Python shell parser options.
constructYarnOption
java
apache/flink
flink-python/src/main/java/org/apache/flink/client/python/PythonShellParser.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/client/python/PythonShellParser.java
Apache-2.0
/**
 * Util for creating a
 * {@link RowDataToCsvConverters.RowDataToCsvConverter.RowDataToCsvFormatConverterContext},
 * exposing the package-private constructor to Python code.
 *
 * @param mapper the CSV mapper used for serialization
 * @param container the container node the converted row is written into
 */
public static RowDataToCsvConverters.RowDataToCsvConverter.RowDataToCsvFormatConverterContext
        createRowDataToCsvFormatConverterContext(CsvMapper mapper, ContainerNode<?> container) {
    return new RowDataToCsvConverters.RowDataToCsvConverter.RowDataToCsvFormatConverterContext(
            mapper, container);
}
Util for creating a {@link RowDataToCsvConverters.RowDataToCsvConverter.RowDataToCsvFormatConverterContext}.
createRowDataToCsvFormatConverterContext
java
apache/flink
flink-python/src/main/java/org/apache/flink/formats/csv/PythonCsvUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/formats/csv/PythonCsvUtils.java
Apache-2.0
/**
 * Creates a {@link BulkWriter.Factory} that wraps {@link CsvBulkWriter#forSchema}.
 *
 * @param schema the CSV schema to write with
 * @param physicalDataType the physical data type of the rows to write
 */
public static BulkWriter.Factory<RowData> createCsvBulkWriterFactory(
        CsvSchema schema, DataType physicalDataType) {
    final RowType rowType = (RowType) physicalDataType.getLogicalType();
    return CsvFileFormatFactory.createCsvBulkWriterFactory(schema, rowType);
}
Util for creating a {@link BulkWriter.Factory} that wraps {@link CsvBulkWriter#forSchema}.
createCsvBulkWriterFactory
java
apache/flink
flink-python/src/main/java/org/apache/flink/formats/csv/PythonCsvUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/formats/csv/PythonCsvUtils.java
Apache-2.0
/**
 * Configures the schema to fail if a JSON field is missing.
 *
 * <p>By default, a missing field is ignored and the field is set to null.
 *
 * @return this builder
 */
public Builder failOnMissingField() {
    failOnMissingField = true;
    return this;
}
Configures schema to fail if a JSON field is missing. <p>By default, a missing field is ignored and the field is set to null.
failOnMissingField
java
apache/flink
flink-python/src/main/java/org/apache/flink/formats/json/JsonRowDeserializationSchema.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/formats/json/JsonRowDeserializationSchema.java
Apache-2.0
/**
 * Sets the type information used for JSON serialization.
 *
 * @param typeInfo type information describing the result type; the field names of the
 *     {@link Row} type are used to resolve the JSON properties
 * @return this builder
 * @throws IllegalArgumentException if the given type information is not a RowTypeInfo
 */
public Builder withTypeInfo(TypeInformation<Row> typeInfo) {
    checkArgument(typeInfo instanceof RowTypeInfo, "Only RowTypeInfo is supported");
    final RowTypeInfo rowTypeInfo = (RowTypeInfo) typeInfo;
    this.typeInfo = rowTypeInfo;
    return this;
}
Sets type information for JSON serialization schema. @param typeInfo Type information describing the result type. The field names of {@link Row} are used to parse the JSON properties.
withTypeInfo
java
apache/flink
flink-python/src/main/java/org/apache/flink/formats/json/JsonRowSerializationSchema.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/formats/json/JsonRowSerializationSchema.java
Apache-2.0
/**
 * Finalizes the configuration, validates it and creates the serialization schema.
 *
 * @return the configured {@link JsonRowSerializationSchema}
 * @throws IllegalArgumentException if no type information was set
 */
public JsonRowSerializationSchema build() {
    checkArgument(typeInfo != null, "typeInfo should be set.");
    return new JsonRowSerializationSchema(typeInfo);
}
Finalizes the configuration and checks validity. @return Configured {@link JsonRowSerializationSchema}
build
java
apache/flink
flink-python/src/main/java/org/apache/flink/formats/json/JsonRowSerializationSchema.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/formats/json/JsonRowSerializationSchema.java
Apache-2.0
/**
 * Performs chaining optimization. Iterates the transformations registered with the given
 * StreamExecutionEnvironment and replaces them with their chained equivalents.
 *
 * <p>NOTE(review): the environment's private "transformations" field is accessed via
 * reflection — presumably because there is no public API to replace the registered
 * transformations; this is fragile against upstream field renames.
 *
 * @param env the environment whose registered transformations are rewritten in place
 */
@SuppressWarnings("unchecked")
public static void apply(StreamExecutionEnvironment env) throws Exception {
    // Chaining can be switched off entirely via configuration.
    if (env.getConfiguration().get(PythonOptions.PYTHON_OPERATOR_CHAINING_ENABLED)) {
        final Field transformationsField =
                StreamExecutionEnvironment.class.getDeclaredField("transformations");
        transformationsField.setAccessible(true);
        final List<Transformation<?>> transformations =
                (List<Transformation<?>>) transformationsField.get(env);
        // Replace the registered transformations with the chained ones.
        transformationsField.set(env, optimize(transformations));
    }
}
Perform chaining optimization. It will iterate the transformations defined in the given StreamExecutionEnvironment and update them with the chained transformations.
apply
java
apache/flink
flink-python/src/main/java/org/apache/flink/python/chain/PythonOperatorChainingOptimizer.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/python/chain/PythonOperatorChainingOptimizer.java
Apache-2.0
/**
 * Performs chaining optimization. Iterates the transformations registered with the given
 * StreamExecutionEnvironment, replaces them with their chained equivalents, and returns
 * the (possibly chained) transformation corresponding to the given one.
 *
 * <p>NOTE(review): accesses the environment's private "transformations" field via
 * reflection, like {@code apply(env)}; fragile against upstream field renames.
 *
 * @param env the environment whose registered transformations are rewritten in place
 * @param transformation the transformation to track through the optimization
 * @return the transformation after chaining optimization, or the input unchanged if
 *     chaining is disabled
 */
@SuppressWarnings("unchecked")
public static Transformation<?> apply(
        StreamExecutionEnvironment env, Transformation<?> transformation) throws Exception {
    if (env.getConfiguration().get(PythonOptions.PYTHON_OPERATOR_CHAINING_ENABLED)) {
        final Field transformationsField =
                StreamExecutionEnvironment.class.getDeclaredField("transformations");
        transformationsField.setAccessible(true);
        final List<Transformation<?>> transformations =
                (List<Transformation<?>>) transformationsField.get(env);
        final Tuple2<List<Transformation<?>>, Transformation<?>> resultTuple =
                optimize(transformations, transformation);
        // f0: all chained transformations, f1: the chained counterpart of the input.
        transformationsField.set(env, resultTuple.f0);
        return resultTuple.f1;
    } else {
        return transformation;
    }
}
Perform chaining optimization. It will iterate the transformations defined in the given StreamExecutionEnvironment and update them with the chained transformations. Besides, it will return the transformation after chaining optimization for the given transformation.
apply
java
apache/flink
flink-python/src/main/java/org/apache/flink/python/chain/PythonOperatorChainingOptimizer.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/python/chain/PythonOperatorChainingOptimizer.java
Apache-2.0
/**
 * Performs chaining optimization and returns the chained transformations for the given
 * transformation list.
 *
 * <p>The algorithm repeatedly polls a work queue: for each not-yet-processed
 * transformation it tries to chain it with its input, swaps the fused transformations for
 * the new chained one, and re-enqueues the new transformation together with its inputs so
 * chaining can continue transitively.
 */
public static List<Transformation<?>> optimize(List<Transformation<?>> transformations) {
    final Map<Transformation<?>, Set<Transformation<?>>> outputMap =
            buildOutputMap(transformations);

    // LinkedHashSet keeps a deterministic order of the resulting transformations.
    final LinkedHashSet<Transformation<?>> chainedTransformations = new LinkedHashSet<>();
    // Identity semantics on purpose: transformations are tracked as objects.
    final Set<Transformation<?>> alreadyTransformed = Sets.newIdentityHashSet();

    final Queue<Transformation<?>> toTransformQueue = Queues.newArrayDeque(transformations);
    while (!toTransformQueue.isEmpty()) {
        final Transformation<?> transformation = toTransformQueue.poll();
        if (!alreadyTransformed.contains(transformation)) {
            alreadyTransformed.add(transformation);

            final ChainInfo chainInfo = chainWithInputIfPossible(transformation, outputMap);
            chainedTransformations.add(chainInfo.newTransformation);
            // Drop the transformations that were fused into the new chained one.
            chainedTransformations.removeAll(chainInfo.oldTransformations);
            alreadyTransformed.addAll(chainInfo.oldTransformations);

            // Add the chained transformation and its inputs to the to-optimize list
            toTransformQueue.add(chainInfo.newTransformation);
            toTransformQueue.addAll(chainInfo.newTransformation.getInputs());
        }
    }
    return new ArrayList<>(chainedTransformations);
}
Perform chaining optimization. It will return the chained transformations for the given transformation list.
optimize
java
apache/flink
flink-python/src/main/java/org/apache/flink/python/chain/PythonOperatorChainingOptimizer.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/python/chain/PythonOperatorChainingOptimizer.java
Apache-2.0
/**
 * Performs chaining optimization and returns both the full list of chained
 * transformations and the chained counterpart of the given target transformation.
 *
 * <p>Same queue-driven algorithm as {@code optimize(List)}, but it only seeds the queue
 * with the target transformation and tracks which chained transformation replaces it.
 *
 * @return a tuple of (all chained transformations, the transformation that replaced the
 *     given target after chaining)
 */
public static Tuple2<List<Transformation<?>>, Transformation<?>> optimize(
        List<Transformation<?>> transformations, Transformation<?> targetTransformation) {
    final Map<Transformation<?>, Set<Transformation<?>>> outputMap =
            buildOutputMap(transformations);

    // LinkedHashSet keeps a deterministic order of the resulting transformations.
    final LinkedHashSet<Transformation<?>> chainedTransformations = new LinkedHashSet<>();
    // Identity semantics on purpose: transformations are tracked as objects.
    final Set<Transformation<?>> alreadyTransformed = Sets.newIdentityHashSet();

    final Queue<Transformation<?>> toTransformQueue = Queues.newArrayDeque();
    toTransformQueue.add(targetTransformation);
    while (!toTransformQueue.isEmpty()) {
        final Transformation<?> toTransform = toTransformQueue.poll();
        if (!alreadyTransformed.contains(toTransform)) {
            alreadyTransformed.add(toTransform);

            final ChainInfo chainInfo = chainWithInputIfPossible(toTransform, outputMap);
            chainedTransformations.add(chainInfo.newTransformation);
            // Drop the transformations that were fused into the new chained one.
            chainedTransformations.removeAll(chainInfo.oldTransformations);
            alreadyTransformed.addAll(chainInfo.oldTransformations);

            // Add the chained transformation and its inputs to the to-optimize list
            toTransformQueue.add(chainInfo.newTransformation);
            toTransformQueue.addAll(chainInfo.newTransformation.getInputs());

            // If the target itself was chained, report its replacement to the caller.
            if (toTransform == targetTransformation) {
                targetTransformation = chainInfo.newTransformation;
            }
        }
    }
    return Tuple2.of(new ArrayList<>(chainedTransformations), targetTransformation);
}
Perform chaining optimization. It will return the chained transformations as well as the transformation resulting from applying chaining optimization to the given transformation.
optimize
java
apache/flink
flink-python/src/main/java/org/apache/flink/python/chain/PythonOperatorChainingOptimizer.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/python/chain/PythonOperatorChainingOptimizer.java
Apache-2.0
/**
 * Creates a PythonDependencyInfo from the given configuration and DistributedCache.
 *
 * <p>The dependency entries in the config reference files previously registered in the
 * distributed cache; this method resolves each of them to an absolute local path on the
 * current task.
 *
 * @param config the config holding the distributed-cache info of the Python dependencies
 * @param distributedCache the DistributedCache object of the current task
 * @return the PythonDependencyInfo containing the resolved python dependency information
 */
public static PythonDependencyInfo create(
        ReadableConfig config, DistributedCache distributedCache) {
    // Python files: absolute local path -> name stored in the config for that cache key.
    Map<String, String> pythonFiles = new LinkedHashMap<>();
    for (Map.Entry<String, String> entry :
            config.getOptional(PYTHON_FILES_DISTRIBUTED_CACHE_INFO)
                    .orElse(new HashMap<>())
                    .entrySet()) {
        File pythonFile = distributedCache.getFile(entry.getKey());
        String filePath = pythonFile.getAbsolutePath();
        pythonFiles.put(filePath, entry.getValue());
    }

    // Requirements file and (optionally) the cached-packages directory used for
    // offline installation.
    String requirementsFilePath = null;
    String requirementsCacheDir = null;
    String requirementsFileName =
            config.getOptional(PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO)
                    .orElse(new HashMap<>())
                    .get(PythonDependencyUtils.FILE);
    if (requirementsFileName != null) {
        requirementsFilePath = distributedCache.getFile(requirementsFileName).getAbsolutePath();
        String requirementsFileCacheDir =
                config.getOptional(PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO)
                        .orElse(new HashMap<>())
                        .get(PythonDependencyUtils.CACHE);
        if (requirementsFileCacheDir != null) {
            requirementsCacheDir =
                    distributedCache.getFile(requirementsFileCacheDir).getAbsolutePath();
        }
    }

    // Archives: absolute local path -> target path recorded in the config.
    Map<String, String> archives = new HashMap<>();
    for (Map.Entry<String, String> entry :
            config.getOptional(PYTHON_ARCHIVES_DISTRIBUTED_CACHE_INFO)
                    .orElse(new HashMap<>())
                    .entrySet()) {
        String archiveFilePath = distributedCache.getFile(entry.getKey()).getAbsolutePath();
        String targetPath = entry.getValue();
        archives.put(archiveFilePath, targetPath);
    }

    String pythonExec = config.get(PYTHON_EXECUTABLE);

    return new PythonDependencyInfo(
            pythonFiles,
            requirementsFilePath,
            requirementsCacheDir,
            archives,
            pythonExec,
            config.get(PYTHON_EXECUTION_MODE),
            config.get(PYTHON_PATH));
}
Creates PythonDependencyInfo from GlobalJobParameters and DistributedCache. @param config The config. @param distributedCache The DistributedCache object of current task. @return The PythonDependencyInfo object that contains whole information of python dependency.
create
java
apache/flink
flink-python/src/main/java/org/apache/flink/python/env/PythonDependencyInfo.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/python/env/PythonDependencyInfo.java
Apache-2.0
/**
 * Creates the embedded Python environment used to run Python UDFs inside the embedded
 * interpreter.
 *
 * <p>Only the "thread" execution mode (multi-threaded embedded interpreter) is accepted;
 * any other mode results in a RuntimeException.
 */
@Override
public PythonEnvironment createEnvironment() throws Exception {
    Map<String, String> env = new HashMap<>(getPythonEnv());

    PythonInterpreterConfig.ExecType execType;
    String executionMode = dependencyInfo.getExecutionMode();
    if (executionMode.equalsIgnoreCase("thread")) {
        execType = PythonInterpreterConfig.ExecType.MULTI_THREAD;
    } else {
        throw new RuntimeException(
                String.format("Unsupported execution mode %s.", executionMode));
    }

    if (env.containsKey("FLINK_TESTING")) {
        // When testing, the flink-python module of the source tree is resolved relative
        // to FLINK_HOME (assumes the standard source layout — TODO confirm) and
        // prepended to the PYTHONPATH so the source code is importable.
        String flinkHome = env.get("FLINK_HOME");
        String sourceRootDir = new File(flinkHome, "../../../../").getCanonicalPath();
        String flinkPython = sourceRootDir + "/flink-python";
        // add flink-python of source code to PYTHONPATH
        env.put(
                "PYTHONPATH",
                flinkPython + File.pathSeparator + env.getOrDefault("PYTHONPATH", ""));
    }

    PythonInterpreterConfig interpreterConfig =
            PythonInterpreterConfig.newBuilder()
                    .setPythonExec(dependencyInfo.getPythonExec())
                    .setExcType(execType)
                    .addPythonPaths(env.getOrDefault("PYTHONPATH", ""))
                    .build();
    return new EmbeddedPythonEnvironment(interpreterConfig, env);
}
The base class of python environment manager which is used to create the PythonEnvironment object. It's used to run python UDF in embedded Python environment.
createEnvironment
java
apache/flink
flink-python/src/main/java/org/apache/flink/python/env/embedded/EmbeddedPythonEnvironmentManager.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/python/env/embedded/EmbeddedPythonEnvironmentManager.java
Apache-2.0
/**
 * Creates a {@link ProcessPythonEnvironment} which describes how to launch the Python UDF
 * worker as a separate process via the runner script.
 */
@Override
public PythonEnvironment createEnvironment() throws Exception {
    final HashMap<String, String> env = new HashMap<>(resource.env);
    final String runnerScript =
            PythonEnvironmentManagerUtils.getPythonUdfRunnerScript(
                    dependencyInfo.getPythonExec(), env);
    return new ProcessPythonEnvironment(runnerScript, env);
}
The ProcessPythonEnvironmentManager is used to prepare the working dir of python UDF worker and create ProcessPythonEnvironment object of Beam Fn API. It's used when the python function runner is configured to run python UDF in process mode.
createEnvironment
java
apache/flink
flink-python/src/main/java/org/apache/flink/python/env/process/ProcessPythonEnvironmentManager.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/python/env/process/ProcessPythonEnvironmentManager.java
Apache-2.0
/**
 * Returns an error message enriched with the boot log of the Python environment, when a
 * boot log file exists in the base directory.
 */
public String getBootLog() throws Exception {
    final File bootLogFile =
            new File(resource.baseDirectory + File.separator + "flink-python-udf-boot.log");
    final StringBuilder msg = new StringBuilder("Failed to create stage bundle factory!");
    if (bootLogFile.exists()) {
        final byte[] output = Files.readAllBytes(bootLogFile.toPath());
        msg.append(String.format(" %s", new String(output, Charset.defaultCharset())));
    }
    return msg.toString();
}
Returns the boot log of the Python Environment.
getBootLog
java
apache/flink
flink-python/src/main/java/org/apache/flink/python/env/process/ProcessPythonEnvironmentManager.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/python/env/process/ProcessPythonEnvironmentManager.java
Apache-2.0
/** Reads the current value of the Python gauge via the embedded interpreter callable. */
@Override
public Long getValue() {
    final Object value = callable.invokeMethod("get_value");
    return (Long) value;
}
Flink {@link Gauge} for Python Gauge.
getValue
java
apache/flink
flink-python/src/main/java/org/apache/flink/python/metric/embedded/MetricGauge.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/python/metric/embedded/MetricGauge.java
Apache-2.0
/** Looks up the Beam metrics container registered for the given step name. */
private MetricsContainerImpl getMetricsContainer(String stepName) {
    return this.metricsContainers.getContainer(stepName);
}
Helper class for forwarding Python metrics to Java accumulators and metrics.
getMetricsContainer
java
apache/flink
flink-python/src/main/java/org/apache/flink/python/metric/process/FlinkMetricContainer.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/python/metric/process/FlinkMetricContainer.java
Apache-2.0
/**
 * Updates the container for the given step with metrics from the passed
 * {@link MonitoringInfo}s, then forwards the updates to Flink's internal metrics
 * framework.
 */
public void updateMetrics(String stepName, List<MonitoringInfo> monitoringInfos) {
    final MetricsContainerImpl container = getMetricsContainer(stepName);
    container.update(monitoringInfos);
    updateMetrics(stepName);
}
Update this container with metrics from the passed {@link MonitoringInfo}s, and send updates along to Flink's internal metrics framework.
updateMetrics
java
apache/flink
flink-python/src/main/java/org/apache/flink/python/metric/process/FlinkMetricContainer.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/python/metric/process/FlinkMetricContainer.java
Apache-2.0
/**
 * Extracts the configuration used by the Python operators, merging the Python dependency
 * settings into a single {@link Configuration}.
 */
public static Configuration extractPythonConfiguration(ReadableConfig config) {
    final Configuration dependencyConfig =
            PythonDependencyUtils.configurePythonDependencies(config);
    return new PythonConfig(config, dependencyConfig).toConfiguration();
}
Extract the configurations which is used in the Python operators.
extractPythonConfiguration
java
apache/flink
flink-python/src/main/java/org/apache/flink/python/util/PythonConfigUtil.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/python/util/PythonConfigUtil.java
Apache-2.0
/**
 * Creates a dynamic proxy implementing {@code clazz} that returns the first column of a
 * {@link Row}, cast to {@code T}. {@code T} should be a sub interface of {@link Function}
 * accepting a {@link Row}, e.g. TopicSelector.class for Kafka.
 *
 * @param clazz the desired selector interface to implement
 * @param <T> the selector interface type
 */
@SuppressWarnings("unchecked")
public static <T> T createFirstColumnTopicSelector(Class<T> clazz) {
    final FirstColumnTopicSelectorInvocationHandler handler =
            new FirstColumnTopicSelectorInvocationHandler();
    return (T) Proxy.newProxyInstance(clazz.getClassLoader(), new Class[] {clazz}, handler);
}
Creates a selector that returns the first column of a row, and cast it to {@code clazz}. {@code T} should be a sub interface of {@link Function}, which accepts a {@link Row}. @param clazz The desired selector class to cast to, e.g. TopicSelector.class for Kafka. @param <T> An interface
createFirstColumnTopicSelector
java
apache/flink
flink-python/src/main/java/org/apache/flink/python/util/PythonConnectorUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/python/util/PythonConnectorUtils.java
Apache-2.0
/**
 * Implements the proxied "apply" method by returning the first field of the input
 * {@link Row}. Any other method name, argument shape or argument type is rejected.
 */
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
    Preconditions.checkArgument("apply".equals(method.getName()));
    Preconditions.checkArgument(args.length == 1);
    Preconditions.checkArgument(args[0] instanceof Row);
    final Row row = (Row) args[0];
    Preconditions.checkArgument(row.getArity() >= 1);
    return row.getField(0);
}
The serializable {@link InvocationHandler} as the proxy for first column selector.
invoke
java
apache/flink
flink-python/src/main/java/org/apache/flink/python/util/PythonConnectorUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/python/util/PythonConnectorUtils.java
Apache-2.0
/**
 * Adds a Python dependency, which can be a .py file, a Python package (.zip, .egg etc.)
 * or a local directory. The dependency is registered in the distributed cache (if not
 * already present) and recorded in the dependency config so that it is added to the
 * PYTHONPATH of the Python UDF worker and the local Py4J python client.
 *
 * @param pythonDependencyConfig the config recording the python files info
 * @param filePath the path of the Python dependency
 */
private void addPythonFile(Configuration pythonDependencyConfig, String filePath) {
    Preconditions.checkNotNull(filePath);
    // The key is derived from the path, so the same file is only registered once.
    String fileKey = generateUniqueFileKey(PYTHON_FILE_PREFIX, filePath);
    registerCachedFileIfNotExist(fileKey, filePath);
    if (!pythonDependencyConfig.contains(PYTHON_FILES_DISTRIBUTED_CACHE_INFO)) {
        pythonDependencyConfig.set(
                PYTHON_FILES_DISTRIBUTED_CACHE_INFO, new LinkedHashMap<>());
    }
    // Record cache key -> original file name in the config map.
    // NOTE(review): this mutates the map returned by get() in place — assumes
    // Configuration hands out the live map instance; verify against Configuration.
    pythonDependencyConfig
            .get(PYTHON_FILES_DISTRIBUTED_CACHE_INFO)
            .put(fileKey, new File(filePath).getName());
}
Adds a Python dependency which could be .py files, Python packages(.zip, .egg etc.) or local directories. The dependencies will be added to the PYTHONPATH of the Python UDF worker and the local Py4J python client. @param filePath The path of the Python dependency.
addPythonFile
java
apache/flink
flink-python/src/main/java/org/apache/flink/python/util/PythonDependencyUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/python/util/PythonDependencyUtils.java
Apache-2.0
/**
 * Specifies the third-party dependencies via a requirements file. These dependencies will
 * be installed by the command "pip install -r [requirements file]" before launching the
 * Python UDF worker.
 *
 * @param pythonDependencyConfig the config recording the requirements info
 * @param requirementsFilePath the path of the requirements file
 */
private void setPythonRequirements(
        Configuration pythonDependencyConfig, String requirementsFilePath) {
    // Delegate to the full variant without a cached-packages directory.
    setPythonRequirements(pythonDependencyConfig, requirementsFilePath, null);
}
Specifies the third-party dependencies via a requirements file. These dependencies will be installed by the command "pip install -r [requirements file]" before launching the Python UDF worker. @param requirementsFilePath The path of the requirements file.
setPythonRequirements
java
apache/flink
flink-python/src/main/java/org/apache/flink/python/util/PythonDependencyUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/python/util/PythonDependencyUtils.java
Apache-2.0
/**
 * Specifies the third-party dependencies via a requirements file. The optional
 * {@code requirementsCachedDir} will be uploaded to support offline installation. The
 * dependencies will be installed by the command "pip install -r [requirements file]
 * --find-links [requirements cached dir]" before launching the Python UDF worker.
 *
 * @param pythonDependencyConfig the config recording the requirements info
 * @param requirementsFilePath the path of the requirements file
 * @param requirementsCachedDir the path of the requirements cached directory, may be null
 */
private void setPythonRequirements(
        Configuration pythonDependencyConfig,
        String requirementsFilePath,
        @Nullable String requirementsCachedDir) {
    Preconditions.checkNotNull(requirementsFilePath);
    if (!pythonDependencyConfig.contains(PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO)) {
        pythonDependencyConfig.set(
                PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO, new HashMap<>());
    }
    // Only one requirements specification may be active at a time: drop any previously
    // registered requirements file / cache directory before registering the new ones.
    pythonDependencyConfig.get(PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO).clear();
    removeCachedFilesByPrefix(PYTHON_REQUIREMENTS_FILE_PREFIX);
    removeCachedFilesByPrefix(PYTHON_REQUIREMENTS_CACHE_PREFIX);

    String fileKey =
            generateUniqueFileKey(PYTHON_REQUIREMENTS_FILE_PREFIX, requirementsFilePath);
    registerCachedFileIfNotExist(fileKey, requirementsFilePath);
    pythonDependencyConfig
            .get(PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO)
            .put(FILE, fileKey);

    if (requirementsCachedDir != null) {
        String cacheDirKey =
                generateUniqueFileKey(
                        PYTHON_REQUIREMENTS_CACHE_PREFIX, requirementsCachedDir);
        registerCachedFileIfNotExist(cacheDirKey, requirementsCachedDir);
        pythonDependencyConfig
                .get(PYTHON_REQUIREMENTS_FILE_DISTRIBUTED_CACHE_INFO)
                .put(CACHE, cacheDirKey);
    }
}
Specifies the third-party dependencies via a requirements file. The `requirementsCachedDir` will be uploaded to support offline installation. These dependencies will be installed by the command "pip install -r [requirements file] --find-links [requirements cached dir]" before launching the Python UDF worker. @param requirementsFilePath The path of the requirements file. @param requirementsCachedDir The path of the requirements cached directory.
setPythonRequirements
java
apache/flink
flink-python/src/main/java/org/apache/flink/python/util/PythonDependencyUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/python/util/PythonDependencyUtils.java
Apache-2.0
/**
 * Extracts the key from an input row generated by the Python DataStream map function,
 * which emits (key_selector.get_key(value), value) tuples. The extracted key (the first
 * field) is wrapped in a single-field {@link Row}.
 */
@Override
public Row getKey(Row value) {
    final Object extractedKey = value.getField(0);
    final Row keyWrapper = new Row(1);
    keyWrapper.setField(0, extractedKey);
    return keyWrapper;
}
{@link KeyByKeySelector} is responsible for extracting the first field of the input row as key. The input row is generated by python DataStream map function in the format of (key_selector.get_key(value), value) tuple2.
getKey
java
apache/flink
flink-python/src/main/java/org/apache/flink/streaming/api/functions/python/KeyByKeySelector.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/streaming/api/functions/python/KeyByKeySelector.java
Apache-2.0
/**
 * Returns the first field of the input row, which holds the partition index computed by
 * the user-defined partitioner and key selector function.
 */
@Override
public Integer getKey(Row value) throws Exception {
    final Object partitionIndex = value.getField(0);
    return (Integer) partitionIndex;
}
The {@link PartitionCustomKeySelector} will return the first field of the input row value. The value of the first field is the desired partition index which is computed according to user defined partitioner and keySelector function.
getKey
java
apache/flink
flink-python/src/main/java/org/apache/flink/streaming/api/functions/python/PartitionCustomKeySelector.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/streaming/api/functions/python/PartitionCustomKeySelector.java
Apache-2.0
/** Extracts the timestamp carried in the second field of the input element. */
@Override
public long extractTimestamp(Tuple2<T, Long> element, long recordTimestamp) {
    final Long attachedTimestamp = element.f1;
    return attachedTimestamp;
}
TimestampAssigner which extracts timestamp from the second field of the input element.
extractTimestamp
java
apache/flink
flink-python/src/main/java/org/apache/flink/streaming/api/functions/python/eventtime/CustomTimestampAssigner.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/streaming/api/functions/python/eventtime/CustomTimestampAssigner.java
Apache-2.0
/** Strips the attached timestamp, returning only the payload of the input element. */
@Override
public T map(Tuple2<T, Long> value) throws Exception {
    final T payload = value.f0;
    return payload;
}
MapFunction which removes the timestamp field from the input element.
map
java
apache/flink
flink-python/src/main/java/org/apache/flink/streaming/api/functions/python/eventtime/RemoveTimestampMapFunction.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/streaming/api/functions/python/eventtime/RemoveTimestampMapFunction.java
Apache-2.0
/**
 * Returns whether the current bundle is finished, i.e. no elements are buffered in the
 * ongoing bundle.
 */
public boolean isBundleFinished() {
    return elementCount == 0;
}
Returns whether the bundle is finished.
isBundleFinished
java
apache/flink
flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
Apache-2.0
/**
 * Advances the watermark of all managed timer services, potentially firing event time
 * timers. It also ensures that the fired timers are fully processed by the Python
 * user-defined functions before this method returns.
 */
private void advanceWatermark(Watermark watermark) throws Exception {
    if (getTimeServiceManager().isPresent()) {
        InternalTimeServiceManager<?> timeServiceManager = getTimeServiceManager().get();
        // make sure the registered timer are processed before advancing the watermark to
        // ensure the timers could be triggered
        drainUnregisteredTimers();
        timeServiceManager.advanceWatermark(watermark);

        // Firing timers may buffer elements into the current bundle; keep finishing
        // bundles and re-advancing until no bundle work is pending.
        while (!isBundleFinished()) {
            invokeFinishBundle();
            // make sure the registered timer are processed before advancing the watermark to
            // ensure the timers could be triggered
            drainUnregisteredTimers();
            timeServiceManager.advanceWatermark(watermark);
        }
    }
}
Advances the watermark of all managed timer services, potentially firing event time timers. It also ensures that the fired timers are processed in the Python user-defined functions.
advanceWatermark
java
apache/flink
flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
Apache-2.0
/**
 * Invokes finishBundle once the number of buffered elements reaches the configured
 * maximum bundle size. Called in processElement.
 */
protected void checkInvokeFinishBundleByCount() throws Exception {
    final boolean bundleFull = elementCount >= maxBundleSize;
    if (bundleFull) {
        invokeFinishBundle();
    }
}
Checks whether to invoke finishBundle by elements count. Called in processElement.
checkInvokeFinishBundleByCount
java
apache/flink
flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/AbstractPythonFunctionOperator.java
Apache-2.0
/**
 * Sends timer data to the Python worker when a registered timer fires. The timer data is
 * a Row containing 4 fields: the timer flag (0 for processing time, 1 for event time),
 * the timestamp of the fired timer, the current watermark and the key of the timer. No
 * namespace is involved here (passed as null).
 *
 * @param timeDomain the type of the timer
 * @param timer the fired timer
 * @throws Exception the timer data serializer might throw an exception
 */
private void processTimer(TimeDomain timeDomain, InternalTimer<Row, VoidNamespace> timer)
        throws Exception {
    Row timerData =
            timerHandler.buildTimerData(
                    timeDomain,
                    internalTimerService.currentWatermark(),
                    timer.getTimestamp(),
                    timer.getKey(),
                    null);
    timerDataSerializer.serialize(timerData, baosWrapper);
    pythonFunctionRunner.processTimer(baos.toByteArray());
    // The bytes were handed off; reset the reused buffer for the next record.
    baos.reset();
    elementCount++;
    checkInvokeFinishBundleByCount();
    emitResults();
}
It is responsible to send timer data to python worker when a registered timer is fired. The input data is a Row containing 4 fields: TimerFlag 0 for proc time, 1 for event time; Timestamp of the fired timer; Current watermark and the key of the timer. @param timeDomain The type of the timer. @param timer The fired timer. @throws Exception The runnerInputSerializer might throw exception.
processTimer
java
apache/flink
flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/process/ExternalPythonKeyedCoProcessOperator.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/process/ExternalPythonKeyedCoProcessOperator.java
Apache-2.0
/**
 * Sends timer data to the Python worker when a registered timer fires. The timer data is
 * a Row containing the timer flag (0 for processing time, 1 for event time), the
 * timestamp of the fired timer, the current watermark, the key of the timer and the
 * serialized namespace (null when the timer has the void namespace).
 *
 * @param timeDomain the type of the timer
 * @param timer the fired timer
 * @throws Exception the serializers might throw an exception
 */
private void processTimer(TimeDomain timeDomain, InternalTimer<Row, Object> timer)
        throws Exception {
    Object namespace = timer.getNamespace();
    byte[] encodedNamespace;
    if (VoidNamespace.INSTANCE.equals(namespace)) {
        // No window/namespace logic involved: encode the namespace as null.
        encodedNamespace = null;
    } else {
        namespaceSerializer.serialize(namespace, baosWrapper);
        encodedNamespace = baos.toByteArray();
        // The output buffer is shared with the timer data serialization below.
        baos.reset();
    }
    Row timerData =
            timerHandler.buildTimerData(
                    timeDomain,
                    internalTimerService.currentWatermark(),
                    timer.getTimestamp(),
                    timer.getKey(),
                    encodedNamespace);
    timerDataSerializer.serialize(timerData, baosWrapper);
    pythonFunctionRunner.processTimer(baos.toByteArray());
    baos.reset();
    elementCount++;
    checkInvokeFinishBundleByCount();
    emitResults();
}
It is responsible to send timer data to python worker when a registered timer is fired. The input data is a Row containing 4 fields: TimerFlag 0 for proc time, 1 for event time; Timestamp of the fired timer; Current watermark and the key of the timer. @param timeDomain The type of the timer. @param timer The internal timer. @throws Exception The runnerInputSerializer might throw exception.
processTimer
java
apache/flink
flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/process/ExternalPythonKeyedProcessOperator.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/process/ExternalPythonKeyedProcessOperator.java
Apache-2.0
/**
 * Converts the given byte flag to the corresponding {@link TimerOperandType}.
 *
 * @param value the encoded timer operation flag
 * @return the matching TimerOperandType
 * @throws IllegalArgumentException if no TimerOperandType is mapped to the value
 */
public static TimerOperandType valueOf(byte value) {
    // Single map lookup instead of containsKey + get (the mapping never stores null
    // values, so a null result means "absent").
    TimerOperandType operandType = mapping.get(value);
    if (operandType == null) {
        throw new IllegalArgumentException(
                String.format("Value '%d' cannot be converted to TimerOperandType.", value));
    }
    return operandType;
}
The flag for indicating the timer operation type.
valueOf
java
apache/flink
flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/process/timer/TimerRegistration.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/streaming/api/operators/python/process/timer/TimerRegistration.java
Apache-2.0
/**
 * Creates a specification of the portability Python execution environment, used by Beam's
 * portability framework to create the actual Python execution environment.
 *
 * <p>Only process-based environments are supported; the configured job options and the
 * worker memory limit are forwarded to the worker process via environment variables.
 */
private RunnerApi.Environment createPythonExecutionEnvironment(
        ReadableConfig config, long memoryLimitBytes) throws Exception {
    PythonEnvironment environment = environmentManager.createEnvironment();
    if (environment instanceof ProcessPythonEnvironment) {
        ProcessPythonEnvironment processEnvironment = (ProcessPythonEnvironment) environment;
        Map<String, String> env = processEnvironment.getEnv();
        // Forward configured job options and the memory limit to the worker process.
        config.getOptional(PythonOptions.PYTHON_JOB_OPTIONS).ifPresent(env::putAll);
        env.put(PYTHON_WORKER_MEMORY_LIMIT, String.valueOf(memoryLimitBytes));
        return Environments.createProcessEnvironment(
                "", "", processEnvironment.getCommand(), env);
    }
    throw new RuntimeException("Currently only ProcessPythonEnvironment is supported.");
}
Creates a specification which specifies the portability Python execution environment. It's used by Beam's portability framework to creates the actual Python execution environment.
createPythonExecutionEnvironment
java
apache/flink
flink-python/src/main/java/org/apache/flink/streaming/api/runners/python/beam/BeamPythonFunctionRunner.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/streaming/api/runners/python/beam/BeamPythonFunctionRunner.java
Apache-2.0
/**
 * Creates the stage bundle factory for the given environment. To make error messages more
 * user friendly, failures are rethrown together with the Python boot logs.
 */
private StageBundleFactory createStageBundleFactory(
        JobBundleFactory jobBundleFactory, RunnerApi.Environment environment)
        throws Exception {
    try (TemporaryClassLoaderContext ignored =
            TemporaryClassLoaderContext.of(getClass().getClassLoader())) {
        // It loads classes using service loader under context classloader in Beam,
        // make sure the classloader used to load SPI classes is the same as the class
        // loader of the current class.
        return jobBundleFactory.forStage(createExecutableStage(environment));
    } catch (Throwable e) {
        // Attach the Python boot log so the user sees why the worker failed to start.
        throw new RuntimeException(environmentManager.getBootLog(), e);
    }
}
To make the error messages more user friendly, throws an exception with the boot logs.
createStageBundleFactory
java
apache/flink
flink-python/src/main/java/org/apache/flink/streaming/api/runners/python/beam/BeamPythonFunctionRunner.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/streaming/api/runners/python/beam/BeamPythonFunctionRunner.java
Apache-2.0
/**
 * Dispatches the state request to the matching GET/APPEND/CLEAR handler.
 *
 * @throws RuntimeException for any request type other than GET, APPEND or CLEAR
 */
public BeamFnApi.StateResponse.Builder handle(BeamFnApi.StateRequest request, S state)
        throws Exception {
    final BeamFnApi.StateRequest.RequestCase requestCase = request.getRequestCase();
    if (requestCase == BeamFnApi.StateRequest.RequestCase.GET) {
        return handleGet(request, state);
    }
    if (requestCase == BeamFnApi.StateRequest.RequestCase.APPEND) {
        return handleAppend(request, state);
    }
    if (requestCase == BeamFnApi.StateRequest.RequestCase.CLEAR) {
        return handleClear(request, state);
    }
    throw new RuntimeException(
            String.format("Unsupported request type %s for user state.", requestCase));
}
Abstract class extends {@link BeamStateHandler}, which implements the common handle logic.
handle
java
apache/flink
flink-python/src/main/java/org/apache/flink/streaming/api/runners/python/beam/state/AbstractBeamStateHandler.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/streaming/api/runners/python/beam/state/AbstractBeamStateHandler.java
Apache-2.0
/**
 * Operator list state is not yet supported; this method always throws.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public ListState<byte[]> getListState(BeamFnApi.StateRequest request) throws Exception {
    // UnsupportedOperationException describes the failure more precisely than a plain
    // RuntimeException and remains backward compatible (it is a RuntimeException
    // subclass, so existing catch blocks still match).
    throw new UnsupportedOperationException("Operator list state is still not supported");
}
Currently list state and union-list state are not supported.
getListState
java
apache/flink
flink-python/src/main/java/org/apache/flink/streaming/api/runners/python/beam/state/BeamOperatorStateStore.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/streaming/api/runners/python/beam/state/BeamOperatorStateStore.java
Apache-2.0
/**
 * Creates a {@link BeamStateRequestHandler}.
 *
 * @param keyedStateBackend if null, the handler rejects keyed-state requests
 * @param operatorStateBackend if null, the handler rejects operator-state requests
 * @param keySerializer key serializer for {@link KeyedStateBackend}; must not be null if
 *     {@code keyedStateBackend} is non-null
 * @param namespaceSerializer namespace serializer; may be null when no window logic is involved
 * @param config state-related configurations
 * @return a new {@link BeamStateRequestHandler}
 */
public static BeamStateRequestHandler of(
        @Nullable KeyedStateBackend<?> keyedStateBackend,
        @Nullable OperatorStateBackend operatorStateBackend,
        @Nullable TypeSerializer<?> keySerializer,
        @Nullable TypeSerializer<?> namespaceSerializer,
        ReadableConfig config) {
    BeamStateStore keyedStateStore = BeamStateStore.unsupported();
    if (keyedStateBackend != null) {
        // Use an explicit null check instead of `assert`: assertions are disabled
        // at runtime by default, so the original assert never fired in production.
        java.util.Objects.requireNonNull(
                keySerializer, "keySerializer must not be null when keyedStateBackend is set");
        keyedStateStore =
                new BeamKeyedStateStore(keyedStateBackend, keySerializer, namespaceSerializer);
    }
    BeamStateStore operatorStateStore = BeamStateStore.unsupported();
    if (operatorStateBackend != null) {
        operatorStateStore = new BeamOperatorStateStore(operatorStateBackend);
    }
    BeamStateHandler<ListState<byte[]>> bagStateBeamStateHandler =
            new BeamBagStateHandler(namespaceSerializer);
    BeamStateHandler<MapState<ByteArrayWrapper, byte[]>> mapStateBeamStateHandler =
            new BeamMapStateHandler(config);
    return new BeamStateRequestHandler(
            keyedStateStore,
            operatorStateStore,
            bagStateBeamStateHandler,
            mapStateBeamStateHandler);
}
Create a {@link BeamStateRequestHandler}. @param keyedStateBackend if null, {@link BeamStateRequestHandler} would throw an error when receive keyed-state requests. @param operatorStateBackend if null, {@link BeamStateRequestHandler} would throw an error when receive operator-state requests. @param keySerializer key serializer for {@link KeyedStateBackend}, must not be null if {@code keyedStatedBackend} is not null. @param namespaceSerializer namespace serializer for {@link KeyedStateBackend}, could be null when there's no window logic involved. @param config state-related configurations @return A new {@link BeamBagStateHandler}
of
java
apache/flink
flink-python/src/main/java/org/apache/flink/streaming/api/runners/python/beam/state/BeamStateRequestHandler.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/streaming/api/runners/python/beam/state/BeamStateRequestHandler.java
Apache-2.0
/**
 * Sets the current key on the backend for a streaming operator.
 *
 * <p>In batch execution mode the key is managed by the sorted input, so this is
 * intentionally a no-op there.
 *
 * @param stateBackend the keyed state backend
 * @param currentKey the key to make current
 */
public static <K> void setCurrentKeyForStreaming(
        KeyedStateBackend<K> stateBackend, K currentKey) {
    if (inBatchExecutionMode(stateBackend)) {
        return;
    }
    stateBackend.setCurrentKey(currentKey);
}
Set the current key for streaming operator.
setCurrentKeyForStreaming
java
apache/flink
flink-python/src/main/java/org/apache/flink/streaming/api/utils/PythonOperatorUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/streaming/api/utils/PythonOperatorUtils.java
Apache-2.0
/**
 * Returns the {@link TypeSerializer} matching the given {@link TypeInformation}.
 *
 * <p>Resolution order: a direct lookup in {@code typeInfoToSerializerMap} first, then a
 * chain of {@code instanceof} checks for composite/special type infos, recursing into
 * component types where needed.
 *
 * @param typeInformation the type information to convert
 * @return the corresponding serializer
 * @throws UnsupportedOperationException if no serializer is known for the type
 */
@SuppressWarnings("unchecked")
public static <T> TypeSerializer<T> typeInfoSerializerConverter(
        TypeInformation<T> typeInformation) {
    // Fast path: primitive/basic types are resolved via a static lookup table.
    TypeSerializer<T> typeSerializer =
            typeInfoToSerializerMap.get(typeInformation.getTypeClass());
    if (typeSerializer != null) {
        return typeSerializer;
    } else {
        // Python-pickled payloads are treated as opaque byte arrays.
        if (typeInformation instanceof PickledByteArrayTypeInfo) {
            return (TypeSerializer<T>) BytePrimitiveArraySerializer.INSTANCE;
        }
        // Composite types: recursively convert each field's type info.
        if (typeInformation instanceof RowTypeInfo) {
            RowTypeInfo rowTypeInfo = (RowTypeInfo) typeInformation;
            TypeSerializer<?>[] fieldTypeSerializers =
                    Arrays.stream(rowTypeInfo.getFieldTypes())
                            .map(f -> typeInfoSerializerConverter(f))
                            .toArray(TypeSerializer[]::new);
            return (TypeSerializer<T>) new RowSerializer(fieldTypeSerializers);
        }
        if (typeInformation instanceof TupleTypeInfo) {
            TupleTypeInfo<?> tupleTypeInfo = (TupleTypeInfo<?>) typeInformation;
            TypeInformation<?>[] typeInformations =
                    new TypeInformation[tupleTypeInfo.getArity()];
            for (int idx = 0; idx < tupleTypeInfo.getArity(); idx++) {
                typeInformations[idx] = tupleTypeInfo.getTypeAt(idx);
            }
            TypeSerializer<?>[] fieldTypeSerializers =
                    Arrays.stream(typeInformations)
                            .map(TypeInfoToSerializerConverter::typeInfoSerializerConverter)
                            .toArray(TypeSerializer[]::new);
            return (TypeSerializer<T>)
                    new TupleSerializer<>(
                            Tuple.getTupleClass(tupleTypeInfo.getArity()), fieldTypeSerializers);
        }
        if (typeInformation instanceof BasicArrayTypeInfo) {
            BasicArrayTypeInfo<?, ?> basicArrayTypeInfo =
                    (BasicArrayTypeInfo<?, ?>) typeInformation;
            return (TypeSerializer<T>)
                    new GenericArraySerializer(
                            basicArrayTypeInfo.getComponentTypeClass(),
                            typeInfoSerializerConverter(basicArrayTypeInfo.getComponentInfo()));
        }
        if (typeInformation instanceof ObjectArrayTypeInfo) {
            ObjectArrayTypeInfo<?, ?> objectArrayTypeInfo =
                    (ObjectArrayTypeInfo<?, ?>) typeInformation;
            return (TypeSerializer<T>)
                    new GenericArraySerializer(
                            objectArrayTypeInfo.getComponentInfo().getTypeClass(),
                            typeInfoSerializerConverter(objectArrayTypeInfo.getComponentInfo()));
        }
        if (typeInformation instanceof MapTypeInfo) {
            return (TypeSerializer<T>)
                    new MapSerializer<>(
                            typeInfoSerializerConverter(
                                    ((MapTypeInfo<?, ?>) typeInformation).getKeyTypeInfo()),
                            typeInfoSerializerConverter(
                                    ((MapTypeInfo<?, ?>) typeInformation).getValueTypeInfo()));
        }
        if (typeInformation instanceof ListTypeInfo) {
            return (TypeSerializer<T>)
                    new ListSerializer<>(
                            typeInfoSerializerConverter(
                                    ((ListTypeInfo<?>) typeInformation).getElementTypeInfo()));
        }
        // External types are first translated to a legacy TypeInformation, then converted.
        if (typeInformation instanceof ExternalTypeInfo) {
            return (TypeSerializer<T>)
                    typeInfoSerializerConverter(
                            LegacyTypeInfoDataTypeConverter.toLegacyTypeInfo(
                                    ((ExternalTypeInfo<?>) typeInformation).getDataType()));
        }
        // Table-internal types delegate to the table runtime's own converter.
        if (typeInformation instanceof InternalTypeInfo) {
            InternalTypeInfo<?> internalTypeInfo = (InternalTypeInfo<?>) typeInformation;
            return org.apache.flink.table.runtime.typeutils.PythonTypeUtils
                    .toInternalSerializer(internalTypeInfo.toLogicalType());
        }
        // Avro is an optional dependency, so the class is matched by name to avoid a
        // hard compile-time dependency on flink-avro.
        if (typeInformation
                .getClass()
                .getCanonicalName()
                .equals(
                        "org.apache.flink.formats.avro.typeutils.GenericRecordAvroTypeInfo")) {
            return typeInformation.createSerializer(new SerializerConfigImpl());
        }
    }
    throw new UnsupportedOperationException(
            String.format(
                    "Could not find type serializer for current type [%s].",
                    typeInformation.toString()));
}
Get serializers according to the given typeInformation.
typeInfoSerializerConverter
java
apache/flink
flink-python/src/main/java/org/apache/flink/streaming/api/utils/PythonTypeUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/streaming/api/utils/PythonTypeUtils.java
Apache-2.0
/**
 * Reads the row at the given position from the underlying Arrow data.
 *
 * <p>NOTE: the same reusable {@code reuseRow} instance is returned on every call;
 * callers must fully consume it before invoking {@code read} again.
 *
 * @param rowId the position of the row to read
 * @return the reusable row positioned at {@code rowId}
 */
public RowData read(int rowId) {
    reuseRow.setRowId(rowId);
    return reuseRow;
}
Read the specified row from underlying Arrow format data.
read
java
apache/flink
flink-python/src/main/java/org/apache/flink/table/runtime/arrow/ArrowReader.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/table/runtime/arrow/ArrowReader.java
Apache-2.0
/**
 * Reads from the channel until the buffer is completely filled.
 *
 * @param channel the channel to read from
 * @param dst the destination buffer; filled up to its limit
 * @throws EOFException if the channel ends before the buffer is full
 * @throws IOException if reading from the channel fails
 */
private static void readFully(ReadableByteChannel channel, ByteBuffer dst) throws IOException {
    final int expected = dst.remaining();
    while (dst.hasRemaining()) {
        final int bytesRead = channel.read(dst);
        if (bytesRead < 0) {
            throw new EOFException(
                    String.format("Not enough bytes in channel (expected %d).", expected));
        }
    }
}
Fills a buffer with data read from the channel.
readFully
java
apache/flink
flink-python/src/main/java/org/apache/flink/table/runtime/arrow/ArrowUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/table/runtime/arrow/ArrowUtils.java
Apache-2.0
/**
 * Writes the given row into Arrow format, one field writer per column.
 *
 * @param row the row to serialize
 */
public void write(IN row) {
    int ordinal = 0;
    for (ArrowFieldWriter<IN> writer : fieldWriters) {
        writer.write(row, ordinal);
        ordinal++;
    }
}
Writes the specified row which is serialized into Arrow format.
write
java
apache/flink
flink-python/src/main/java/org/apache/flink/table/runtime/arrow/ArrowWriter.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/table/runtime/arrow/ArrowWriter.java
Apache-2.0
/** Finishes the current row batch: fixes the row count and finalizes every column. */
public void finish() {
    // All field writers have written the same number of rows, so the first
    // writer's count is the batch's row count.
    final int rowCount = fieldWriters[0].getCount();
    root.setRowCount(rowCount);
    for (int i = 0; i < fieldWriters.length; i++) {
        fieldWriters[i].finish();
    }
}
Finishes the writing of the current row batch.
finish
java
apache/flink
flink-python/src/main/java/org/apache/flink/table/runtime/arrow/ArrowWriter.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/table/runtime/arrow/ArrowWriter.java
Apache-2.0
/** Resets the writer state so the next batch of rows can be written. */
public void reset() {
    root.setRowCount(0);
    // Use the parameterized element type instead of the raw ArrowFieldWriter,
    // matching the declared type of the fieldWriters array.
    for (ArrowFieldWriter<IN> fieldWriter : fieldWriters) {
        fieldWriter.reset();
    }
}
Resets the state of the writer to write the next batch of rows.
reset
java
apache/flink
flink-python/src/main/java/org/apache/flink/table/runtime/arrow/ArrowWriter.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/table/runtime/arrow/ArrowWriter.java
Apache-2.0
/**
 * Deserializes the Arrow record batch at the given index from its raw bytes.
 *
 * @param nextIndexOfArrowDataToProcess index into the buffered arrow data
 * @return the deserialized record batch
 * @throws IOException if deserialization fails
 */
private ArrowRecordBatch loadBatch(int nextIndexOfArrowDataToProcess) throws IOException {
    final ReadChannel channel =
            new ReadChannel(
                    Channels.newChannel(
                            new ByteArrayInputStream(arrowData[nextIndexOfArrowDataToProcess])));
    return MessageSerializer.deserializeRecordBatch(channel, allocator);
}
Load the specified batch of data to process.
loadBatch
java
apache/flink
flink-python/src/main/java/org/apache/flink/table/runtime/arrow/sources/ArrowSourceFunction.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/table/runtime/arrow/sources/ArrowSourceFunction.java
Apache-2.0
/** Returns the Arrow {@link ValueVector} that stores this column's sequence of values. */
public ValueVector getValueVector() {
    return valueVector;
}
Returns the underlying container which stores the sequence of values of a column.
getValueVector
java
apache/flink
flink-python/src/main/java/org/apache/flink/table/runtime/arrow/writers/ArrowFieldWriter.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/table/runtime/arrow/writers/ArrowFieldWriter.java
Apache-2.0
/** Returns the number of elements written to this field so far in the current batch. */
public int getCount() {
    return count;
}
Returns the current count of elements written.
getCount
java
apache/flink
flink-python/src/main/java/org/apache/flink/table/runtime/arrow/writers/ArrowFieldWriter.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/table/runtime/arrow/writers/ArrowFieldWriter.java
Apache-2.0
/**
 * Writes the value at the given ordinal of the row and advances the element count.
 *
 * @param row the row containing the value
 * @param ordinal the field position within the row
 */
public void write(IN row, int ordinal) {
    doWrite(row, ordinal);
    count++;
}
Writes the specified ordinal of the specified row.
write
java
apache/flink
flink-python/src/main/java/org/apache/flink/table/runtime/arrow/writers/ArrowFieldWriter.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/table/runtime/arrow/writers/ArrowFieldWriter.java
Apache-2.0
/** Resets the writer so the next batch of fields can be written. */
public void reset() {
    // The two resets are independent; clear the counter and the backing vector.
    count = 0;
    valueVector.reset();
}
Resets the state of the writer to write the next batch of fields.
reset
java
apache/flink
flink-python/src/main/java/org/apache/flink/table/runtime/arrow/writers/ArrowFieldWriter.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/table/runtime/arrow/writers/ArrowFieldWriter.java
Apache-2.0
/** Flushes any elements buffered in the current bundle when the input ends. */
@Override
public void endInput() throws Exception {
    invokeFinishBundle();
}
Base class for all one input stream operators to execute Python functions.
endInput
java
apache/flink
flink-python/src/main/java/org/apache/flink/table/runtime/operators/python/AbstractOneInputPythonFunctionOperator.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/table/runtime/operators/python/AbstractOneInputPythonFunctionOperator.java
Apache-2.0
/**
 * Initializes the reusable row buffers, the timer service and the state-cleanup
 * timer state, then delegates to {@code super.open()}.
 */
@Override
public void open() throws Exception {
    // The structure is: [type]|[normal record]|[timestamp of timer]|[row key]
    // If the type is 'NORMAL_RECORD', store the RowData object in the 2nd column.
    // If the type is 'TRIGGER_TIMER', store the timestamp in 3rd column and the row key
    reuseRowData =
            new UpdatableRowData(GenericRowData.of(NORMAL_RECORD, null, null, null), 4);
    reuseTimerRowData =
            new UpdatableRowData(GenericRowData.of(TRIGGER_TIMER, null, null, null), 4);
    // Processing-time timers drive the state TTL cleanup below.
    timerService =
            new SimpleTimerService(
                    getInternalTimerService(
                            "state-clean-timer", VoidNamespaceSerializer.INSTANCE, this));
    initCleanupTimeState();
    super.open();
}
Opens the operator: initializes the reusable row buffers, the timer service, and the state-cleanup timer state (cleanup behavior derives from the `minRetentionTime`).
open
java
apache/flink
flink-python/src/main/java/org/apache/flink/table/runtime/operators/python/aggregate/AbstractPythonStreamGroupAggregateOperator.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/table/runtime/operators/python/aggregate/AbstractPythonStreamGroupAggregateOperator.java
Apache-2.0
/**
 * Checks whether the raw UDTF result is the finish marker: a single byte 0x00.
 *
 * @param rawUdtfResult the raw result bytes
 * @param length the number of valid bytes in {@code rawUdtfResult}
 * @return true if the result is the finish message
 */
private boolean isFinishResult(byte[] rawUdtfResult, int length) {
    if (length != 1) {
        return false;
    }
    return rawUdtfResult[0] == 0x00;
}
The received udtf execution result is a finish message when it is a byte with value 0x00.
isFinishResult
java
apache/flink
flink-python/src/main/java/org/apache/flink/table/runtime/operators/python/table/PythonTableFunctionOperator.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/table/runtime/operators/python/table/PythonTableFunctionOperator.java
Apache-2.0
/**
 * Creates a table backed by the Python dynamic table source reading the given file.
 *
 * @param tEnv the table environment used to create the table
 * @param filePath path of the input data file
 * @param schema the row data type of the table
 * @param batched whether data is read in batch mode
 * @return the created table
 */
public static Table createTableFromElement(
        TableEnvironment tEnv, String filePath, DataType schema, boolean batched) {
    final Schema tableSchema = Schema.newBuilder().fromRowDataType(schema).build();
    final TableDescriptor descriptor =
            TableDescriptor.forConnector(PythonDynamicTableFactory.IDENTIFIER)
                    .option(PythonDynamicTableOptions.INPUT_FILE_PATH, filePath)
                    .option(PythonDynamicTableOptions.BATCH_MODE, batched)
                    .schema(tableSchema)
                    .build();
    return tEnv.from(descriptor);
}
Create a table from {@link PythonDynamicTableSource} that read data from input file with specific {@link DataType}. @param tEnv The TableEnvironment to create table. @param filePath the file path of the input data. @param schema The python data type. @param batched Whether to read data in a batch @return Table with InputFormat.
createTableFromElement
java
apache/flink
flink-python/src/main/java/org/apache/flink/table/utils/python/PythonTableUtils.java
https://github.com/apache/flink/blob/master/flink-python/src/main/java/org/apache/flink/table/utils/python/PythonTableUtils.java
Apache-2.0
/** Verifies that {@code PipelineOptions.NAME} from the configuration propagates to the generated StreamGraph. */
@Test
void testJobName() {
    String jobName = "MyTestJob";
    Configuration config = new Configuration();
    config.set(PipelineOptions.NAME, jobName);
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(config);
    // A trivial pipeline is enough; only the graph's name is under test.
    env.fromData(Collections.singletonList("test")).sinkTo(new DiscardingSink<>());
    StreamGraph streamGraph = env.getStreamGraph(true);
    assertThat(streamGraph.getJobName()).isEqualTo(jobName);
}
A test class to test PythonConfigUtil getting executionEnvironment correctly.
testJobName
java
apache/flink
flink-python/src/test/java/org/apache/flink/python/util/PythonConfigUtilTest.java
https://github.com/apache/flink/blob/master/flink-python/src/test/java/org/apache/flink/python/util/PythonConfigUtilTest.java
Apache-2.0
/**
 * Verifies that {@code ProtoUtils.parseStateTtlConfigFromProto} faithfully translates a
 * protobuf StateTTLConfig (update type, visibility, TTL, and all three cleanup
 * strategies) into the corresponding {@link StateTtlConfig}.
 */
@Test
void testParseStateTtlConfigFromProto() {
    // Build the proto-side cleanup strategies: full-snapshot, incremental, and RocksDB
    // compaction-filter cleanup, with background cleanup enabled.
    FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies cleanupStrategiesProto =
            FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies.newBuilder()
                    .setIsCleanupInBackground(true)
                    .addStrategies(
                            FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies
                                    .MapStrategiesEntry.newBuilder()
                                    .setStrategy(
                                            FlinkFnApi.StateDescriptor.StateTTLConfig
                                                    .CleanupStrategies.Strategies
                                                    .FULL_STATE_SCAN_SNAPSHOT)
                                    .setEmptyStrategy(
                                            FlinkFnApi.StateDescriptor.StateTTLConfig
                                                    .CleanupStrategies.EmptyCleanupStrategy
                                                    .EMPTY_STRATEGY))
                    .addStrategies(
                            FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies
                                    .MapStrategiesEntry.newBuilder()
                                    .setStrategy(
                                            FlinkFnApi.StateDescriptor.StateTTLConfig
                                                    .CleanupStrategies.Strategies
                                                    .INCREMENTAL_CLEANUP)
                                    .setIncrementalCleanupStrategy(
                                            FlinkFnApi.StateDescriptor.StateTTLConfig
                                                    .CleanupStrategies
                                                    .IncrementalCleanupStrategy.newBuilder()
                                                    .setCleanupSize(10)
                                                    .setRunCleanupForEveryRecord(true)
                                                    .build()))
                    .addStrategies(
                            FlinkFnApi.StateDescriptor.StateTTLConfig.CleanupStrategies
                                    .MapStrategiesEntry.newBuilder()
                                    .setStrategy(
                                            FlinkFnApi.StateDescriptor.StateTTLConfig
                                                    .CleanupStrategies.Strategies
                                                    .ROCKSDB_COMPACTION_FILTER)
                                    .setRocksdbCompactFilterCleanupStrategy(
                                            FlinkFnApi.StateDescriptor.StateTTLConfig
                                                    .CleanupStrategies
                                                    .RocksdbCompactFilterCleanupStrategy
                                                    .newBuilder()
                                                    .setQueryTimeAfterNumEntries(1000)
                                                    .build()))
                    .build();
    // The full proto config under test.
    FlinkFnApi.StateDescriptor.StateTTLConfig stateTTLConfigProto =
            FlinkFnApi.StateDescriptor.StateTTLConfig.newBuilder()
                    .setTtl(1000)
                    .setUpdateType(
                            FlinkFnApi.StateDescriptor.StateTTLConfig.UpdateType
                                    .OnCreateAndWrite)
                    .setStateVisibility(
                            FlinkFnApi.StateDescriptor.StateTTLConfig.StateVisibility
                                    .NeverReturnExpired)
                    .setCleanupStrategies(cleanupStrategiesProto)
                    .build();
    StateTtlConfig stateTTLConfig = ProtoUtils.parseStateTtlConfigFromProto(stateTTLConfigProto);
    // Top-level fields.
    assertThat(stateTTLConfig.getUpdateType())
            .isEqualTo(StateTtlConfig.UpdateType.OnCreateAndWrite);
    assertThat(stateTTLConfig.getStateVisibility())
            .isEqualTo(StateTtlConfig.StateVisibility.NeverReturnExpired);
    assertThat(stateTTLConfig.getTimeToLive()).isEqualTo(Duration.ofMillis(1000));
    assertThat(stateTTLConfig.getTtlTimeCharacteristic())
            .isEqualTo(StateTtlConfig.TtlTimeCharacteristic.ProcessingTime);
    // Cleanup strategies.
    StateTtlConfig.CleanupStrategies cleanupStrategies = stateTTLConfig.getCleanupStrategies();
    assertThat(cleanupStrategies.isCleanupInBackground()).isTrue();
    assertThat(cleanupStrategies.inFullSnapshot()).isTrue();
    StateTtlConfig.IncrementalCleanupStrategy incrementalCleanupStrategy =
            cleanupStrategies.getIncrementalCleanupStrategy();
    assertThat(incrementalCleanupStrategy).isNotNull();
    assertThat(incrementalCleanupStrategy.getCleanupSize()).isEqualTo(10);
    assertThat(incrementalCleanupStrategy.runCleanupForEveryRecord()).isTrue();
    StateTtlConfig.RocksdbCompactFilterCleanupStrategy rocksdbCompactFilterCleanupStrategy =
            cleanupStrategies.getRocksdbCompactFilterCleanupStrategy();
    assertThat(rocksdbCompactFilterCleanupStrategy).isNotNull();
    assertThat(rocksdbCompactFilterCleanupStrategy.getQueryTimeAfterNumEntries())
            .isEqualTo(1000);
    // Periodic compaction time falls back to its 30-day default (not set in the proto).
    assertThat(rocksdbCompactFilterCleanupStrategy.getPeriodicCompactionTime())
            .isEqualTo(Duration.ofDays(30));
}
Test class for testing utilities used to construct protobuf objects or construct objects from protobuf objects.
testParseStateTtlConfigFromProto
java
apache/flink
flink-python/src/test/java/org/apache/flink/streaming/api/utils/ProtoUtilsTest.java
https://github.com/apache/flink/blob/master/flink-python/src/test/java/org/apache/flink/streaming/api/utils/ProtoUtilsTest.java
Apache-2.0
/**
 * Round-trips the test data through {@code ArrowWriter} and {@code ArrowReader} and
 * verifies that every deserialized row deeply equals the original value.
 *
 * <p>Exceptions are allowed to propagate instead of being caught and converted to
 * {@code fail(message)}: the previous pattern discarded the stack trace from the test
 * report and duplicated it on stderr.
 */
@Test
void testBasicFunctionality() throws Exception {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    Tuple2<ArrowWriter<T>, ArrowStreamWriter> tuple2 = createArrowWriter(baos);
    ArrowWriter<T> arrowWriter = tuple2.f0;
    ArrowStreamWriter arrowStreamWriter = tuple2.f1;
    T[] testData = getTestData();
    for (T value : testData) {
        arrowWriter.write(value);
    }
    arrowWriter.finish();
    arrowStreamWriter.writeBatch();
    ArrowReader arrowReader =
            createArrowReader(new ByteArrayInputStream(baos.toByteArray()));
    for (int i = 0; i < testData.length; i++) {
        RowData deserialized = arrowReader.read(i);
        assertThat(deserialized)
                .as("Deserialized value is wrong.")
                .matches(
                        CustomEqualityMatcher.deeplyEquals(testData[i]).withChecker(checker));
    }
}
Abstract test base for {@link ArrowReader} and {@link ArrowWriter}. @param <T> the element type.
testBasicFunctionality
java
apache/flink
flink-python/src/test/java/org/apache/flink/table/runtime/arrow/ArrowReaderWriterTestBase.java
https://github.com/apache/flink/blob/master/flink-python/src/test/java/org/apache/flink/table/runtime/arrow/ArrowReaderWriterTestBase.java
Apache-2.0
/**
 * Builds a non-keyed test harness around the batch Arrow Python aggregate operator,
 * reserving half of the managed memory for the Python use case.
 *
 * @param config the operator configuration
 * @return a ready-to-use test harness
 * @throws Exception if harness setup fails
 */
public OneInputStreamOperatorTestHarness<RowData, RowData> getTestHarness(Configuration config)
        throws Exception {
    final RowType inputType = getInputType();
    final RowType outputType = getOutputType();
    final PythonFunctionInfo[] functionInfos =
            new PythonFunctionInfo[] {
                new PythonFunctionInfo(
                        PythonScalarFunctionOperatorTestBase.DummyPythonFunction.INSTANCE,
                        new Integer[] {0})
            };
    final AbstractArrowPythonAggregateFunctionOperator operator =
            getTestOperator(
                    config, functionInfos, inputType, outputType, new int[] {0}, new int[] {2});
    final OneInputStreamOperatorTestHarness<RowData, RowData> harness =
            new OneInputStreamOperatorTestHarness<>(operator);
    harness.getStreamConfig()
            .setManagedMemoryFractionOperatorOfUseCase(ManagedMemoryUseCase.PYTHON, 0.5);
    harness.setup(new RowDataSerializer(outputType));
    return harness;
}
Base class for Batch Arrow Python aggregate function operator tests.
getTestHarness
java
apache/flink
flink-python/src/test/java/org/apache/flink/table/runtime/operators/python/aggregate/arrow/batch/AbstractBatchArrowPythonAggregateFunctionOperatorTest.java
https://github.com/apache/flink/blob/master/flink-python/src/test/java/org/apache/flink/table/runtime/operators/python/aggregate/arrow/batch/AbstractBatchArrowPythonAggregateFunctionOperatorTest.java
Apache-2.0
/**
 * Builds a keyed test harness (grouped on field 0) around the stream Arrow Python
 * aggregate operator, reserving half of the managed memory for the Python use case.
 *
 * @param config the operator configuration
 * @return a ready-to-use keyed test harness
 * @throws Exception if harness setup fails
 */
public OneInputStreamOperatorTestHarness<RowData, RowData> getTestHarness(Configuration config)
        throws Exception {
    final RowType inputType = getInputType();
    final RowType outputType = getOutputType();
    final PythonFunctionInfo[] functionInfos =
            new PythonFunctionInfo[] {
                new PythonFunctionInfo(
                        PythonScalarFunctionOperatorTestBase.DummyPythonFunction.INSTANCE,
                        new Integer[] {0})
            };
    final AbstractArrowPythonAggregateFunctionOperator operator =
            getTestOperator(
                    config, functionInfos, inputType, outputType, new int[] {0}, new int[] {2});
    // Key the harness on the grouping field so keyed state is available.
    final int[] grouping = new int[] {0};
    final RowDataKeySelector keySelector =
            KeySelectorUtil.getRowDataSelector(
                    Thread.currentThread().getContextClassLoader(),
                    grouping,
                    InternalTypeInfo.of(getInputType()));
    final OneInputStreamOperatorTestHarness<RowData, RowData> harness =
            new KeyedOneInputStreamOperatorTestHarness<>(
                    operator, keySelector, keySelector.getProducedType());
    harness.getStreamConfig()
            .setManagedMemoryFractionOperatorOfUseCase(ManagedMemoryUseCase.PYTHON, 0.5);
    harness.setup(new RowDataSerializer(outputType));
    return harness;
}
Base class for Stream Arrow Python aggregate function operator tests.
getTestHarness
java
apache/flink
flink-python/src/test/java/org/apache/flink/table/runtime/operators/python/aggregate/arrow/stream/AbstractStreamArrowPythonAggregateFunctionOperatorTest.java
https://github.com/apache/flink/blob/master/flink-python/src/test/java/org/apache/flink/table/runtime/operators/python/aggregate/arrow/stream/AbstractStreamArrowPythonAggregateFunctionOperatorTest.java
Apache-2.0
/**
 * Shuts down the client asynchronously.
 *
 * @return a future completed (possibly exceptionally) when shutdown finishes, for
 *     further handling by the caller
 */
public CompletableFuture<?> shutdownAndHandle() {
    return client.shutdown();
}
Shuts down the client and returns a {@link CompletableFuture} that will be completed when the shutdown process is completed. <p>If an exception is thrown for any reason, then the returned future will be completed exceptionally with that exception. @return A {@link CompletableFuture} for further handling of the shutdown result.
shutdownAndHandle
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/QueryableStateClient.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/QueryableStateClient.java
Apache-2.0
/**
 * Shuts down the client and blocks until shutdown completes.
 *
 * <p>Failures are logged as warnings rather than rethrown. If the waiting thread is
 * interrupted, the interrupt status is restored so callers can observe it.
 */
public void shutdownAndWait() {
    try {
        client.shutdown().get();
        LOG.info("The Queryable State Client was shutdown successfully.");
    } catch (InterruptedException e) {
        // Restore the interrupt flag: swallowing it would hide the interruption
        // from the calling thread.
        Thread.currentThread().interrupt();
        LOG.warn("The Queryable State Client shutdown failed: ", e);
    } catch (Exception e) {
        LOG.warn("The Queryable State Client shutdown failed: ", e);
    }
}
Shuts down the client and waits until shutdown is completed. <p>If an exception is thrown, a warning is logged containing the exception message.
shutdownAndWait
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/QueryableStateClient.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/QueryableStateClient.java
Apache-2.0
/**
 * Replaces the current {@link ExecutionConfig} (possibly {@code null}) with the given one.
 *
 * @param config the new configuration
 * @return the previous configuration, or {@code null} if none was set
 */
public ExecutionConfig setExecutionConfig(ExecutionConfig config) {
    final ExecutionConfig previous = this.executionConfig;
    this.executionConfig = config;
    return previous;
}
Replaces the existing {@link ExecutionConfig} (possibly {@code null}), with the provided one. @param config The new {@code configuration}. @return The old configuration, or {@code null} if none was specified.
setExecutionConfig
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/QueryableStateClient.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/QueryableStateClient.java
Apache-2.0
/**
 * Replaces the current user {@link ClassLoader} (possibly {@code null}) with the given one.
 *
 * @param userClassLoader the new user classloader
 * @return the previous classloader, or {@code null} if none was set
 */
public ClassLoader setUserClassLoader(ClassLoader userClassLoader) {
    final ClassLoader previous = this.userClassLoader;
    this.userClassLoader = userClassLoader;
    return previous;
}
Replaces the existing {@link ClassLoader} (possibly {@code null}), with the provided one. @param userClassLoader The new {@code userClassLoader}. @return The old classloader, or {@code null} if none was specified.
setUserClassLoader
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/QueryableStateClient.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/QueryableStateClient.java
Apache-2.0
/**
 * Returns a future holding the state query result.
 *
 * @param jobId JobID of the job the queryable state belongs to
 * @param queryableStateName name under which the state is queryable
 * @param key the key of interest
 * @param keyTypeHint a {@link TypeHint} used to extract the key's type information
 * @param stateDescriptor the {@link StateDescriptor} of the state to query
 * @return future holding the immutable {@link State} object containing the result
 */
@PublicEvolving
public <K, S extends State, V> CompletableFuture<S> getKvState(
        final JobID jobId,
        final String queryableStateName,
        final K key,
        final TypeHint<K> keyTypeHint,
        final StateDescriptor<S, V> stateDescriptor) {
    // checkNotNull returns its argument, so extraction and validation combine.
    final TypeInformation<K> keyTypeInfo =
            Preconditions.checkNotNull(keyTypeHint).getTypeInfo();
    return getKvState(jobId, queryableStateName, key, keyTypeInfo, stateDescriptor);
}
Returns a future holding the request result. @param jobId JobID of the job the queryable state belongs to. @param queryableStateName Name under which the state is queryable. @param key The key we are interested in. @param keyTypeHint A {@link TypeHint} used to extract the type of the key. @param stateDescriptor The {@link StateDescriptor} of the state we want to query. @return Future holding the immutable {@link State} object containing the result.
getKvState
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/QueryableStateClient.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/QueryableStateClient.java
Apache-2.0
/**
 * Returns a future holding the state query result.
 *
 * <p>Delegates to the namespaced overload using the void namespace, i.e. queries
 * state that is not scoped to a window.
 *
 * @param jobId JobID of the job the queryable state belongs to
 * @param queryableStateName name under which the state is queryable
 * @param key the key of interest
 * @param keyTypeInfo the {@link TypeInformation} of the key
 * @param stateDescriptor the {@link StateDescriptor} of the state to query
 * @return future holding the immutable {@link State} object containing the result
 */
@PublicEvolving
public <K, S extends State, V> CompletableFuture<S> getKvState(
        final JobID jobId,
        final String queryableStateName,
        final K key,
        final TypeInformation<K> keyTypeInfo,
        final StateDescriptor<S, V> stateDescriptor) {
    return getKvState(
            jobId,
            queryableStateName,
            key,
            VoidNamespace.INSTANCE,
            keyTypeInfo,
            VoidNamespaceTypeInfo.INSTANCE,
            stateDescriptor);
}
Returns a future holding the request result. @param jobId JobID of the job the queryable state belongs to. @param queryableStateName Name under which the state is queryable. @param key The key we are interested in. @param keyTypeInfo The {@link TypeInformation} of the key. @param stateDescriptor The {@link StateDescriptor} of the state we want to query. @return Future holding the immutable {@link State} object containing the result.
getKvState
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/QueryableStateClient.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/QueryableStateClient.java
Apache-2.0
/**
 * Returns a read-only view of all key-value mappings in the state.
 *
 * @return an unmodifiable set of the state's entries
 */
@Override
public Iterable<Map.Entry<K, V>> entries() {
    return Collections.unmodifiableSet(state.entrySet());
}
Returns all the mappings in the state in a {@link Collections#unmodifiableSet(Set)}. @return A read-only iterable view of all the key-value pairs in the state.
entries
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/ImmutableMapState.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/ImmutableMapState.java
Apache-2.0
/**
 * Returns a read-only view of all keys in the state.
 *
 * @return an unmodifiable set of the state's keys
 */
@Override
public Iterable<K> keys() {
    return Collections.unmodifiableSet(state.keySet());
}
Returns all the keys in the state in a {@link Collections#unmodifiableSet(Set)}. @return A read-only iterable view of all the keys in the state.
keys
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/ImmutableMapState.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/ImmutableMapState.java
Apache-2.0
/**
 * Returns a read-only view of all values in the state.
 *
 * @return an unmodifiable collection of the state's values
 */
@Override
public Iterable<V> values() {
    return Collections.unmodifiableCollection(state.values());
}
Returns all the values in the state in a {@link Collections#unmodifiableCollection(Collection)}. @return A read-only iterable view of all the values in the state.
values
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/ImmutableMapState.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/ImmutableMapState.java
Apache-2.0
/**
 * Serializes key and namespace into a single byte array: {@code key | MAGIC_NUMBER |
 * namespace}. The format matches the RocksDB state backend key layout, so lookups can
 * use the bytes directly without deserializing.
 *
 * @param key key to serialize
 * @param keySerializer serializer for the key
 * @param namespace namespace to serialize
 * @param namespaceSerializer serializer for the namespace
 * @param <K> key type
 * @param <N> namespace type
 * @return buffer holding the serialized key and namespace
 * @throws IOException if serialization fails
 */
public static <K, N> byte[] serializeKeyAndNamespace(
        K key,
        TypeSerializer<K> keySerializer,
        N namespace,
        TypeSerializer<N> namespaceSerializer)
        throws IOException {
    final DataOutputSerializer out = new DataOutputSerializer(32);
    keySerializer.serialize(key, out);
    // The magic byte separates key bytes from namespace bytes.
    out.writeByte(MAGIC_NUMBER);
    namespaceSerializer.serialize(namespace, out);
    return out.getCopyOfBuffer();
}
Serializes the key and namespace into a {@link ByteBuffer}. <p>The serialized format matches the RocksDB state backend key format, i.e. the key and namespace don't have to be deserialized for RocksDB lookups. @param key Key to serialize @param keySerializer Serializer for the key @param namespace Namespace to serialize @param namespaceSerializer Serializer for the namespace @param <K> Key type @param <N> Namespace type @return Buffer holding the serialized key and namespace @throws IOException Serialization errors are forwarded
serializeKeyAndNamespace
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/serialization/KvStateSerializer.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/serialization/KvStateSerializer.java
Apache-2.0
/**
 * Deserializes key and namespace from the serialized {@code key | MAGIC_NUMBER |
 * namespace} layout.
 *
 * @param serializedKeyAndNamespace serialized key and namespace
 * @param keySerializer serializer for the key
 * @param namespaceSerializer serializer for the namespace
 * @param <K> key type
 * @param <N> namespace type
 * @return tuple of the deserialized key and namespace
 * @throws IOException if deserialization fails, the magic byte is wrong, or trailing
 *     bytes remain (all indicating a serializer mismatch)
 */
public static <K, N> Tuple2<K, N> deserializeKeyAndNamespace(
        byte[] serializedKeyAndNamespace,
        TypeSerializer<K> keySerializer,
        TypeSerializer<N> namespaceSerializer)
        throws IOException {
    final DataInputDeserializer in =
            new DataInputDeserializer(
                    serializedKeyAndNamespace, 0, serializedKeyAndNamespace.length);
    try {
        final K key = keySerializer.deserialize(in);
        final byte magicNumber = in.readByte();
        if (magicNumber != MAGIC_NUMBER) {
            throw new IOException("Unexpected magic number " + magicNumber + ".");
        }
        final N namespace = namespaceSerializer.deserialize(in);
        if (in.available() > 0) {
            throw new IOException("Unconsumed bytes in the serialized key and namespace.");
        }
        return new Tuple2<>(key, namespace);
    } catch (IOException e) {
        // Wrap (including the format-check IOExceptions above) with a hint that the
        // serializers here likely don't match those of the KvState instance.
        throw new IOException(
                "Unable to deserialize key "
                        + "and namespace. This indicates a mismatch in the key/namespace "
                        + "serializers used by the KvState instance and this access.",
                e);
    }
}
Deserializes the key and namespace into a {@link Tuple2}. @param serializedKeyAndNamespace Serialized key and namespace @param keySerializer Serializer for the key @param namespaceSerializer Serializer for the namespace @param <K> Key type @param <N> Namespace @return Tuple2 holding deserialized key and namespace @throws IOException if the deserialization fails for any reason
deserializeKeyAndNamespace
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/serialization/KvStateSerializer.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/serialization/KvStateSerializer.java
Apache-2.0
public static <T> byte[] serializeValue(T value, TypeSerializer<T> serializer) throws IOException { if (value != null) { // Serialize DataOutputSerializer dos = new DataOutputSerializer(32); serializer.serialize(value, dos); return dos.getCopyOfBuffer(); } else { return null; } }
Serializes the value with the given serializer. @param value Value of type T to serialize @param serializer Serializer for T @param <T> Type of the value @return Serialized value or <code>null</code> if value <code>null</code> @throws IOException On failure during serialization
serializeValue
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/serialization/KvStateSerializer.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/serialization/KvStateSerializer.java
Apache-2.0
/**
 * Deserializes a single value with the given serializer.
 *
 * <p>The buffer must be fully consumed by the serializer; leftover bytes indicate that the
 * value was written with a different serializer than the one supplied here.
 *
 * @param serializedValue Serialized value of type T
 * @param serializer Serializer for T
 * @param <T> Type of the value
 * @return Deserialized value, or <code>null</code> if the serialized value is <code>null</code>
 * @throws IOException On failure during deserialization
 */
public static <T> T deserializeValue(byte[] serializedValue, TypeSerializer<T> serializer)
        throws IOException {
    if (serializedValue == null) {
        return null;
    }

    final DataInputDeserializer in =
            new DataInputDeserializer(serializedValue, 0, serializedValue.length);
    final T value = serializer.deserialize(in);

    // Any remaining bytes mean a serializer mismatch — fail loudly instead of
    // silently returning a value built from a partial read.
    if (in.available() > 0) {
        throw new IOException(
                "Unconsumed bytes in the deserialized value. "
                        + "This indicates a mismatch in the value serializers "
                        + "used by the KvState instance and this access.");
    }
    return value;
}
Deserializes the value with the given serializer. @param serializedValue Serialized value of type T @param serializer Serializer for T @param <T> Type of the value @return Deserialized value or <code>null</code> if the serialized value is <code>null</code> @throws IOException On failure during deserialization
deserializeValue
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/serialization/KvStateSerializer.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/serialization/KvStateSerializer.java
Apache-2.0
public static <T> List<T> deserializeList(byte[] serializedValue, TypeSerializer<T> serializer) throws IOException { if (serializedValue != null) { final DataInputDeserializer in = new DataInputDeserializer(serializedValue, 0, serializedValue.length); try { final List<T> result = new ArrayList<>(); while (in.available() > 0) { result.add(serializer.deserialize(in)); // The expected binary format has a single byte separator. We // want a consistent binary format in order to not need any // special casing during deserialization. A "cleaner" format // would skip this extra byte, but would require a memory copy // for RocksDB, which stores the data serialized in this way // for lists. if (in.available() > 0) { in.readByte(); } } return result; } catch (IOException e) { throw new IOException( "Unable to deserialize value. " + "This indicates a mismatch in the value serializers " + "used by the KvState instance and this access.", e); } } else { return null; } }
Deserializes all values with the given serializer. @param serializedValue Serialized value of type List&lt;T&gt; @param serializer Serializer for T @param <T> Type of the value @return Deserialized list or <code>null</code> if the serialized value is <code>null</code> @throws IOException On failure during deserialization
deserializeList
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/serialization/KvStateSerializer.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/serialization/KvStateSerializer.java
Apache-2.0