code stringlengths 25 201k | docstring stringlengths 19 96.2k | func_name stringlengths 0 235 | language stringclasses 1 value | repo stringlengths 8 51 | path stringlengths 11 314 | url stringlengths 62 377 | license stringclasses 7 values |
|---|---|---|---|---|---|---|---|
/**
 * Returns an unregistered {@link TaskMetricGroup}, ignoring both arguments. This makes the
 * group a safe drop-in replacement that never registers or reports any metrics.
 *
 * @param executionAttemptID unused; present only to satisfy the interface
 * @param taskName unused; present only to satisfy the interface
 * @return a freshly created unregistered task metric group
 */
@Override
public TaskMetricGroup addTask(
final ExecutionAttemptID executionAttemptID, final String taskName) {
return createUnregisteredTaskMetricGroup();
} | A safe drop-in replacement for {@link TaskManagerJobMetricGroup}s. | addTask | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/UnregisteredMetricGroups.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/UnregisteredMetricGroups.java | Apache-2.0 |
/**
 * Returns an unregistered {@link InternalOperatorMetricGroup} parented to this group,
 * ignoring the operator id and name. Part of the safe no-op replacement for
 * {@link TaskMetricGroup}s.
 *
 * @param operatorID unused; present only to satisfy the interface
 * @param name unused; present only to satisfy the interface
 * @return a freshly created unregistered operator metric group
 */
@Override
public InternalOperatorMetricGroup getOrAddOperator(OperatorID operatorID, String name) {
return createUnregisteredOperatorMetricGroup(this);
} | A safe drop-in replacement for {@link TaskMetricGroup}s. | getOrAddOperator | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/UnregisteredMetricGroups.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/groups/UnregisteredMetricGroups.java | Apache-2.0 |
/**
 * Builds the concrete scope for an operator by binding the configured scope template's
 * variables to values from the operator and its enclosing task / job / task-manager
 * metric groups. The order of the values array must correspond to the template's
 * variable order, so do not reorder the entries below.
 *
 * @param parent the task-level metric group; its ancestors supply job and TM values
 * @param operatorID the operator's id (passed through {@code valueOrNull})
 * @param operatorName the operator's name (passed through {@code valueOrNull})
 * @return the scope components with template variables bound to the values above
 */
public String[] formatScope(
TaskMetricGroup parent, OperatorID operatorID, String operatorName) {
final String[] template = copyTemplate();
// Values in hierarchy order: TM (host, id), job (id, name), task
// (vertex, attempt, name, subtask, attempt number), then operator.
final String[] values = {
parent.parent().parent().hostname(),
parent.parent().parent().taskManagerId(),
valueOrNull(parent.parent().jobId()),
valueOrNull(parent.parent().jobName()),
valueOrNull(parent.vertexId()),
valueOrNull(parent.executionId()),
valueOrNull(parent.taskName()),
String.valueOf(parent.subtaskIndex()),
String.valueOf(parent.attemptNumber()),
valueOrNull(operatorID),
valueOrNull(operatorName)
};
return bindVariables(template, values);
} | The scope format for the {@link InternalOperatorMetricGroup}. | formatScope | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/metrics/scope/OperatorScopeFormat.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/scope/OperatorScopeFormat.java | Apache-2.0 |
/**
 * Formats the given string to resemble a scope variable, i.e. wraps it in the configured
 * variable prefix and suffix delimiters.
 *
 * @param scope The string to format
 * @return The formatted string
 */
public static String asVariable(String scope) {
    return new StringBuilder()
            .append(SCOPE_VARIABLE_PREFIX)
            .append(scope)
            .append(SCOPE_VARIABLE_SUFFIX)
            .toString();
} | Formats the given string to resemble a scope variable.
@param scope The string to format
@return The formatted string | asVariable | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/metrics/scope/ScopeFormat.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/scope/ScopeFormat.java | Apache-2.0 |
/**
 * Creates the scope formats as defined in the given configuration.
 *
 * @param config The configuration that defines the formats
 * @return The ScopeFormats parsed from the configuration
 */
public static ScopeFormats fromConfig(Configuration config) {
    // Read each scope-naming option and hand the raw format strings straight to the
    // constructor; Java evaluates the arguments left-to-right, so the options are
    // looked up in the same order as before.
    return new ScopeFormats(
            config.get(MetricOptions.SCOPE_NAMING_JM),
            config.get(MetricOptions.SCOPE_NAMING_JM_JOB),
            config.get(MetricOptions.SCOPE_NAMING_TM),
            config.get(MetricOptions.SCOPE_NAMING_TM_JOB),
            config.get(MetricOptions.SCOPE_NAMING_TASK),
            config.get(MetricOptions.SCOPE_NAMING_OPERATOR),
            config.get(MetricOptions.SCOPE_NAMING_JM_OPERATOR));
} | Creates the scope formats as defined in the given configuration.
@param config The configuration that defines the formats
@return The ScopeFormats parsed from the configuration | fromConfig | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/metrics/scope/ScopeFormats.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/scope/ScopeFormats.java | Apache-2.0 |
/**
 * Builds the concrete scope for a task by binding the configured scope template's
 * variables to the values of this task and its enclosing job / task-manager groups.
 * The order of the values array must correspond to the template's variable order.
 *
 * @param parent the job-level group; its parent supplies host name and task manager id
 * @param vertexId id of the job vertex this task belongs to
 * @param attemptId id of this execution attempt
 * @param taskName name of the task
 * @param subtask index of this parallel subtask
 * @param attemptNumber number of this execution attempt
 * @return the scope components with template variables bound to the values above
 */
public String[] formatScope(
TaskManagerJobMetricGroup parent,
AbstractID vertexId,
ExecutionAttemptID attemptId,
String taskName,
int subtask,
int attemptNumber) {
final String[] template = copyTemplate();
final String[] values = {
parent.parent().hostname(),
parent.parent().taskManagerId(),
valueOrNull(parent.jobId()),
valueOrNull(parent.jobName()),
valueOrNull(vertexId),
valueOrNull(attemptId),
valueOrNull(taskName),
String.valueOf(subtask),
String.valueOf(attemptNumber)
};
return bindVariables(template, values);
} | The scope format for the {@link TaskMetricGroup}. | formatScope | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/metrics/scope/TaskScopeFormat.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/metrics/scope/TaskScopeFormat.java | Apache-2.0 |
/**
 * Checks if the mini cluster was started and is running.
 *
 * @return true while the cluster is running, false otherwise
 */
public boolean isRunning() {
return running;
} | Checks if the mini cluster was started and is running. | isRunning | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/minicluster/MiniCluster.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/minicluster/MiniCluster.java | Apache-2.0 |
/**
 * Executes a job in detached mode: submits the job and returns as soon as the submission
 * result is available, without waiting for the job itself to finish.
 *
 * @param job The Flink job to execute
 * @throws JobExecutionException Thrown if anything went amiss during initial job launch
 * @throws InterruptedException if waiting for the submission result is interrupted
 */
public void runDetached(JobGraph job) throws JobExecutionException, InterruptedException {
checkNotNull(job, "job is null");
final CompletableFuture<JobSubmissionResult> submissionFuture = submitJob(job);
try {
submissionFuture.get();
} catch (ExecutionException e) {
// unwrap the ExecutionException so the real submission failure becomes the cause
throw new JobExecutionException(
job.getJobID(), ExceptionUtils.stripExecutionException(e));
}
} | This method executes a job in detached mode. The method returns immediately after the job has
been added to the JobManager, without waiting for the job to complete.
@param job The Flink job to execute
@throws JobExecutionException Thrown if anything went amiss during initial job launch, or if
the job terminally failed. | runDetached | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/minicluster/MiniCluster.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/minicluster/MiniCluster.java | Apache-2.0 |
/**
 * Factory method to create the metric registry for the mini cluster. Reporter setups for
 * metrics, traces and events are each built from the same configuration and the mini
 * cluster's plugin manager.
 *
 * @param config The configuration of the mini cluster
 * @param maximumMessageSizeInBytes the maximum message size
 * @return the metric registry used by the mini cluster
 */
protected MetricRegistryImpl createMetricRegistry(
Configuration config, long maximumMessageSizeInBytes) {
return new MetricRegistryImpl(
MetricRegistryConfiguration.fromConfiguration(config, maximumMessageSizeInBytes),
ReporterSetupBuilder.METRIC_SETUP_BUILDER.fromConfiguration(
config,
DefaultReporterFilters::metricsFromConfiguration,
miniClusterConfiguration.getPluginManager()),
ReporterSetupBuilder.TRACE_SETUP_BUILDER.fromConfiguration(
config,
DefaultReporterFilters::tracesFromConfiguration,
miniClusterConfiguration.getPluginManager()),
ReporterSetupBuilder.EVENT_SETUP_BUILDER.fromConfiguration(
config,
DefaultReporterFilters::eventsFromConfiguration,
miniClusterConfiguration.getPluginManager()));
} | Factory method to create the metric registry for the mini cluster.
@param config The configuration of the mini cluster
@param maximumMessageSizeInBytes the maximum message size | createMetricRegistry | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/minicluster/MiniCluster.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/minicluster/MiniCluster.java | Apache-2.0 |
/**
 * Factory method to instantiate the remote RPC service bound to an explicit port, using
 * the test fork-join executor configuration.
 *
 * @param configuration Flink configuration.
 * @param bindAddress The address to bind the RPC service to.
 * @param bindPort The port to bind the RPC service to.
 * @param rpcSystem the RPC system used to build the service
 * @return The instantiated RPC service
 */
protected RpcService createRemoteRpcService(
Configuration configuration, String bindAddress, int bindPort, RpcSystem rpcSystem)
throws Exception {
return rpcSystem
.remoteServiceBuilder(configuration, bindAddress, String.valueOf(bindPort))
.withBindAddress(bindAddress)
.withBindPort(bindPort)
.withExecutorConfiguration(RpcUtils.getTestForkJoinExecutorConfiguration())
.createAndStart();
} | Factory method to instantiate the remote RPC service.
@param configuration Flink configuration.
@param bindAddress The address to bind the RPC service to.
@param bindPort The port range to bind the RPC service to.
@param rpcSystem The RPC system used to create the RPC service.
@return The instantiated RPC service | createRemoteRpcService | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/minicluster/MiniCluster.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/minicluster/MiniCluster.java | Apache-2.0 |
/**
 * Factory method to instantiate the remote RPC service with separate external
 * address/port-range and internal bind address, using the test fork-join executor
 * configuration.
 *
 * @param configuration Flink configuration.
 * @param externalAddress The external address to access the RPC service.
 * @param externalPortRange The external port range to access the RPC service.
 * @param bindAddress The address to bind the RPC service to.
 * @param rpcSystem the RPC system used to build the service
 * @return The instantiated RPC service
 */
protected RpcService createRemoteRpcService(
Configuration configuration,
String externalAddress,
String externalPortRange,
String bindAddress,
RpcSystem rpcSystem)
throws Exception {
return rpcSystem
.remoteServiceBuilder(configuration, externalAddress, externalPortRange)
.withBindAddress(bindAddress)
.withExecutorConfiguration(RpcUtils.getTestForkJoinExecutorConfiguration())
.createAndStart();
} | Factory method to instantiate the remote RPC service.
@param configuration Flink configuration.
@param externalAddress The external address to access the RPC service.
@param externalPortRange The external port range to access the RPC service.
@param bindAddress The address to bind the RPC service to.
@param rpcSystem The RPC system used to create the RPC service.
@return The instantiated RPC service | createRemoteRpcService | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/minicluster/MiniCluster.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/minicluster/MiniCluster.java | Apache-2.0 |
/**
 * Factory which always returns the shared common {@link RpcService} instead of creating
 * a new one.
 *
 * @return the common RPC service of the mini cluster
 */
@Override
public RpcService createRpcService() {
return commonRpcService;
} | Factory which returns always the common {@link RpcService}. | createRpcService | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/minicluster/MiniCluster.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/minicluster/MiniCluster.java | Apache-2.0 |
/**
 * Creates a new {@link TerminatingFatalErrorHandler} for the task executor with the given
 * index. Must be called while holding {@code lock} (see {@code @GuardedBy}).
 *
 * @param index index into the task manager collection identifying the task executor
 * @return a terminating fatal error handler bound to the given index
 */
@GuardedBy("lock")
private TerminatingFatalErrorHandler create(int index) {
return new TerminatingFatalErrorHandler(index);
} | Create a new {@link TerminatingFatalErrorHandler} for the {@link TaskExecutor} with the
given index.
@param index into the {@link #taskManagers} collection to identify the correct {@link
TaskExecutor}.
@return {@link TerminatingFatalErrorHandler} for the given index | create | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/minicluster/MiniCluster.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/minicluster/MiniCluster.java | Apache-2.0 |
/**
 * Finds the local network address from which this machine can connect to the target
 * address. The method cycles through the detection strategies (local host, matching
 * prefix, fast connect, slow connect) with an exponential backoff between passes until
 * {@code maxWaitMillis} have elapsed, then falls back to a heuristic choice.
 *
 * <p>Logging stays on DEBUG level until {@code startLoggingAfter} milliseconds have
 * passed, to avoid flooding the log while the backoff time is still very low.
 *
 * @param targetAddress The address that the method tries to connect to.
 * @param maxWaitMillis The maximum time that this method tries to connect, before falling
 *     back to the heuristics.
 * @param startLoggingAfter The time after which the method will log on INFO level.
 * @return a local address from which the target is reachable, or a heuristic fallback
 * @throws IOException if probing fails, or if the connection attempts are interrupted
 */
public static InetAddress findConnectingAddress(
        InetSocketAddress targetAddress, long maxWaitMillis, long startLoggingAfter)
        throws IOException {
    if (targetAddress == null) {
        throw new NullPointerException("targetAddress must not be null");
    }
    if (maxWaitMillis <= 0) {
        throw new IllegalArgumentException("Max wait time must be positive");
    }

    final long startTimeNanos = System.nanoTime();
    long currentSleepTime = MIN_SLEEP_TIME;
    long elapsedTimeMillis = 0;

    final List<AddressDetectionState> strategies =
            Collections.unmodifiableList(
                    Arrays.asList(
                            AddressDetectionState.LOCAL_HOST,
                            AddressDetectionState.ADDRESS,
                            AddressDetectionState.FAST_CONNECT,
                            AddressDetectionState.SLOW_CONNECT));

    // loop while there is time left
    while (elapsedTimeMillis < maxWaitMillis) {
        boolean logging = elapsedTimeMillis >= startLoggingAfter;
        if (logging) {
            // parameterized logging avoids eager string concatenation
            LOG.info("Trying to connect to {}", targetAddress);
        }

        // Try each strategy in order
        for (AddressDetectionState strategy : strategies) {
            InetAddress address = findAddressUsingStrategy(strategy, targetAddress, logging);
            if (address != null) {
                return address;
            }
        }

        // we have made a pass with all strategies over all interfaces
        // sleep for a while before we make the next pass
        elapsedTimeMillis = (System.nanoTime() - startTimeNanos) / 1_000_000;

        long toWait = Math.min(maxWaitMillis - elapsedTimeMillis, currentSleepTime);
        if (toWait > 0) {
            if (logging) {
                LOG.info("Could not connect. Waiting for {} msecs before next attempt", toWait);
            } else {
                LOG.debug(
                        "Could not connect. Waiting for {} msecs before next attempt", toWait);
            }

            try {
                Thread.sleep(toWait);
            } catch (InterruptedException e) {
                // restore the interrupt flag so callers can still observe the interruption,
                // and keep the original exception as the cause
                Thread.currentThread().interrupt();
                throw new IOException("Connection attempts have been interrupted.", e);
            }
        }

        // increase the exponential backoff timer
        currentSleepTime = Math.min(2 * currentSleepTime, MAX_SLEEP_TIME);
    }

    // our attempts timed out. use the heuristic fallback
    LOG.warn(
            "Could not connect to {}. Selecting a local address using heuristics.",
            targetAddress);
    InetAddress heuristic =
            findAddressUsingStrategy(AddressDetectionState.HEURISTIC, targetAddress, true);
    if (heuristic != null) {
        return heuristic;
    } else {
        LOG.warn(
                "Could not find any IPv4 address that is not loopback or link-local. Using localhost address.");
        return InetAddress.getLocalHost();
    }
} | Finds the local network address from which this machine can connect to the target address.
This method tries to establish a proper network connection to the given target, so it only
succeeds if the target socket address actually accepts connections. The method tries various
strategies multiple times and uses an exponential backoff timer between tries.
<p>If no connection attempt was successful after the given maximum time, the method will
choose some address based on heuristics (excluding link-local and loopback addresses.)
<p>This method will initially not log on info level (to not flood the log while the backoff
time is still very low). It will start logging after a certain time has passes.
@param targetAddress The address that the method tries to connect to.
@param maxWaitMillis The maximum time that this method tries to connect, before falling back
to the heuristics.
@param startLoggingAfter The time after which the method will log on INFO level. | findConnectingAddress | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/net/ConnectionUtils.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/net/ConnectionUtils.java | Apache-2.0 |
/**
 * Before returning a heuristically detected address, makes one final attempt to connect to
 * the target from {@code InetAddress.getLocalHost()}; if that succeeds, the local host
 * address is preferred. A second try is made because the JobManager might have been
 * unavailable during the first check.
 *
 * @param preliminaryResult The address detected by the heuristic
 * @param targetAddress the target to probe-connect to
 * @param logging whether failed attempts should be logged on INFO level
 * @return either the preliminaryResult or the address returned by
 *     InetAddress.getLocalHost() (if we are able to connect to targetAddress from there)
 * @throws IOException if resolving the local host or probing the connection fails
 */
private static InetAddress tryLocalHostBeforeReturning(
InetAddress preliminaryResult, SocketAddress targetAddress, boolean logging)
throws IOException {
InetAddress localhostName = InetAddress.getLocalHost();
if (preliminaryResult.equals(localhostName)) {
// preliminary result is equal to the local host name
return preliminaryResult;
} else if (tryToConnect(
localhostName,
targetAddress,
AddressDetectionState.SLOW_CONNECT.getTimeout(),
logging)) {
// success, we were able to use local host to connect
LOG.debug(
"Preferring {} (InetAddress.getLocalHost()) for local bind point over previous candidate {}",
localhostName,
preliminaryResult);
return localhostName;
} else {
// we have to make the preliminary result the final result
return preliminaryResult;
}
} | This utility method tries to connect to the JobManager using the InetAddress returned by
InetAddress.getLocalHost(). The purpose of the utility is to have a final try connecting to
the target address using the LocalHost before using the address returned. We do a second try
because the JM might have been unavailable during the first check.
@param preliminaryResult The address detected by the heuristic
@return either the preliminaryResult or the address returned by InetAddress.getLocalHost()
(if we are able to connect to targetAddress from there) | tryLocalHostBeforeReturning | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/net/ConnectionUtils.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/net/ConnectionUtils.java | Apache-2.0 |
/**
 * Tries to find a local address which allows connecting to the target address using the
 * given strategy. The LOCAL_HOST strategy is tried once independently of the network
 * interfaces; all other strategies enumerate every address of every network interface.
 *
 * @param strategy Depending on the strategy, the method will enumerate all interfaces,
 *     trying to connect to the target address
 * @param targetAddress The address we try to connect to
 * @param logging Boolean indicating the logging verbosity
 * @return null if we could not find an address using this strategy, otherwise, the local
 *     address.
 * @throws IOException if enumerating interfaces or resolving the local host fails
 */
private static InetAddress findAddressUsingStrategy(
AddressDetectionState strategy, InetSocketAddress targetAddress, boolean logging)
throws IOException {
// try LOCAL_HOST strategy independent of the network interfaces
if (strategy == AddressDetectionState.LOCAL_HOST) {
InetAddress localhostName;
try {
localhostName = InetAddress.getLocalHost();
} catch (UnknownHostException uhe) {
LOG.warn("Could not resolve local hostname to an IP address: {}", uhe.getMessage());
return null;
}
if (tryToConnect(localhostName, targetAddress, strategy.getTimeout(), logging)) {
LOG.debug(
"Using InetAddress.getLocalHost() immediately for the connecting address");
// Here, we are not calling tryLocalHostBeforeReturning() because it is the
// LOCAL_HOST strategy
return localhostName;
} else {
return null;
}
}
// an unresolved target address has no InetAddress to compare prefixes against
final InetAddress address = targetAddress.getAddress();
if (address == null) {
return null;
}
final byte[] targetAddressBytes = address.getAddress();
// for each network interface
Enumeration<NetworkInterface> e = NetworkInterface.getNetworkInterfaces();
while (e.hasMoreElements()) {
NetworkInterface netInterface = e.nextElement();
// for each address of the network interface
Enumeration<InetAddress> ee = netInterface.getInetAddresses();
while (ee.hasMoreElements()) {
InetAddress interfaceAddress = ee.nextElement();
switch (strategy) {
case ADDRESS:
// only probe interfaces whose address shares a 2-byte prefix with the target
if (hasCommonPrefix(targetAddressBytes, interfaceAddress.getAddress())) {
LOG.debug(
"Target address {} and local address {} share prefix - trying to connect.",
targetAddress,
interfaceAddress);
if (tryToConnect(
interfaceAddress,
targetAddress,
strategy.getTimeout(),
logging)) {
return tryLocalHostBeforeReturning(
interfaceAddress, targetAddress, logging);
}
}
break;
case FAST_CONNECT:
case SLOW_CONNECT:
// probe every interface address; the two cases differ only in timeout
LOG.debug(
"Trying to connect to {} from local address {} with timeout {}",
targetAddress,
interfaceAddress,
strategy.getTimeout());
if (tryToConnect(
interfaceAddress, targetAddress, strategy.getTimeout(), logging)) {
return tryLocalHostBeforeReturning(
interfaceAddress, targetAddress, logging);
}
break;
case HEURISTIC:
if (LOG.isDebugEnabled()) {
LOG.debug(
"Choosing InetAddress.getLocalHost() address as a heuristic.");
}
return InetAddress.getLocalHost();
default:
throw new RuntimeException("Unsupported strategy: " + strategy);
}
} // end for each address of the interface
} // end for each interface
return null;
} | Try to find a local address which allows as to connect to the targetAddress using the given
strategy.
@param strategy Depending on the strategy, the method will enumerate all interfaces, trying
to connect to the target address
@param targetAddress The address we try to connect to
@param logging Boolean indicating the logging verbosity
@return null if we could not find an address using this strategy, otherwise, the local
address.
@throws IOException | findAddressUsingStrategy | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/net/ConnectionUtils.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/net/ConnectionUtils.java | Apache-2.0 |
/**
 * Checks if two addresses have a common prefix (first 2 bytes). Example: 192.168.???.???
 * Works also with ipv6, but accepts probably too many addresses.
 */
private static boolean hasCommonPrefix(byte[] address, byte[] address2) {
return address[0] == address2[0] && address[1] == address2[1];
} | Checks if two addresses have a common prefix (first 2 bytes). Example: 192.168.???.??? Works
also with ipv6, but accepts probably too many addresses | hasCommonPrefix | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/net/ConnectionUtils.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/net/ConnectionUtils.java | Apache-2.0 |
/**
 * Attempts to open a TCP connection from the given local address (with an OS-chosen
 * ephemeral port) to the given target, within the given timeout.
 *
 * @param fromAddress The address to connect from.
 * @param toSocket The socket address to connect to.
 * @param timeout The timeout for the connection attempt, in milliseconds.
 * @param logFailed Flag to indicate whether to log failed attempts on info level (failed
 *     attempts are always logged on DEBUG level).
 * @return True, if the connection was successful, false otherwise.
 * @throws IOException Thrown if the socket cleanup fails.
 */
private static boolean tryToConnect(
InetAddress fromAddress, SocketAddress toSocket, int timeout, boolean logFailed)
throws IOException {
String detailedMessage =
String.format(
"connect to [%s] from local address [%s] with timeout [%s]",
toSocket, fromAddress, timeout);
if (LOG.isDebugEnabled()) {
LOG.debug("Trying to " + detailedMessage);
}
// try-with-resources guarantees the probe socket is closed on every path
try (Socket socket = new Socket()) {
// port 0 = let the OS choose the port
SocketAddress bindP = new InetSocketAddress(fromAddress, 0);
// machine
socket.bind(bindP);
socket.connect(toSocket, timeout);
return true;
} catch (Exception ex) {
// any failure (refused, unreachable, timeout) just means "this address doesn't work"
String message = "Failed to " + detailedMessage + " due to: " + ex.getMessage();
if (LOG.isDebugEnabled()) {
LOG.debug(message, ex);
} else if (logFailed) {
LOG.info(message);
}
return false;
}
} | @param fromAddress The address to connect from.
@param toSocket The socket address to connect to.
@param timeout The timeout for the connection.
@param logFailed Flag to indicate whether to log failed attempts on info level (failed
attempts are always logged on DEBUG level).
@return True, if the connection was successful, false otherwise.
@throws IOException Thrown if the socket cleanup fails. | tryToConnect | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/net/ConnectionUtils.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/net/ConnectionUtils.java | Apache-2.0 |
/**
 * Creates a factory for SSL server sockets from the given configuration, applying the
 * configured protocol versions and cipher suites to each created socket. SSL server
 * sockets are always part of internal communication.
 *
 * @param config the Flink configuration holding the SSL settings
 * @return a server socket factory that applies the configured protocols / cipher suites
 * @throws IllegalConfigurationException if internal SSL is not enabled
 * @throws Exception if creating the SSL context fails
 */
public static ServerSocketFactory createSSLServerSocketFactory(Configuration config)
throws Exception {
SSLContext sslContext = createInternalSSLContext(config, false);
if (sslContext == null) {
throw new IllegalConfigurationException("SSL is not enabled");
}
String[] protocols = getEnabledProtocols(config);
String[] cipherSuites = getEnabledCipherSuites(config);
SSLServerSocketFactory factory = sslContext.getServerSocketFactory();
return new ConfiguringSSLServerSocketFactory(factory, protocols, cipherSuites);
} | Creates a factory for SSL Server Sockets from the given configuration. SSL Server Sockets are
always part of internal communication. | createSSLServerSocketFactory | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/net/SSLUtils.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/net/SSLUtils.java | Apache-2.0 |
/**
 * Creates a factory for SSL client sockets from the given configuration. SSL client
 * sockets are always part of internal communication.
 *
 * @param config the Flink configuration holding the SSL settings
 * @return the socket factory of the internal SSL context
 * @throws IllegalConfigurationException if internal SSL is not enabled
 * @throws Exception if creating the SSL context fails
 */
public static SocketFactory createSSLClientSocketFactory(Configuration config)
        throws Exception {
    final SSLContext sslContext = createInternalSSLContext(config, true);
    if (sslContext != null) {
        return sslContext.getSocketFactory();
    }
    throw new IllegalConfigurationException("SSL is not enabled");
} | Creates a factory for SSL Client Sockets from the given configuration. SSL Client Sockets are
always part of internal communication. | createSSLClientSocketFactory | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/net/SSLUtils.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/net/SSLUtils.java | Apache-2.0 |
/**
 * Creates a SSLEngineFactory to be used by internal communication server endpoints,
 * configured with the internal handshake and close-notify flush timeouts.
 *
 * @param config the Flink configuration holding the SSL settings
 * @return handler factory wrapping the internal server-side SSL context
 * @throws IllegalConfigurationException if SSL is not enabled for internal communication
 * @throws Exception if creating the SSL context fails
 */
public static SSLHandlerFactory createInternalServerSSLEngineFactory(final Configuration config)
throws Exception {
SslContext sslContext = createInternalNettySSLContext(config, false);
if (sslContext == null) {
throw new IllegalConfigurationException(
"SSL is not enabled for internal communication.");
}
return new SSLHandlerFactory(
sslContext,
config.get(SecurityOptions.SSL_INTERNAL_HANDSHAKE_TIMEOUT),
config.get(SecurityOptions.SSL_INTERNAL_CLOSE_NOTIFY_FLUSH_TIMEOUT));
} | Creates a SSLEngineFactory to be used by internal communication server endpoints. | createInternalServerSSLEngineFactory | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/net/SSLUtils.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/net/SSLUtils.java | Apache-2.0 |
/**
 * Creates a SSLEngineFactory to be used by internal communication client endpoints,
 * configured with the internal handshake and close-notify flush timeouts.
 *
 * @param config the Flink configuration holding the SSL settings
 * @return handler factory wrapping the internal client-side SSL context
 * @throws IllegalConfigurationException if SSL is not enabled for internal communication
 * @throws Exception if creating the SSL context fails
 */
public static SSLHandlerFactory createInternalClientSSLEngineFactory(final Configuration config)
throws Exception {
SslContext sslContext = createInternalNettySSLContext(config, true);
if (sslContext == null) {
throw new IllegalConfigurationException(
"SSL is not enabled for internal communication.");
}
return new SSLHandlerFactory(
sslContext,
config.get(SecurityOptions.SSL_INTERNAL_HANDSHAKE_TIMEOUT),
config.get(SecurityOptions.SSL_INTERNAL_CLOSE_NOTIFY_FLUSH_TIMEOUT));
} | Creates a SSLEngineFactory to be used by internal communication client endpoints. | createInternalClientSSLEngineFactory | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/net/SSLUtils.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/net/SSLUtils.java | Apache-2.0 |
/**
 * Creates a {@link SSLHandlerFactory} to be used by the REST servers. Client
 * authentication is required only when REST SSL authentication is enabled.
 *
 * @param config The application configuration.
 * @return handler factory wrapping the REST server-side SSL context
 * @throws IllegalConfigurationException if SSL is not enabled for REST endpoints
 * @throws Exception if creating the SSL context fails
 */
public static SSLHandlerFactory createRestServerSSLEngineFactory(final Configuration config)
throws Exception {
ClientAuth clientAuth =
SecurityOptions.isRestSSLAuthenticationEnabled(config)
? ClientAuth.REQUIRE
: ClientAuth.NONE;
SslContext sslContext = createRestNettySSLContext(config, false, clientAuth);
if (sslContext == null) {
throw new IllegalConfigurationException("SSL is not enabled for REST endpoints.");
}
// -1/-1: no explicit handshake / close-notify flush timeouts are configured for REST
return new SSLHandlerFactory(sslContext, -1, -1);
} | Creates a {@link SSLHandlerFactory} to be used by the REST Servers.
@param config The application configuration. | createRestServerSSLEngineFactory | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/net/SSLUtils.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/net/SSLUtils.java | Apache-2.0 |
/**
 * Returns the collector that terminates the output chain: the output collector of the
 * last chained task, or this task's own output when no tasks are chained.
 *
 * @return the last output collector in the collector chain
 */
@SuppressWarnings("unchecked")
protected Collector<OT> getLastOutputCollector() {
    final int chainLength = this.chainedTasks.size();
    if (chainLength == 0) {
        return output;
    }
    return (Collector<OT>) this.chainedTasks.get(chainLength - 1).getOutputCollector();
} | @return the last output collector in the collector chain | getLastOutputCollector | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | Apache-2.0 |
/**
 * Creates the record readers for the number of inputs as defined by
 * {@link #getNumTaskInputs()}. A logical input that unions several physical inputs
 * (group size &gt; 1) is read through a {@link UnionInputGate}; otherwise the gate is
 * read directly. Requires that the task configuration, the driver, and the user-code
 * class loader are set.
 *
 * @throws Exception if a configured group size is invalid, or if the group sizes are
 *     inconsistent with the total number of input gates
 */
protected void initInputReaders() throws Exception {
final int numInputs = getNumTaskInputs();
final MutableReader<?>[] inputReaders = new MutableReader<?>[numInputs];
int currentReaderOffset = 0;
for (int i = 0; i < numInputs; i++) {
// ---------------- create the input readers ---------------------
// in case where a logical input unions multiple physical inputs, create a union reader
final int groupSize = this.config.getGroupSize(i);
if (groupSize == 1) {
// non-union case
inputReaders[i] =
new MutableRecordReader<>(
getEnvironment().getInputGate(currentReaderOffset),
getEnvironment().getTaskManagerInfo().getTmpDirectories());
} else if (groupSize > 1) {
// union case
IndexedInputGate[] readers = new IndexedInputGate[groupSize];
for (int j = 0; j < groupSize; ++j) {
readers[j] = getEnvironment().getInputGate(currentReaderOffset + j);
}
inputReaders[i] =
new MutableRecordReader<>(
new UnionInputGate(readers),
getEnvironment().getTaskManagerInfo().getTmpDirectories());
} else {
throw new Exception("Illegal input group size in task configuration: " + groupSize);
}
currentReaderOffset += groupSize;
}
this.inputReaders = inputReaders;
// final sanity check
if (currentReaderOffset != this.config.getNumInputs()) {
throw new Exception(
"Illegal configuration: Number of input gates and group sizes are not consistent.");
}
} | Creates the record readers for the number of inputs as defined by {@link
#getNumTaskInputs()}. This method requires that the task configuration, the driver, and the
user-code class loader are set. | initInputReaders | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | Apache-2.0 |
/**
 * Creates the record readers for the extra broadcast inputs as configured by
 * {@link TaskConfig#getNumBroadcastInputs()}. The broadcast gates start right after the
 * regular input gates, hence the reader offset starts at {@code config.getNumInputs()}.
 * Requires that the task configuration, the driver, and the user-code class loader are
 * set.
 *
 * @throws Exception if a configured broadcast group size is invalid
 */
protected void initBroadcastInputReaders() throws Exception {
final int numBroadcastInputs = this.config.getNumBroadcastInputs();
final MutableReader<?>[] broadcastInputReaders = new MutableReader<?>[numBroadcastInputs];
int currentReaderOffset = config.getNumInputs();
for (int i = 0; i < this.config.getNumBroadcastInputs(); i++) {
// ---------------- create the input readers ---------------------
// in case where a logical input unions multiple physical inputs, create a union reader
final int groupSize = this.config.getBroadcastGroupSize(i);
if (groupSize == 1) {
// non-union case
broadcastInputReaders[i] =
new MutableRecordReader<>(
getEnvironment().getInputGate(currentReaderOffset),
getEnvironment().getTaskManagerInfo().getTmpDirectories());
} else if (groupSize > 1) {
// union case
IndexedInputGate[] readers = new IndexedInputGate[groupSize];
for (int j = 0; j < groupSize; ++j) {
readers[j] = getEnvironment().getInputGate(currentReaderOffset + j);
}
broadcastInputReaders[i] =
new MutableRecordReader<>(
new UnionInputGate(readers),
getEnvironment().getTaskManagerInfo().getTmpDirectories());
} else {
throw new Exception("Illegal input group size in task configuration: " + groupSize);
}
currentReaderOffset += groupSize;
}
this.broadcastInputReaders = broadcastInputReaders;
} | Creates the record readers for the extra broadcast inputs as configured by {@link
TaskConfig#getNumBroadcastInputs()}. This method requires that the task configuration, the
driver, and the user-code class loader are set. | initBroadcastInputReaders | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | Apache-2.0 |
/**
 * Creates the serializers and input iterators for all regular inputs, plus the driver
 * comparators if any are configured.
 *
 * @param numInputs number of regular task inputs
 * @param numComparators number of driver comparators to create (may be 0)
 */
protected void initInputsSerializersAndComparators(int numInputs, int numComparators) {
this.inputSerializers = new TypeSerializerFactory<?>[numInputs];
this.inputComparators = numComparators > 0 ? new TypeComparator<?>[numComparators] : null;
this.inputIterators = new MutableObjectIterator<?>[numInputs];
ClassLoader userCodeClassLoader = getUserCodeClassLoader();
for (int i = 0; i < numInputs; i++) {
final TypeSerializerFactory<?> serializerFactory =
this.config.getInputSerializer(i, userCodeClassLoader);
this.inputSerializers[i] = serializerFactory;
this.inputIterators[i] =
createInputIterator(this.inputReaders[i], this.inputSerializers[i]);
}
// ---------------- create the driver's comparators ---------------------
// NOTE(review): inputComparators is non-null whenever numComparators > 0 (assigned
// above), so this inner guard is redundant but harmless.
for (int i = 0; i < numComparators; i++) {
if (this.inputComparators != null) {
final TypeComparatorFactory<?> comparatorFactory =
this.config.getDriverComparator(i, userCodeClassLoader);
this.inputComparators[i] = comparatorFactory.createComparator();
}
}
} | Creates all the serializers and comparators. | initInputsSerializersAndComparators | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | Apache-2.0 |
/**
 * Creates the serializer factories for all broadcast inputs, loaded via the user-code
 * class loader.
 *
 * @param numBroadcastInputs number of broadcast inputs to create serializers for
 */
protected void initBroadcastInputsSerializers(int numBroadcastInputs) {
this.broadcastInputSerializers = new TypeSerializerFactory<?>[numBroadcastInputs];
ClassLoader userCodeClassLoader = getUserCodeClassLoader();
for (int i = 0; i < numBroadcastInputs; i++) {
// ---------------- create the serializer first ---------------------
final TypeSerializerFactory<?> serializerFactory =
this.config.getBroadcastInputSerializer(i, userCodeClassLoader);
this.broadcastInputSerializers[i] = serializerFactory;
}
} | Creates all the serializers and iterators for the broadcast inputs. | initBroadcastInputsSerializers | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | Apache-2.0 |
/**
 * Utility function that composes a string for logging purposes. The string includes the
 * given message, the given name of the task, its subtask index (1-based) and the number
 * of parallel subtasks, e.g. {@code "Start task code: MyTask (3/8)"}.
 *
 * @param message The main message for the log.
 * @param taskName The name of the task.
 * @param parent The task that contains the code producing the message.
 * @return The string for logging.
 */
public static String constructLogString(
        String message, String taskName, AbstractInvokable parent) {
    final StringBuilder builder = new StringBuilder();
    builder.append(message).append(": ").append(taskName).append(" (");
    builder.append(parent.getEnvironment().getTaskInfo().getIndexOfThisSubtask() + 1);
    builder.append('/');
    builder.append(parent.getEnvironment().getTaskInfo().getNumberOfParallelSubtasks());
    builder.append(')');
    return builder.toString();
} | Utility function that composes a string for logging purposes. The string includes the given
message, the given name of the task and the index in its subtask group as well as the number
of instances that exist in its subtask group.
@param message The main message for the log.
@param taskName The name of the task.
@param parent The task that contains the code producing the message.
@return The string for logging. | constructLogString | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | Apache-2.0 |
/**
 * Logs an error message and rethrows the given exception. If the exception is an
 * {@link ExceptionInChainedStubException}, the chain of wrapped exceptions is unwrapped
 * until a non-chained exception is found; the task name of the innermost chained
 * exception is used in the log message.
 *
 * @param ex the exception to be thrown
 * @param parent the parent task, whose information is included in the log message
 * @throws Exception always thrown (the unwrapped exception)
 */
public static void logAndThrowException(Exception ex, AbstractInvokable parent)
        throws Exception {
    String taskName;
    if (ex instanceof ExceptionInChainedStubException) {
        // unwrap the chain; the innermost chained exception wins the task name
        ExceptionInChainedStubException chained;
        do {
            chained = (ExceptionInChainedStubException) ex;
            taskName = chained.getTaskName();
            ex = chained.getWrappedException();
        } while (ex instanceof ExceptionInChainedStubException);
    } else {
        taskName = parent.getEnvironment().getTaskInfo().getTaskName();
    }
    if (LOG.isErrorEnabled()) {
        LOG.error(constructLogString("Error in task code", taskName, parent), ex);
    }
    throw ex;
} | Prints an error message and throws the given exception. If the exception is of the type
{@link ExceptionInChainedStubException} then the chain of contained exceptions is followed
until an exception of a different type is found.
@param ex The exception to be thrown.
@param parent The parent task, whose information is included in the log message.
@throws Exception Always thrown. | logAndThrowException | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | Apache-2.0 |
/**
 * Opens all chained tasks in the order they appear in the list, emitting a standardized
 * debug log message for each.
 *
 * @param tasks the tasks to be opened
 * @param parent the parent task, used to obtain parameters for the log message
 * @throws Exception if opening a task fails
 */
public static void openChainedTasks(List<ChainedDriver<?, ?>> tasks, AbstractInvokable parent)
        throws Exception {
    for (final ChainedDriver<?, ?> driver : tasks) {
        if (LOG.isDebugEnabled()) {
            LOG.debug(constructLogString("Start task code", driver.getTaskName(), parent));
        }
        driver.openTask();
    }
} | Opens all chained tasks, in the order as they are stored in the array. The opening process
creates a standardized log info message.
@param tasks The tasks to be opened.
@param parent The parent task, used to obtain parameters to include in the log message.
@throws Exception Thrown, if the opening encounters an exception. | openChainedTasks | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | Apache-2.0 |
/**
 * Closes all chained tasks in the order they appear in the list, emitting a standardized
 * debug log message after each close.
 *
 * @param tasks the tasks to be closed
 * @param parent the parent task, used to obtain parameters for the log message
 * @throws Exception if closing a task fails
 */
public static void closeChainedTasks(List<ChainedDriver<?, ?>> tasks, AbstractInvokable parent)
        throws Exception {
    for (final ChainedDriver<?, ?> driver : tasks) {
        driver.closeTask();
        if (LOG.isDebugEnabled()) {
            LOG.debug(constructLogString("Finished task code", driver.getTaskName(), parent));
        }
    }
} | Closes all chained tasks, in the order as they are stored in the array. The closing process
creates a standardized log info message.
@param tasks The tasks to be closed.
@param parent The parent task, used to obtain parameters to include in the log message.
@throws Exception Thrown, if the closing encounters an exception. | closeChainedTasks | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | Apache-2.0 |
/**
 * Cancels all tasks via {@link ChainedDriver#cancelTask()}. Any exception or error is
 * deliberately suppressed so that every task in the chain receives its cancel call.
 *
 * @param tasks the tasks to be canceled
 */
public static void cancelChainedTasks(List<ChainedDriver<?, ?>> tasks) {
    for (final ChainedDriver<?, ?> driver : tasks) {
        try {
            driver.cancelTask();
        } catch (Throwable ignored) {
            // suppressed on purpose: cancellation must reach every task in the chain
        }
    }
} | Cancels all tasks via their {@link ChainedDriver#cancelTask()} method. Any occurring
exception and error is suppressed, such that the canceling method of every task is invoked in
all cases.
@param tasks The tasks to be canceled. | cancelChainedTasks | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/BatchTask.java | Apache-2.0 |
/**
 * Checks whether this constant represents some form of materialization (full dam or not).
 *
 * @return true for every behavior except {@code PIPELINED}
 */
public boolean isMaterializing() {
    // only the purely pipelined behavior is non-materializing
    return !(this == PIPELINED);
} | Checks whether this enumeration represents some form of materialization, either with a full
dam or without.
@return True, if this enumeration constant represents a materializing behavior, false
otherwise. | isMaterializing | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/DamBehavior.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/DamBehavior.java | Apache-2.0 |
/**
 * Initializes the {@link OutputFormat} implementation and its configuration.
 *
 * <p>Reads the task configuration into {@code this.config}, obtains the unique output
 * format (and its operator ID) from an {@link InputOutputFormatContainer}, verifies its
 * type, and invokes the user-defined {@code configure()} method with the user-code class
 * loader installed as the thread context class loader (restored afterwards).
 *
 * @throws RuntimeException if the OutputFormat instance cannot be obtained, is of the
 *     wrong type, or if the user-defined {@code configure()} method throws
 */
private void initOutputFormat() {
    ClassLoader userCodeClassLoader = getUserCodeClassLoader();
    // obtain task configuration (including stub parameters)
    Configuration taskConf = getTaskConfiguration();
    this.config = new TaskConfig(taskConf);
    final Pair<OperatorID, OutputFormat<IT>> operatorIDAndOutputFormat;
    InputOutputFormatContainer formatContainer =
            new InputOutputFormatContainer(config, userCodeClassLoader);
    try {
        operatorIDAndOutputFormat = formatContainer.getUniqueOutputFormat();
        this.format = operatorIDAndOutputFormat.getValue();
        // check if the class is a subclass, if the check is required
        if (!OutputFormat.class.isAssignableFrom(this.format.getClass())) {
            throw new RuntimeException(
                    "The class '"
                            + this.format.getClass().getName()
                            + "' is not a subclass of '"
                            + OutputFormat.class.getName()
                            + "' as is required.");
        }
    } catch (ClassCastException ccex) {
        throw new RuntimeException(
                "The stub class is not a proper subclass of " + OutputFormat.class.getName(),
                ccex);
    }
    Thread thread = Thread.currentThread();
    ClassLoader original = thread.getContextClassLoader();
    // configure the stub. catch exceptions here extra, to report them as originating from the
    // user code
    try {
        thread.setContextClassLoader(userCodeClassLoader);
        this.format.configure(
                formatContainer.getParameters(operatorIDAndOutputFormat.getKey()));
    } catch (Throwable t) {
        throw new RuntimeException(
                "The user defined 'configure()' method in the Output Format caused an error: "
                        + t.getMessage(),
                t);
    } finally {
        // always restore the original context class loader
        thread.setContextClassLoader(original);
    }
} | Initializes the OutputFormat implementation and configuration.
@throws RuntimeException Throws if instance of OutputFormat implementation can not be
obtained. | initOutputFormat | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/DataSinkTask.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/DataSinkTask.java | Apache-2.0 |
/**
 * Initializes the input reader of this task.
 *
 * <p>When the single logical input unions several physical inputs (group size &gt; 1), a
 * {@link UnionInputGate} over all input gates is used; otherwise the single gate is read
 * directly. Also creates the deserializing {@code ReaderIterator} over the input and
 * sanity-checks that the gate count matches the configured number of inputs.
 *
 * @throws Exception if the configured input group size is illegal or inconsistent with
 *     the number of configured inputs
 */
@SuppressWarnings("unchecked")
private void initInputReaders() throws Exception {
    int numGates = 0;
    // ---------------- create the input readers ---------------------
    // in case where a logical input unions multiple physical inputs, create a union reader
    final int groupSize = this.config.getGroupSize(0);
    numGates += groupSize;
    if (groupSize == 1) {
        // non-union case
        inputReader =
                new MutableRecordReader<DeserializationDelegate<IT>>(
                        getEnvironment().getInputGate(0),
                        getEnvironment().getTaskManagerInfo().getTmpDirectories());
    } else if (groupSize > 1) {
        // union case
        inputReader =
                new MutableRecordReader<IOReadableWritable>(
                        new UnionInputGate(getEnvironment().getAllInputGates()),
                        getEnvironment().getTaskManagerInfo().getTmpDirectories());
    } else {
        throw new Exception("Illegal input group size in task configuration: " + groupSize);
    }
    this.inputTypeSerializerFactory =
            this.config.getInputSerializer(0, getUserCodeClassLoader());
    @SuppressWarnings({"rawtypes"})
    final MutableObjectIterator<?> iter =
            new ReaderIterator(inputReader, this.inputTypeSerializerFactory.getSerializer());
    this.reader = (MutableObjectIterator<IT>) iter;
    // final sanity check
    if (numGates != this.config.getNumInputs()) {
        throw new Exception(
                "Illegal configuration: Number of input gates and group sizes are not consistent.");
    }
} | Initializes the input readers of the DataSinkTask.
@throws RuntimeException Thrown in case of invalid task input configuration. | initInputReaders | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/DataSinkTask.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/DataSinkTask.java | Apache-2.0 |
/**
 * Initializes the {@link InputFormat} implementation and its configuration.
 *
 * <p>Reads the task configuration into {@code this.config}, obtains the unique input
 * format (and its operator ID) from an {@link InputOutputFormatContainer}, verifies its
 * type, invokes the user-defined {@code configure()} method with the user-code class
 * loader installed as the thread context class loader (restored afterwards), and finally
 * fetches the output serializer factory.
 *
 * @throws RuntimeException if the InputFormat instance cannot be obtained, is of the
 *     wrong type, or if the user-defined {@code configure()} method throws
 */
private void initInputFormat() {
    ClassLoader userCodeClassLoader = getUserCodeClassLoader();
    // obtain task configuration (including stub parameters)
    Configuration taskConf = getTaskConfiguration();
    this.config = new TaskConfig(taskConf);
    final Pair<OperatorID, InputFormat<OT, InputSplit>> operatorIdAndInputFormat;
    InputOutputFormatContainer formatContainer =
            new InputOutputFormatContainer(config, userCodeClassLoader);
    try {
        operatorIdAndInputFormat = formatContainer.getUniqueInputFormat();
        this.format = operatorIdAndInputFormat.getValue();
        // check if the class is a subclass, if the check is required
        if (!InputFormat.class.isAssignableFrom(this.format.getClass())) {
            throw new RuntimeException(
                    "The class '"
                            + this.format.getClass().getName()
                            + "' is not a subclass of '"
                            + InputFormat.class.getName()
                            + "' as is required.");
        }
    } catch (ClassCastException ccex) {
        throw new RuntimeException(
                "The stub class is not a proper subclass of " + InputFormat.class.getName(),
                ccex);
    }
    Thread thread = Thread.currentThread();
    ClassLoader original = thread.getContextClassLoader();
    // configure the stub. catch exceptions here extra, to report them as originating from the
    // user code
    try {
        thread.setContextClassLoader(userCodeClassLoader);
        this.format.configure(formatContainer.getParameters(operatorIdAndInputFormat.getKey()));
    } catch (Throwable t) {
        throw new RuntimeException(
                "The user defined 'configure()' method caused an error: " + t.getMessage(), t);
    } finally {
        // always restore the original context class loader
        thread.setContextClassLoader(original);
    }
    // get the factory for the type serializer
    this.serializerFactory = this.config.getOutputSerializer(userCodeClassLoader);
} | Initializes the InputFormat implementation and configuration.
@throws RuntimeException Throws if instance of InputFormat implementation can not be
obtained. | initInputFormat | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/DataSourceTask.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/DataSourceTask.java | Apache-2.0 |
/**
 * Gets the number of oversized records handled by this combiner.
 *
 * @return the number of oversized records handled by this combiner
 */
public long getOversizedRecordCount() {
    return oversizedRecordCount;
} | Gets the number of oversized records handled by this combiner.
@return The number of oversized records handled by this combiner. | getOversizedRecordCount | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/GroupReduceCombineDriver.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/GroupReduceCombineDriver.java | Apache-2.0 |
/**
 * Returns the ID of the checkpoint that this event is related to.
 *
 * @return the checkpoint ID
 */
long getCheckpointID() {
    return checkpointId;
} | The ID of the checkpoint that this event is related to. | getCheckpointID | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/AcknowledgeCheckpointEvent.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/AcknowledgeCheckpointEvent.java | Apache-2.0
/**
 * Closes a component with a timeout. Convenience overload that adapts a plain
 * {@link Runnable} closing sequence to the {@link ThrowingRunnable} variant.
 *
 * @param componentName the name of the component
 * @param closingSequence the closing logic
 * @param closeTimeout the maximum time to wait for the component to close
 * @return a future that completes when the component is closed, or completes
 *     exceptionally if closing fails or times out
 */
public static CompletableFuture<Void> closeAsyncWithTimeout(
        String componentName, Runnable closingSequence, Duration closeTimeout) {
    return closeAsyncWithTimeout(
            componentName, (ThrowingRunnable<Exception>) closingSequence::run, closeTimeout);
} | Close a component with a timeout.
@param componentName the name of the component.
@param closingSequence the closing logic which is a callable that can throw exceptions.
@param closeTimeout the timeout to wait for the component to close.
@return A future that completes when the component is closed, and completes exceptionally
if an error occurred when closing the component. | closeAsyncWithTimeout | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/ComponentClosingUtils.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/ComponentClosingUtils.java | Apache-2.0
/**
 * Closes a component with a timeout.
 *
 * <p>The closing sequence runs in a dedicated thread so a hanging close cannot block the
 * caller. If the returned future fails due to the timeout while the closing thread is
 * still alive, the thread is aborted via {@code abortThread} (implementation not visible
 * here — presumably interrupts/stops it).
 *
 * @param componentName the name of the component, used in the timeout error message
 * @param closingSequence the closing logic, which may throw exceptions
 * @param closeTimeout the maximum time to wait for the component to close
 * @return a future that completes when the closing sequence finishes, or completes
 *     exceptionally if the sequence throws or the timeout fires
 */
public static CompletableFuture<Void> closeAsyncWithTimeout(
        String componentName,
        ThrowingRunnable<Exception> closingSequence,
        Duration closeTimeout) {
    final CompletableFuture<Void> future = new CompletableFuture<>();
    // Start a dedicate thread to close the component.
    final Thread t =
            new Thread(
                    () -> {
                        try {
                            closingSequence.run();
                            future.complete(null);
                        } catch (Throwable error) {
                            future.completeExceptionally(error);
                        }
                    });
    t.start();
    // if the future fails due to a timeout, we interrupt the thread
    future.exceptionally(
            (error) -> {
                if (error instanceof TimeoutException && t.isAlive()) {
                    abortThread(t);
                }
                return null;
            });
    FutureUtils.orTimeout(
            future,
            closeTimeout.toMillis(),
            TimeUnit.MILLISECONDS,
            String.format(
                    "Failed to close the %s before timeout of %d ms",
                    componentName, closeTimeout.toMillis()));
    return future;
} | Close a component with a timeout.
@param componentName the name of the component.
@param closingSequence the closing logic.
@param closeTimeout the timeout to wait for the component to close.
@return A future that completes when the component is closed, and completes exceptionally
if an error occurred when closing the component. | closeAsyncWithTimeout | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/ComponentClosingUtils.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/ComponentClosingUtils.java | Apache-2.0
/**
 * Tries to shut down the given {@link ExecutorService} elegantly within the given
 * timeout. If the executor has not terminated by the timeout, or the waiting thread is
 * interrupted, a forceful shutdown is attempted.
 *
 * <p>Fix: an {@link InterruptedException} during the wait is no longer silently
 * swallowed — the thread's interrupt status is restored so callers can observe it,
 * before falling through to the forceful shutdown.
 *
 * @param executor the {@link ExecutorService} to shut down
 * @param timeout the timeout duration
 * @return true if the given executor has been successfully terminated, false otherwise
 */
@SuppressWarnings("ResultOfMethodCallIgnored")
public static boolean tryShutdownExecutorElegantly(ExecutorService executor, Duration timeout) {
    try {
        executor.shutdown();
        executor.awaitTermination(timeout.toMillis(), TimeUnit.MILLISECONDS);
    } catch (InterruptedException ie) {
        // restore the interrupt flag instead of swallowing it, then fall through
        // to the forceful shutdown below
        Thread.currentThread().interrupt();
    }
    if (!executor.isTerminated()) {
        shutdownExecutorForcefully(executor, Duration.ZERO, false);
    }
    return executor.isTerminated();
} | A util method that tries to shut down an {@link ExecutorService} elegantly within the given
timeout. If the executor has not been shut down before it hits timeout or the thread is
interrupted when waiting for the termination, a forceful shutdown will be attempted on the
executor.
@param executor the {@link ExecutorService} to shut down.
@param timeout the timeout duration.
@return true if the given executor has been successfully closed, false otherwise. | tryShutdownExecutorElegantly | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/ComponentClosingUtils.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/ComponentClosingUtils.java | Apache-2.0 |
/**
 * Shuts down the given executor forcefully within the given timeout.
 *
 * <p>Delegates to the three-argument overload with {@code true} as the third argument —
 * NOTE(review): that flag presumably makes the waiting interruptible; confirm against
 * the overload's definition (not visible here).
 *
 * @param executor the executor to shut down
 * @param timeout the timeout duration
 * @return true if the given executor is terminated, false otherwise
 */
public static boolean shutdownExecutorForcefully(ExecutorService executor, Duration timeout) {
    return shutdownExecutorForcefully(executor, timeout, true);
} | Shutdown the given executor forcefully within the given timeout. The method returns if it is
interrupted.
@param executor the executor to shut down.
@param timeout the timeout duration.
@return true if the given executor is terminated, false otherwise. | shutdownExecutorForcefully | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/ComponentClosingUtils.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/ComponentClosingUtils.java | Apache-2.0 |
/**
 * Whether the operator coordinator supports taking snapshots in no-checkpoint/batch
 * scenarios, where the checkpoint ID is always -1. Defaults to {@code false}.
 *
 * @return true if batch snapshots are supported
 */
default boolean supportsBatchSnapshot() {
    return false;
} | Whether the operator coordinator supports taking snapshot in no-checkpoint/batch scenarios.
If it returns true, the {@link OperatorCoordinator#checkpointCoordinator} and {@link
OperatorCoordinator#resetToCheckpoint} methods supports taking snapshot and restoring from a
snapshot in batch processing scenarios. In such scenarios, the checkpointId will always be
-1. | supportsBatchSnapshot | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/OperatorCoordinator.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/OperatorCoordinator.java | Apache-2.0 |
/**
 * Whether this event is loss-tolerant: an occasional loss or inability to deliver it
 * does not affect the job's correctness. Defaults to {@code false}.
 *
 * @return true if the event may be lost without harming correctness
 */
default boolean isLossTolerant() {
    return false;
} | @return true if event is optional and an occasional loss or inability to deliver that event
doesn't affect the job's correctness. | isLossTolerant | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/OperatorEvent.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/OperatorEvent.java | Apache-2.0 |
/**
 * Extracts the operator IDs from the given operator infos, preserving iteration order.
 *
 * @param infos the operator infos to read the IDs from
 * @return the operator IDs as a list
 */
static Collection<OperatorID> getIds(Collection<? extends OperatorInfo> infos) {
    return infos.stream()
            .map(info -> info.operatorId())
            .collect(Collectors.toList());
} | An interface to access basic properties of an operator in the context of its coordinator. | getIds | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/OperatorInfo.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/OperatorInfo.java | Apache-2.0
/**
 * Marks the gateway for the next checkpoint, remembering the checkpoint ID so the
 * gateway can later only be closed for that specific checkpoint.
 *
 * <p>Checkpoint IDs must be strictly increasing; a regressing ID indicates overlapping
 * coordinator checkpoint attempts, which is not supported. The batch checkpoint ID
 * ({@code BATCH_CHECKPOINT_ID}) is exempt from the monotonicity check and is silently
 * ignored when it does not exceed the latest attempted ID.
 *
 * @param checkpointId the ID of the checkpoint to mark
 * @throws IllegalStateException if the checkpoint ID regresses (and is not the batch ID)
 */
void markForCheckpoint(long checkpointId) {
    // must run in the coordinator's main thread
    checkRunsInMainThread();
    if (checkpointId > latestAttemptedCheckpointId) {
        currentMarkedCheckpointIds.add(checkpointId);
        latestAttemptedCheckpointId = checkpointId;
    } else if (checkpointId != BATCH_CHECKPOINT_ID) {
        throw new IllegalStateException(
                String.format(
                        "Regressing checkpoint IDs. Previous checkpointId = %d, new checkpointId = %d",
                        latestAttemptedCheckpointId, checkpointId));
    }
} | Marks the gateway for the next checkpoint. This remembers the checkpoint ID and will only
allow closing the gateway for this specific checkpoint.
<p>This is the gateway's mechanism to detect situations where multiple coordinator
checkpoints would be attempted overlapping, which is currently not supported (the gateway
doesn't keep a list of events blocked per checkpoint). It also helps to identify situations
where the checkpoint was aborted even before the gateway was closed (by finding out that the
{@code currentCheckpointId} was already reset to {@code NO_CHECKPOINT}. | markForCheckpoint | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/SubtaskGatewayImpl.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/SubtaskGatewayImpl.java | Apache-2.0 |
/**
 * Gets the number of partitions for an initial hash table when no estimates are
 * available: roughly one tenth of the buffers, clamped to the range
 * [10, {@code MAX_NUM_PARTITIONS}].
 *
 * @param numBuffers the number of buffers available
 * @return the number of partitions to use
 */
private static int getPartitioningFanOutNoEstimates(int numBuffers) {
    final int tenthOfBuffers = numBuffers / 10;
    final int capped = Math.min(tenthOfBuffers, MAX_NUM_PARTITIONS);
    return Math.max(10, capped);
} | Gets the number of partitions to be used for an initial hash-table, when no estimates are
available.
<p>The current logic makes sure that there are always between 10 and 32 partitions, and close
to 0.1 of the number of buffers.
@param numBuffers The number of buffers available.
@return The number of partitions to use. | getPartitioningFanOutNoEstimates | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | Apache-2.0 |
/**
 * Computes the total size in bytes of all memory segments owned by this hash table:
 * the free segments, the bucket segments, every partition's pages and overflow
 * segments, and the compaction partition's pages.
 *
 * @return the owned memory size in bytes
 */
private long getSize() {
    long segmentCount = (long) this.availableMemory.size() + this.buckets.length;
    for (InMemoryPartition<T> partition : this.partitions) {
        segmentCount += partition.getBlockCount();
        segmentCount += partition.numOverflowSegments;
    }
    segmentCount += this.compactionMemory.getBlockCount();
    return segmentCount * this.segmentSize;
} | Size of all memory segments owned by this hash table
@return size in bytes | getSize | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | Apache-2.0 |
/**
 * Computes the size in bytes of all memory segments owned by the partitions of this
 * hash table, excluding the compaction partition.
 *
 * @return the partitions' memory size in bytes
 */
private long getPartitionSize() {
    long blockCount = 0;
    for (InMemoryPartition<T> partition : this.partitions) {
        blockCount += partition.getBlockCount();
    }
    return blockCount * this.segmentSize;
} | Size of all memory segments owned by the partitions of this hash table excluding the
compaction partition
@return size in bytes | getPartitionSize | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | Apache-2.0 |
/** @return the number of memory segments in the largest partition */
private int getMaxPartition() {
    int largest = 0;
    for (InMemoryPartition<T> partition : this.partitions) {
        largest = Math.max(largest, partition.getBlockCount());
    }
    return largest;
} | @return number of memory segments in the largest partition | getMaxPartition | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | Apache-2.0
/** @return the number of memory segments in the smallest partition */
private int getMinPartition() {
    int smallest = Integer.MAX_VALUE;
    for (InMemoryPartition<T> partition : this.partitions) {
        smallest = Math.min(smallest, partition.getBlockCount());
    }
    return smallest;
} | @return number of memory segments in the smallest partition | getMinPartition | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | Apache-2.0
/** @return the number of memory segments used in overflow buckets across all partitions */
private int getOverflowSegmentCount() {
    int total = 0;
    for (InMemoryPartition<T> partition : this.partitions) {
        total += partition.numOverflowSegments;
    }
    return total;
} | @return number of memory segments used in overflow buckets | getOverflowSegmentCount | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | Apache-2.0
/**
 * Picks an initial number of hash buckets. The result is always a multiple of
 * {@code numPartitions}, derived from an estimate of how many records of the given
 * length fit into the available buffers.
 *
 * @param numBuffers number of available memory segments
 * @param bufferSize size of each memory segment in bytes
 * @param numPartitions number of partitions the table will use
 * @param recordLenBytes estimated record length in bytes
 * @return the initial number of buckets, capped at {@code Integer.MAX_VALUE}
 */
private static int getInitialTableSize(
        int numBuffers, int bufferSize, int numPartitions, int recordLenBytes) {
    // total memory and the number of records it can hold (including per-record overhead)
    final long totalBytes = ((long) bufferSize) * numBuffers;
    final long storableRecords = totalBytes / (recordLenBytes + RECORD_OVERHEAD_BYTES);
    // bytes required by the bucket entries for that many records
    final long entryBytes = storableRecords * RECORD_OVERHEAD_BYTES;
    // size the table from the entry bytes, then round up to a multiple of numPartitions
    long bucketCount = entryBytes / (2 * HASH_BUCKET_SIZE) + 1;
    bucketCount += numPartitions - bucketCount % numPartitions;
    return bucketCount > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) bucketCount;
} | tries to find a good value for the number of buckets will ensure that the number of buckets
is a multiple of numPartitions
@return number of buckets | getInitialTableSize | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | Apache-2.0 |
/**
 * Assigns a partition to a bucket by simple modulo mapping.
 *
 * @param bucket bucket index (expected non-negative; a negative index would yield a
 *     negative result)
 * @param numPartitions number of partitions
 * @return the partition number for the bucket
 */
private static byte assignPartition(int bucket, byte numPartitions) {
    return (byte) (bucket % numPartitions);
} | Assigns a partition to a bucket.
@param bucket bucket index
@param numPartitions number of partitions
@return The hash code for the integer. | assignPartition | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | Apache-2.0 |
/**
 * Compacts (garbage collects) the given partition using a copy-compact strategy: every
 * live record reachable from a bucket (or chained overflow bucket) pointer is copied
 * into the spare compaction partition, the bucket pointers are rewritten to the new
 * locations, and the compaction partition is swapped in for the original partition. The
 * old partition then becomes the new, empty compaction partition.
 *
 * <p>No-op if the table was closed, the partition number is out of range, or the
 * partition is already compacted (no garbage).
 *
 * @param partitionNumber partition to compact
 * @throws IOException if a bucket belonging to a different partition is encountered, or
 *     if reading/appending a record fails
 */
private void compactPartition(final int partitionNumber) throws IOException {
    // do nothing if table was closed, parameter is invalid or no garbage exists
    if (this.closed
            || partitionNumber >= this.partitions.size()
            || this.partitions.get(partitionNumber).isCompacted()) {
        return;
    }
    // release all segments owned by compaction partition
    this.compactionMemory.clearAllMemory(availableMemory);
    this.compactionMemory.allocateSegments(1);
    this.compactionMemory.pushDownPages();
    T tempHolder = this.buildSideSerializer.createInstance();
    final int numPartitions = this.partitions.size();
    InMemoryPartition<T> partition = this.partitions.remove(partitionNumber);
    MemorySegment[] overflowSegments = partition.overflowSegments;
    long pointer;
    int pointerOffset;
    int bucketOffset;
    final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
    // buckets are assigned to partitions round-robin, so walk every numPartitions-th bucket
    for (int i = 0, bucket = partitionNumber;
            i < this.buckets.length && bucket < this.numBuckets;
            i++) {
        MemorySegment segment = this.buckets[i];
        // go over all buckets in the segment belonging to the partition
        for (int k = bucket % bucketsPerSegment;
                k < bucketsPerSegment && bucket < this.numBuckets;
                k += numPartitions, bucket += numPartitions) {
            bucketOffset = k * HASH_BUCKET_SIZE;
            if ((int) segment.get(bucketOffset + HEADER_PARTITION_OFFSET) != partitionNumber) {
                throw new IOException(
                        "Accessed wrong bucket! wanted: "
                                + partitionNumber
                                + " got: "
                                + segment.get(bucketOffset + HEADER_PARTITION_OFFSET));
            }
            // loop over all segments that are involved in the bucket (original bucket plus
            // overflow buckets)
            int countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
            int numInSegment = 0;
            pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
            while (true) {
                while (numInSegment < countInSegment) {
                    // copy the live record into the compaction partition and
                    // rewrite the bucket's pointer to the new location
                    pointer = segment.getLong(pointerOffset);
                    tempHolder = partition.readRecordAt(pointer, tempHolder);
                    pointer = this.compactionMemory.appendRecord(tempHolder);
                    segment.putLong(pointerOffset, pointer);
                    pointerOffset += POINTER_LEN;
                    numInSegment++;
                }
                // this segment is done. check if there is another chained bucket
                final long forwardPointer =
                        segment.getLong(bucketOffset + HEADER_FORWARD_OFFSET);
                if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
                    break;
                }
                final int overflowSegNum = (int) (forwardPointer >>> 32);
                segment = overflowSegments[overflowSegNum];
                bucketOffset = (int) forwardPointer;
                countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
                pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
                numInSegment = 0;
            }
            segment = this.buckets[i];
        }
    }
    // swap partition with compaction partition
    this.compactionMemory.setPartitionNumber(partitionNumber);
    this.partitions.add(partitionNumber, compactionMemory);
    this.partitions.get(partitionNumber).overflowSegments = partition.overflowSegments;
    this.partitions.get(partitionNumber).numOverflowSegments = partition.numOverflowSegments;
    this.partitions.get(partitionNumber).nextOverflowBucket = partition.nextOverflowBucket;
    this.partitions.get(partitionNumber).setIsCompacted(true);
    // this.partitions.get(partitionNumber).pushDownPages();
    this.compactionMemory = partition;
    this.compactionMemory.resetRecordCounter();
    this.compactionMemory.setPartitionNumber(-1);
    this.compactionMemory.overflowSegments = null;
    this.compactionMemory.numOverflowSegments = 0;
    this.compactionMemory.nextOverflowBucket = 0;
    // try to allocate maximum segment count
    this.compactionMemory.clearAllMemory(this.availableMemory);
    int maxSegmentNumber = this.getMaxPartition();
    this.compactionMemory.allocateSegments(maxSegmentNumber);
    this.compactionMemory.resetRWViews();
    this.compactionMemory.pushDownPages();
} | Compacts (garbage collects) partition with copy-compact strategy using compaction partition
@param partitionNumber partition to compact
@throws IOException | compactPartition | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/CompactingHashTable.java | Apache-2.0 |
/**
 * Creates a {@link MutableHashTable} for a hash join, allocating the backing memory
 * pages from the given memory manager according to the memory fraction.
 *
 * @param memoryFraction the fraction of the owner task's memory to dedicate to the table
 * @param useBloomFilters whether the hash table should use bloom filters
 * @return the configured hash table
 * @throws MemoryAllocationException if the required pages cannot be allocated
 */
public <BT, PT> MutableHashTable<BT, PT> getHashJoin(
        TypeSerializer<BT> buildSideSerializer,
        TypeComparator<BT> buildSideComparator,
        TypeSerializer<PT> probeSideSerializer,
        TypeComparator<PT> probeSideComparator,
        TypePairComparator<PT, BT> pairComparator,
        MemoryManager memManager,
        IOManager ioManager,
        AbstractInvokable ownerTask,
        double memoryFraction,
        boolean useBloomFilters)
        throws MemoryAllocationException {
    // translate the memory fraction into a concrete number of pages and allocate them
    final int numPages = memManager.computeNumberOfPages(memoryFraction);
    final List<MemorySegment> memorySegments = memManager.allocatePages(ownerTask, numPages);
    return new MutableHashTable<BT, PT>(
            buildSideSerializer,
            probeSideSerializer,
            buildSideComparator,
            probeSideComparator,
            pairComparator,
            memorySegments,
            ioManager,
            useBloomFilters);
} | Common methods for all Hash Join Iterators. | getHashJoin | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/HashJoinIteratorBase.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/HashJoinIteratorBase.java | Apache-2.0
/**
 * Sets whether further partitioning is enabled.
 *
 * <p>NOTE(review): the method name contains a typo ("Patitioning"); it is kept as-is
 * because renaming would break existing callers.
 *
 * @param v true to enable further partitioning
 */
protected void setFurtherPatitioning(boolean v) {
    furtherPartitioning = v;
} | @param <BT> The type of the build side records.
@param <PT> The type of the probe side records. | setFurtherPatitioning | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/HashPartition.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/HashPartition.java | Apache-2.0 |
/**
 * Overwrites this partition's number; should only be used on the compaction partition.
 *
 * @param number the new partition number
 */
public void setPartitionNumber(int number) {
    this.partitionNumber = number;
} | overwrites partition number and should only be used on compaction partition
@param number new partition | setPartitionNumber | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InMemoryPartition.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InMemoryPartition.java | Apache-2.0 |
/** @return the number of memory segments (pages) owned by this partition */
public int getBlockCount() {
    return this.partitionPages.size();
} | @return number of segments owned by partition | getBlockCount | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InMemoryPartition.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InMemoryPartition.java | Apache-2.0
/**
 * Returns the number of records in this partition, including garbage.
 *
 * @return the record count
 */
public long getRecordCount() {
    return this.recordCounter;
} | number of records in partition including garbage
@return number record count | getRecordCount | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InMemoryPartition.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InMemoryPartition.java | Apache-2.0 |
/** Sets the record counter to zero; should only be used on the compaction partition. */
public void resetRecordCounter() {
    this.recordCounter = 0L;
} | sets record counter to zero and should only be used on compaction partition | resetRecordCounter | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InMemoryPartition.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InMemoryPartition.java | Apache-2.0
/**
 * Resets the overflow-bucket counters and returns the freed overflow segments; should
 * only be used when resizing the table.
 *
 * @return the memory segments freed from the overflow buckets
 */
public ArrayList<MemorySegment> resetOverflowBuckets() {
    this.numOverflowSegments = 0;
    this.nextOverflowBucket = 0;
    final ArrayList<MemorySegment> freedSegments =
            new ArrayList<MemorySegment>(this.overflowSegments.length);
    // collect all non-null overflow segments before dropping the array
    for (MemorySegment segment : this.overflowSegments) {
        if (segment != null) {
            freedSegments.add(segment);
        }
    }
    this.overflowSegments = new MemorySegment[2];
    return freedSegments;
} | resets overflow bucket counters and returns freed memory and should only be used for resizing
@return freed memory segments | resetOverflowBuckets | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InMemoryPartition.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InMemoryPartition.java | Apache-2.0 |
/**
 * Returns whether this partition is compacted, i.e. contains no garbage (see
 * {@code setIsCompacted}: true is set after compaction, false when garbage is created).
 *
 * @return true if the partition is compacted (no garbage)
 */
public boolean isCompacted() {
    return this.compacted;
} | @return true if garbage exists in partition
public void setIsCompacted(boolean compacted) {
this.compacted = compacted;
} | sets compaction status (should only be set <code>true</code> directly after compaction and
<code>false</code> when garbage was created)
@param compacted compaction status | setIsCompacted | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InMemoryPartition.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InMemoryPartition.java | Apache-2.0 |
@Deprecated
public void overwriteRecordAt(long pointer, T record) throws IOException {
long tmpPointer = this.writeView.getCurrentPointer();
this.writeView.resetTo(pointer);
this.serializer.serialize(record, this.writeView);
this.writeView.resetTo(tmpPointer);
} | UNSAFE!! overwrites record causes inconsistency or data loss for overwriting everything but
records of the exact same size
@param pointer pointer to start of record
@param record record to overwrite old one with
@throws IOException
@deprecated Don't use this, overwrites record and causes inconsistency or data loss for
overwriting everything but records of the exact same size | overwriteRecordAt | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InMemoryPartition.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InMemoryPartition.java | Apache-2.0 |
public void clearAllMemory(List<MemorySegment> target) {
// return the overflow segments
if (this.overflowSegments != null) {
for (int k = 0; k < this.numOverflowSegments; k++) {
target.add(this.overflowSegments[k]);
}
}
// return the partition buffers
target.addAll(this.partitionPages);
this.partitionPages.clear();
} | releases all of the partition's segments (pages and overflow buckets)
@param target memory pool to release segments to | clearAllMemory | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InMemoryPartition.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InMemoryPartition.java | Apache-2.0 |
public long getCapacity() {
return numAllMemorySegments * (long) segmentSize;
} | Gets the total capacity of this hash table, in bytes.
@return The hash table's total capacity. | getCapacity | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | Apache-2.0 |
@Override
public void insertOrReplaceRecord(T record) throws IOException {
if (closed) {
return;
}
T match = prober.getMatchFor(record, reuse);
if (match == null) {
prober.insertAfterNoMatch(record);
} else {
prober.updateMatch(record);
}
} | Searches the hash table for a record with the given key. If it is found, then it is
overridden with the specified record. Otherwise, the specified record is inserted.
@param record The record to insert or to replace with.
@throws IOException (EOFException specifically, if memory ran out) | insertOrReplaceRecord | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | Apache-2.0 |
private void rebuild(long newNumBucketSegments) throws IOException {
// Get new bucket segments
releaseBucketSegments();
allocateBucketSegments((int) newNumBucketSegments);
T record = buildSideSerializer.createInstance();
try {
EntryIterator iter = getEntryIterator();
recordArea.resetAppendPosition();
recordArea.setWritePosition(0);
while ((record = iter.next(record)) != null && !closed) {
final int hashCode = MathUtils.jenkinsHash(buildSideComparator.hash(record));
final int bucket = hashCode & numBucketsMask;
final int bucketSegmentIndex =
bucket >>> numBucketsPerSegmentBits; // which segment contains the bucket
final MemorySegment bucketSegment = bucketSegments[bucketSegmentIndex];
final int bucketOffset =
(bucket & numBucketsPerSegmentMask)
<< bucketSizeBits; // offset of the bucket in the segment
final long firstPointer = bucketSegment.getLong(bucketOffset);
long ptrToAppended = recordArea.noSeekAppendPointerAndRecord(firstPointer, record);
bucketSegment.putLong(bucketOffset, ptrToAppended);
}
recordArea.freeSegmentsAfterAppendPosition();
holes = 0;
} catch (EOFException ex) {
throw new RuntimeException(
"Bug in InPlaceMutableHashTable: we shouldn't get out of memory during a rebuild, "
+ "because we aren't allocating any new memory.");
}
} | Same as above, but the number of bucket segments of the new table can be specified. | rebuild | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | Apache-2.0 |
private void compactOrThrow() throws IOException {
if (holes > (double) recordArea.getTotalSize() * 0.05) {
rebuild();
} else {
throw new EOFException(
"InPlaceMutableHashTable memory ran out. " + getMemoryConsumptionString());
}
} | If there is wasted space (due to updated records not fitting in their old places), then do a
compaction. Else, throw EOFException to indicate that memory ran out.
@throws IOException | compactOrThrow | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | Apache-2.0 |
private void addSegment() throws EOFException {
MemorySegment m = allocateSegment();
if (m == null) {
throw new EOFException();
}
segments.add(m);
} | This class encapsulates the memory segments that belong to the record area. It - can append a
record - can overwrite a record at an arbitrary position (WARNING: the new record must have
the same size as the old one) - can be rewritten by calling resetAppendPosition - takes
memory from InPlaceMutableHashTable.freeMemorySegments on append | addSegment | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | Apache-2.0 |
public void resetAppendPosition() {
appendPosition = 0;
// this is just for safety (making sure that we fail immediately
// if a write happens without calling setWritePosition)
outView.currentSegmentIndex = -1;
outView.seekOutput(null, -1);
} | Sets appendPosition and the write position to 0, so that appending starts overwriting
elements from the beginning. (This is used in rebuild.)
<p>Note: if data was written to the area after the current appendPosition before a call
to resetAppendPosition, it should still be readable. To release the segments after the
current append position, call freeSegmentsAfterAppendPosition() | resetAppendPosition | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | Apache-2.0 |
public void freeSegmentsAfterAppendPosition() {
final int appendSegmentIndex = (int) (appendPosition >>> segmentSizeBits);
while (segments.size() > appendSegmentIndex + 1 && !closed) {
freeMemorySegments.add(segments.get(segments.size() - 1));
segments.remove(segments.size() - 1);
}
} | Releases the memory segments that are after the current append position. Note: The
situation that there are segments after the current append position can arise from a call
to resetAppendPosition(). | freeSegmentsAfterAppendPosition | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | Apache-2.0 |
public void overwritePointerAt(long pointer, long value) throws IOException {
setWritePosition(pointer);
outView.writeLong(value);
} | Overwrites the long value at the specified position.
@param pointer Points to the position to overwrite.
@param value The value to write.
@throws IOException | overwritePointerAt | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | Apache-2.0 |
public void overwriteRecordAt(long pointer, DataInputView input, int size)
throws IOException {
setWritePosition(pointer);
outView.write(input, size);
} | Overwrites a record at the specified position. The record is read from a DataInputView
(this will be the staging area). WARNING: The record must not be larger than the original
record.
@param pointer Points to the position to overwrite.
@param input The DataInputView to read the record from
@param size The size of the record
@throws IOException | overwriteRecordAt | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | Apache-2.0 |
public long appendPointerAndCopyRecord(long pointer, DataInputView input, int recordSize)
throws IOException {
setWritePosition(appendPosition);
final long oldLastPosition = appendPosition;
outView.writeLong(pointer);
outView.write(input, recordSize);
appendPosition += 8 + recordSize;
return oldLastPosition;
} | Appends a pointer and a record. The record is read from a DataInputView (this will be the
staging area).
@param pointer The pointer to write (Note: this is NOT the position to write to!)
@param input The DataInputView to read the record from
@param recordSize The size of the record
@return A pointer to the written data
@throws IOException (EOFException specifically, if memory ran out) | appendPointerAndCopyRecord | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | Apache-2.0 |
public long appendPointerAndRecord(long pointer, T record) throws IOException {
setWritePosition(appendPosition);
return noSeekAppendPointerAndRecord(pointer, record);
} | Appends a pointer and a record.
@param pointer The pointer to write (Note: this is NOT the position to write to!)
@param record The record to write
@return A pointer to the written data
@throws IOException (EOFException specifically, if memory ran out) | appendPointerAndRecord | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | Apache-2.0 |
@Override
public void updateMatch(T newRecord) throws IOException {
if (closed) {
return;
}
if (curElemPtr == END_OF_LIST) {
throw new RuntimeException(
"updateMatch was called after getMatchFor returned no match");
}
try {
// determine the new size
stagingSegmentsOutView.reset();
buildSideSerializer.serialize(newRecord, stagingSegmentsOutView);
final int newRecordSize = (int) stagingSegmentsOutView.getWritePosition();
stagingSegmentsInView.setReadPosition(0);
// Determine the size of the place of the old record.
final int oldRecordSize = (int) (recordEnd - (curElemPtr + RECORD_OFFSET_IN_LINK));
if (newRecordSize == oldRecordSize) {
// overwrite record at its original place
recordArea.overwriteRecordAt(
curElemPtr + RECORD_OFFSET_IN_LINK,
stagingSegmentsInView,
newRecordSize);
} else {
// new record has a different size than the old one, append new at the end of
// the record area.
// Note: we have to do this, even if the new record is smaller, because
// otherwise EntryIterator
// wouldn't know the size of this place, and wouldn't know where does the next
// record start.
final long pointerToAppended =
recordArea.appendPointerAndCopyRecord(
nextPtr, stagingSegmentsInView, newRecordSize);
// modify the pointer in the previous link
if (prevElemPtr == INVALID_PREV_POINTER) {
// list had only one element, so prev is in the bucketSegments
bucketSegments[bucketSegmentIndex].putLong(bucketOffset, pointerToAppended);
} else {
recordArea.overwritePointerAt(prevElemPtr, pointerToAppended);
}
// write the negated size of the hole to the place where the next pointer was,
// so that EntryIterator
// will know the size of the place without reading the old record.
// The negative sign will mean that the record is abandoned, and the
// the -1 is for avoiding trouble in case of a record having 0 size. (though I
// think this should
// never actually happen)
// Note: the last record in the record area can't be abandoned. (EntryIterator
// makes use of this fact.)
recordArea.overwritePointerAt(curElemPtr, -oldRecordSize - 1);
holes += oldRecordSize;
}
} catch (EOFException ex) {
compactOrThrow();
insertOrReplaceRecord(newRecord);
}
} | This method can be called after getMatchFor returned a match. It will overwrite the
record that was found by getMatchFor. Warning: The new record should have the same key as
the old! WARNING; Don't do any modifications to the table between getMatchFor and
updateMatch!
@param newRecord The record to override the old record with.
@throws IOException (EOFException specifically, if memory ran out) | updateMatch | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | Apache-2.0 |
public void insertAfterNoMatch(T record) throws IOException {
if (closed) {
return;
}
// create new link
long pointerToAppended;
try {
pointerToAppended = recordArea.appendPointerAndRecord(END_OF_LIST, record);
} catch (EOFException ex) {
compactOrThrow();
insert(record);
return;
}
// add new link to the end of the list
if (prevElemPtr == INVALID_PREV_POINTER) {
// list was empty
bucketSegments[bucketSegmentIndex].putLong(bucketOffset, pointerToAppended);
} else {
// update the pointer of the last element of the list.
recordArea.overwritePointerAt(prevElemPtr, pointerToAppended);
}
numElements++;
resizeTableIfNecessary();
} | This method can be called after getMatchFor returned null. It inserts the given record to
the hash table. Important: The given record should have the same key as the record that
was given to getMatchFor! WARNING; Don't do any modifications to the table between
getMatchFor and insertAfterNoMatch!
@throws IOException (EOFException specifically, if memory ran out) | insertAfterNoMatch | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | Apache-2.0 |
public void updateTableEntryWithReduce(T record) throws Exception {
T match = prober.getMatchFor(record, reuse);
if (match == null) {
prober.insertAfterNoMatch(record);
} else {
// do the reduce step
T res = reducer.reduce(match, record);
// We have given reuse to the reducer UDF, so create new one if object reuse is
// disabled
if (!objectReuseEnabled) {
reuse = buildSideSerializer.createInstance();
}
prober.updateMatch(res);
}
} | Looks up the table entry that has the same key as the given record, and updates it by
performing a reduce step.
@param record The record to update.
@throws Exception | updateTableEntryWithReduce | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/InPlaceMutableHashTable.java | Apache-2.0 |
public void open(
final MutableObjectIterator<BT> buildSide, final MutableObjectIterator<PT> probeSide)
throws IOException {
open(buildSide, probeSide, false);
} | Opens the hash join. This method reads the build-side input and constructs the initial hash
table, gradually spilling partitions that do not fit into memory.
@param buildSide Build side input.
@param probeSide Probe side input.
@throws IOException Thrown, if an I/O problem occurs while spilling a partition. | open | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java | Apache-2.0 |
protected void releaseTable() {
// set the counters back
this.numBuckets = 0;
if (this.buckets != null) {
for (MemorySegment bucket : this.buckets) {
this.availableMemory.add(bucket);
}
this.buckets = null;
}
} | Releases the table (the array of buckets) and returns the occupied memory segments to the
list of free segments. | releaseTable | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java | Apache-2.0 |
public static byte assignPartition(int bucket, byte numPartitions) {
return (byte) (bucket % numPartitions);
} | Assigns a partition to a bucket.
@param bucket The bucket to get the partition for.
@param numPartitions The number of partitions.
@return The partition for the bucket. | assignPartition | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java | Apache-2.0 |
private boolean moveToNextOnHeapBucket() {
while (!moveToNextBucket()) {
if (scanCount >= totalBucketNumber) {
return false;
}
}
return true;
} | Loop to make sure that it would move to next on heap bucket, return true while move to a
on heap bucket, return false if there is no more bucket. | moveToNextOnHeapBucket | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/MutableHashTable.java | Apache-2.0 |
int spillInMemoryPartition(
FileIOChannel.ID targetChannel,
IOManager ioManager,
LinkedBlockingQueue<MemorySegment> writeBehindBuffers)
throws IOException {
this.initialPartitionBuffersCount = partitionBuffers.length; // for ReOpenableHashMap
this.initialBuildSideChannel = targetChannel;
initialBuildSideWriter =
ioManager.createBlockChannelWriter(targetChannel, writeBehindBuffers);
final int numSegments = this.partitionBuffers.length;
for (int i = 0; i < numSegments; i++) {
initialBuildSideWriter.writeBlock(partitionBuffers[i]);
}
this.partitionBuffers = null;
initialBuildSideWriter.close();
// num partitions are now in the writeBehindBuffers. We propagate this information back
return numSegments;
} | Spills this partition to disk. This method is invoked once after the initial open() method
@return Number of memorySegments in the writeBehindBuffers! | spillInMemoryPartition | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/ReOpenableHashPartition.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/hash/ReOpenableHashPartition.java | Apache-2.0 |
@SuppressWarnings("unchecked")
public List<RecordWriter<SerializationDelegate<T>>> getWriters() {
return Collections.unmodifiableList(Arrays.asList(this.writers));
} | List of writers that are associated with this output collector
@return list of writers | getWriters | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/shipping/OutputCollector.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/shipping/OutputCollector.java | Apache-2.0 |
private void crossFirst1withNValues(
final T1 val1,
final T2 firstValN,
final Iterator<T2> valsN,
final FlatJoinFunction<T1, T2, O> joinFunction,
final Collector<O> collector)
throws Exception {
T1 copy1 = createCopy(serializer1, val1, this.copy1);
joinFunction.join(copy1, firstValN, collector);
// set copy and join first element
boolean more = true;
do {
final T2 nRec = valsN.next();
if (valsN.hasNext()) {
copy1 = createCopy(serializer1, val1, this.copy1);
joinFunction.join(copy1, nRec, collector);
} else {
joinFunction.join(val1, nRec, collector);
more = false;
}
} while (more);
} | Crosses a single value from the first input with N values, all sharing a common key.
Effectively realizes a <i>1:N</i> join.
@param val1 The value form the <i>1</i> side.
@param firstValN The first of the values from the <i>N</i> side.
@param valsN Iterator over remaining <i>N</i> side values.
@throws Exception Forwards all exceptions thrown by the stub. | crossFirst1withNValues | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/AbstractMergeIterator.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/AbstractMergeIterator.java | Apache-2.0 |
static <T> CircularElement<T> endMarker() {
@SuppressWarnings("unchecked")
CircularElement<T> c = (CircularElement<T>) EOF_MARKER;
return c;
} | Gets the element that is passed as marker for the end of data.
@return The element that is passed as marker for the end of data. | endMarker | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/CircularElement.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/CircularElement.java | Apache-2.0 |
static <T> CircularElement<T> spillingMarker() {
@SuppressWarnings("unchecked")
CircularElement<T> c = (CircularElement<T>) SPILLING_MARKER;
return c;
} | Gets the element that is passed as marker for signal beginning of spilling.
@return The element that is passed as marker for signal beginning of spilling. | spillingMarker | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/CircularElement.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/CircularElement.java | Apache-2.0 |
private void startThreads() {
if (this.readThread != null) {
this.readThread.start();
}
this.sortThread.start();
this.spillThread.start();
} | Starts all the threads that are used by this sort-merger. | startThreads | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/ExternalSorter.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/ExternalSorter.java | Apache-2.0 |
public ExternalSorter<T> build(MutableObjectIterator<T> input)
throws MemoryAllocationException {
return doBuild(
(exceptionHandler, dispatcher, largeRecordHandler, startSpillingBytes) ->
new ReadingThread<>(
exceptionHandler,
input,
dispatcher,
largeRecordHandler,
serializer.createInstance(),
startSpillingBytes));
} | Creates a pull-based {@link Sorter}. The {@link Sorter#getIterator()} will return when all
the records from the given input are consumed. Will spawn three threads: read, sort, spill. | build | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/ExternalSorterBuilder.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/ExternalSorterBuilder.java | Apache-2.0 |
private final boolean lessThan(T a, T b) {
return comparator.compare(a, b) < 0;
} | Determines the ordering of objects in this priority queue.
@param a The first element.
@param b The second element.
@return True, if a < b, false otherwise. | lessThan | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java | Apache-2.0 |
public int remainingCapacity() {
return capacity - size;
} | Returns the remaining capacity of the backing array.
@return The remaining capacity of the backing array. | remainingCapacity | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java | Apache-2.0 |
public final void put(T element) {
size++;
heap[size] = element;
upHeap();
} | Adds a buffer to a PriorityQueue in log(size) time. If one tries to add more objects than
maxSize from initialize a RuntimeException (ArrayIndexOutOfBound) is thrown. | put | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java | Apache-2.0 |
public boolean offer(T element) {
if (size < capacity) {
put(element);
return true;
} else if (size > 0 && !lessThan(element, peek())) {
heap[1] = element;
adjustTop();
return true;
} else {
return false;
}
} | Adds element to the PriorityQueue in log(size) time if either the PriorityQueue is not full,
or not lessThan(element, top()).
@param element The element to insert,
@return True, if element is added, false otherwise. | offer | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java | Apache-2.0 |
public final T peek() {
if (size > 0) {
return heap[1];
} else {
return null;
}
} | Returns the least element of the PriorityQueue in constant time, but does not remove it from
the priority queue.
@return The least element. | peek | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java | Apache-2.0 |
public final T poll() {
if (size > 0) {
T result = heap[1]; // save first value
heap[1] = heap[size]; // move last to first
heap[size] = null; // permit GC of objects
size--;
downHeap(); // adjust heap
return result;
} else {
return null;
}
} | Removes and returns the least element of the PriorityQueue in log(size) time.
@return The least element. | poll | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java | Apache-2.0 |
public final int size() {
return size;
} | Returns the number of elements currently stored in the PriorityQueue.
@return The number of elements in the queue. | size | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/PartialOrderPriorityQueue.java | Apache-2.0 |
private static void fix(IndexedSortable s, int pN, int pO, int rN, int rO) {
if (s.compare(pN, pO, rN, rO) > 0) {
s.swap(pN, pO, rN, rO);
}
} | Fix the records into sorted order, swapping when the first record is greater than the second
record.
@param s paged sortable
@param pN page number of first record
@param pO page offset of first record
@param rN page number of second record
@param rO page offset of second record | fix | java | apache/flink | flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/QuickSort.java | https://github.com/apache/flink/blob/master/flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/QuickSort.java | Apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.