code
stringlengths
25
201k
docstring
stringlengths
19
96.2k
func_name
stringlengths
0
235
language
stringclasses
1 value
repo
stringlengths
8
51
path
stringlengths
11
314
url
stringlengths
62
377
license
stringclasses
7 values
public static <UK, UV> byte[] serializeMap( Iterable<Map.Entry<UK, UV>> entries, TypeSerializer<UK> keySerializer, TypeSerializer<UV> valueSerializer) throws IOException { if (entries != null) { // Serialize DataOutputSerializer dos = new DataOutputSerializer(32); for (Map.Entry<UK, UV> entry : entries) { keySerializer.serialize(entry.getKey(), dos); if (entry.getValue() == null) { dos.writeBoolean(true); } else { dos.writeBoolean(false); valueSerializer.serialize(entry.getValue(), dos); } } return dos.getCopyOfBuffer(); } else { return null; } }
Serializes all entries of the Iterable with the given key and value serializers. @param entries Key-value pairs to serialize @param keySerializer Serializer for UK @param valueSerializer Serializer for UV @param <UK> Type of the keys @param <UV> Type of the values @return Serialized entries or <code>null</code> if <code>entries</code> is <code>null</code> @throws IOException On failure during serialization
serializeMap
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/serialization/KvStateSerializer.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/serialization/KvStateSerializer.java
Apache-2.0
/**
 * Deserializes all key-value pairs with the given serializers.
 *
 * <p>Reads entries until the input is exhausted; each entry is a serialized key, a null
 * marker, and (when the marker is false) a serialized value.
 *
 * @param serializedValue Serialized value of type Map&lt;UK, UV&gt;
 * @param keySerializer Serializer for UK
 * @param valueSerializer Serializer for UV
 * @param <UK> Type of the key
 * @param <UV> Type of the value
 * @return Deserialized map or {@code null} if the serialized value is {@code null}
 * @throws IOException On failure during deserialization
 */
public static <UK, UV> Map<UK, UV> deserializeMap(
        byte[] serializedValue,
        TypeSerializer<UK> keySerializer,
        TypeSerializer<UV> valueSerializer)
        throws IOException {
    if (serializedValue == null) {
        return null;
    }

    final DataInputDeserializer input =
            new DataInputDeserializer(serializedValue, 0, serializedValue.length);

    final Map<UK, UV> result = new HashMap<>();
    while (input.available() > 0) {
        final UK key = keySerializer.deserialize(input);
        // The boolean marker written by serializeMap: true means the value was null.
        final UV value = input.readBoolean() ? null : valueSerializer.deserialize(input);
        result.put(key, value);
    }
    return result;
}
Deserializes all kv pairs with the given serializer. @param serializedValue Serialized value of type Map&lt;UK, UV&gt; @param keySerializer Serializer for UK @param valueSerializer Serializer for UV @param <UK> Type of the key @param <UV> Type of the value. @return Deserialized map or <code>null</code> if the serialized value is <code>null</code> @throws IOException On failure during deserialization
deserializeMap
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/serialization/KvStateSerializer.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/client/state/serialization/KvStateSerializer.java
Apache-2.0
/**
 * Deserializes a {@link KvStateRequest} from the buffer.
 *
 * <p>Wire layout: job id (two longs), state-name length + UTF bytes, key hash code,
 * key/namespace length + bytes.
 *
 * @param buf buffer positioned at the start of the serialized request
 * @return the deserialized request
 */
@Override
public KvStateRequest deserializeMessage(ByteBuf buf) {
    final JobID jobId = new JobID(buf.readLong(), buf.readLong());

    final int nameLength = buf.readInt();
    Preconditions.checkArgument(
            nameLength >= 0,
            "Negative length for state name. This indicates a serialization error.");
    String stateName = "";
    if (nameLength > 0) {
        final byte[] nameBytes = new byte[nameLength];
        buf.readBytes(nameBytes);
        stateName = new String(nameBytes, ConfigConstants.DEFAULT_CHARSET);
    }

    final int keyHashCode = buf.readInt();

    final int kvLength = buf.readInt();
    Preconditions.checkArgument(
            kvLength >= 0,
            "Negative length for key and namespace. This indicates a serialization error.");
    final byte[] serializedKeyAndNamespace = new byte[kvLength];
    if (kvLength > 0) {
        buf.readBytes(serializedKeyAndNamespace);
    }

    return new KvStateRequest(jobId, stateName, keyHashCode, serializedKeyAndNamespace);
}
A {@link MessageDeserializer deserializer} for {@link KvStateRequest}.
deserializeMessage
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/messages/KvStateRequest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/messages/KvStateRequest.java
Apache-2.0
/**
 * Deserializes a {@link KvStateResponse} from the buffer.
 *
 * <p>Wire layout: content length (int) followed by that many content bytes.
 *
 * @param buf buffer positioned at the start of the serialized response
 * @return the deserialized response
 */
@Override
public KvStateResponse deserializeMessage(ByteBuf buf) {
    final int contentLength = buf.readInt();
    Preconditions.checkArgument(
            contentLength >= 0,
            "Negative length for state content. This indicates a serialization error.");

    final byte[] content = new byte[contentLength];
    buf.readBytes(content);
    return new KvStateResponse(content);
}
A {@link MessageDeserializer deserializer} for {@link KvStateResponse}.
deserializeMessage
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/messages/KvStateResponse.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/messages/KvStateResponse.java
Apache-2.0
/**
 * Creates the fixed-size thread pool used for query execution.
 *
 * <p>Threads are daemon threads named "Flink &lt;server name&gt; Thread &lt;n&gt;".
 *
 * @return Thread pool with {@code numQueryThreads} threads for query execution
 */
private ExecutorService createQueryExecutor() {
    return Executors.newFixedThreadPool(
            numQueryThreads,
            new ThreadFactoryBuilder()
                    .setDaemon(true)
                    .setNameFormat("Flink " + getServerName() + " Thread %d")
                    .build());
}
Creates a thread pool for the query execution. @return Thread pool for query execution
createQueryExecutor
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/AbstractServerBase.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/AbstractServerBase.java
Apache-2.0
/** Returns the thread pool that processes incoming query requests. */
protected ExecutorService getQueryExecutor() {
    return this.queryExecutor;
}
Returns the thread-pool responsible for processing incoming requests.
getQueryExecutor
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/AbstractServerBase.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/AbstractServerBase.java
Apache-2.0
/**
 * Returns the name of this server (mainly useful for logging and debugging).
 *
 * @return The name of the server.
 */
public String getServerName() {
    return this.serverName;
}
Gets the name of the server. This is useful for debugging. @return The name of the server.
getServerName
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/AbstractServerBase.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/AbstractServerBase.java
Apache-2.0
/**
 * Returns the address this server is bound to.
 *
 * @return the bound server address
 * @throws IllegalStateException if the server has not been started yet
 */
public InetSocketAddress getServerAddress() {
    final InetSocketAddress address = serverAddress;
    Preconditions.checkState(
            address != null, "Server " + serverName + " has not been started.");
    return address;
}
Returns the address of this server. @return AbstractServerBase address @throws IllegalStateException If server has not been started yet
getServerAddress
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/AbstractServerBase.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/AbstractServerBase.java
Apache-2.0
/**
 * Starts the server by attempting to bind to each port of the configured range in turn
 * (blocking) until one succeeds.
 *
 * @throws Throwable if binding fails for a reason other than port occupation, or a
 *     {@link FlinkRuntimeException} if every port in the range is occupied.
 */
public void start() throws Throwable {
    Preconditions.checkState(
            serverAddress == null && serverShutdownFuture.get() == null,
            serverName + " is already running @ " + serverAddress + ". ");

    // Try ports in order until a bind succeeds (attemptToBind returns true).
    for (Iterator<Integer> ports = bindPortRange.iterator(); ports.hasNext(); ) {
        if (attemptToBind(ports.next())) {
            break;
        }
    }

    if (serverAddress != null) {
        log.info("Started {} @ {}.", serverName, serverAddress);
    } else {
        log.info(
                "Unable to start {}. All ports in provided range ({}) are occupied.",
                serverName,
                bindPortRange);
        throw new FlinkRuntimeException(
                "Unable to start " + serverName + ". All ports in provided range are occupied.");
    }
}
Starts the server by binding to the configured bind address (blocking). @throws Throwable If something goes wrong during the bind operation.
start
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/AbstractServerBase.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/AbstractServerBase.java
Apache-2.0
/**
 * Tries to start the server at the provided port.
 *
 * <p>This, in conjunction with {@link #start()}, tries to start the server on a free port
 * among the port range provided at the constructor. A {@link BindException} is swallowed
 * (the server is shut down and {@code false} is returned) so the caller can try the next
 * port; any other failure is rethrown.
 *
 * @param port the port to try to bind the server to.
 * @return {@code true} if the bind succeeded (and {@code serverAddress} is set),
 *     {@code false} if the port was already occupied.
 * @throws Throwable any non-bind failure raised by the Netty bootstrap.
 */
private boolean attemptToBind(final int port) throws Throwable {
    log.debug("Attempting to start {} on port {}.", serverName, port);

    // A fresh executor and handler are created per attempt; a failed bind shuts them
    // down again via shutdownServer() below.
    this.queryExecutor = createQueryExecutor();
    this.handler = initializeHandler();

    final NettyBufferPool bufferPool = new NettyBufferPool(numEventLoopThreads);

    final ThreadFactory threadFactory =
            new ThreadFactoryBuilder()
                    .setDaemon(true)
                    .setNameFormat("Flink " + serverName + " EventLoop Thread %d")
                    .build();

    final NioEventLoopGroup nioGroup =
            new NioEventLoopGroup(numEventLoopThreads, threadFactory);

    this.bootstrap =
            new ServerBootstrap()
                    .localAddress(bindAddress, port)
                    .group(nioGroup)
                    .channel(NioServerSocketChannel.class)
                    .option(ChannelOption.ALLOCATOR, bufferPool)
                    .childOption(ChannelOption.ALLOCATOR, bufferPool)
                    .childHandler(new ServerChannelInitializer<>(handler));

    final int defaultHighWaterMark = 64 * 1024; // from DefaultChannelConfig (not exposed)
    // NOTE(review): the branch presumably orders the two childOption calls so the
    // low mark never transiently exceeds the high mark relative to Netty's defaults
    // — confirm against Netty's WriteBufferWaterMark handling.
    //noinspection ConstantConditions
    // (ignore warning here to make this flexible in case the configuration values change)
    if (LOW_WATER_MARK > defaultHighWaterMark) {
        bootstrap.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, HIGH_WATER_MARK);
        bootstrap.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, LOW_WATER_MARK);
    } else { // including (newHighWaterMark < defaultLowWaterMark)
        bootstrap.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, LOW_WATER_MARK);
        bootstrap.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, HIGH_WATER_MARK);
    }

    try {
        final ChannelFuture future = bootstrap.bind().sync();
        if (future.isSuccess()) {
            final InetSocketAddress localAddress =
                    (InetSocketAddress) future.channel().localAddress();
            serverAddress =
                    new InetSocketAddress(localAddress.getAddress(), localAddress.getPort());
            return true;
        }

        // the following throw is to bypass Netty's "optimization magic"
        // and catch the bind exception.
        // the exception is thrown by the sync() call above.
        throw future.cause();
    } catch (BindException e) {
        log.debug("Failed to start {} on port {}: {}.", serverName, port, e.getMessage());
        try {
            // we shutdown the server but we reset the future every time because in
            // case of failure to bind, we will call attemptToBind() here, and not resetting
            // the flag will interfere with future shutdown attempts.
            shutdownServer()
                    .whenComplete((ignoredV, ignoredT) -> serverShutdownFuture.getAndSet(null))
                    .get();
        } catch (Exception r) {
            // Here we were seeing this problem:
            // https://github.com/netty/netty/issues/4357 if we do a get().
            // this is why we now simply wait a bit so that everything is shut down.
            log.warn("Problem while shutting down {}: {}", serverName, r.getMessage());
        }
    }
    // any other type of exception we let it bubble up.
    return false;
}
Tries to start the server at the provided port. <p>This, in conjunction with {@link #start()}, try to start the server on a free port among the port range provided at the constructor. @param port the port to try to bind the server to. @throws Throwable If something goes wrong during the bind operation.
attemptToBind
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/AbstractServerBase.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/AbstractServerBase.java
Apache-2.0
/**
 * Shuts down the server and all related thread pools.
 *
 * <p>Only the first caller installs its future via compare-and-set; subsequent calls are
 * no-ops that simply return the already-installed shutdown future. The shutdown completes
 * when the Netty event loop group, the request handler, and the query executor have all
 * terminated.
 *
 * @return A {@link CompletableFuture} that will be completed upon termination of the
 *     shutdown process.
 */
public CompletableFuture<Void> shutdownServer() {
    CompletableFuture<Void> shutdownFuture = new CompletableFuture<>();
    // CAS guards against concurrent/repeated shutdowns: only one future is ever installed.
    if (serverShutdownFuture.compareAndSet(null, shutdownFuture)) {
        log.info("Shutting down {} @ {}", serverName, serverAddress);

        // 1) Netty event loop group shutdown (graceful, zero quiet period).
        final CompletableFuture<Void> groupShutdownFuture = new CompletableFuture<>();
        if (bootstrap != null) {
            EventLoopGroup group = bootstrap.config().group();
            if (group != null && !group.isShutdown()) {
                group.shutdownGracefully(0L, 0L, TimeUnit.MILLISECONDS)
                        .addListener(
                                finished -> {
                                    if (finished.isSuccess()) {
                                        groupShutdownFuture.complete(null);
                                    } else {
                                        groupShutdownFuture.completeExceptionally(
                                                finished.cause());
                                    }
                                });
            } else {
                groupShutdownFuture.complete(null);
            }
        } else {
            groupShutdownFuture.complete(null);
        }

        // 2) Request handler shutdown.
        final CompletableFuture<Void> handlerShutdownFuture = new CompletableFuture<>();
        if (handler == null) {
            handlerShutdownFuture.complete(null);
        } else {
            handler.shutdown()
                    .whenComplete(
                            (result, throwable) -> {
                                if (throwable != null) {
                                    handlerShutdownFuture.completeExceptionally(throwable);
                                } else {
                                    handlerShutdownFuture.complete(null);
                                }
                            });
        }

        // 3) Query executor shutdown, run asynchronously since it may block up to the
        //    graceful-shutdown timeout.
        final CompletableFuture<Void> queryExecShutdownFuture =
                CompletableFuture.runAsync(
                        () -> {
                            if (queryExecutor != null) {
                                ExecutorUtils.gracefulShutdown(
                                        10L, TimeUnit.MINUTES, queryExecutor);
                            }
                        });

        // Complete the installed shutdown future once all three parts are done.
        CompletableFuture.allOf(
                        queryExecShutdownFuture, groupShutdownFuture, handlerShutdownFuture)
                .whenComplete(
                        (result, throwable) -> {
                            if (throwable != null) {
                                shutdownFuture.completeExceptionally(throwable);
                            } else {
                                shutdownFuture.complete(null);
                            }
                        });
    }
    // Always return the installed future (ours, or the one from an earlier call).
    return serverShutdownFuture.get();
}
Shuts down the server and all related thread pools. @return A {@link CompletableFuture} that will be completed upon termination of the shutdown process.
shutdownServer
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/AbstractServerBase.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/AbstractServerBase.java
Apache-2.0
/**
 * Callback invoked after the query result has been written to the channel.
 *
 * <p>Reports the request duration to the stats collector and logs the outcome.
 *
 * @param future the write future carrying success/failure of the response write
 */
@Override
public void operationComplete(ChannelFuture future) throws Exception {
    final long elapsedNanos = System.nanoTime() - creationNanos;
    final long elapsedMillis = TimeUnit.MILLISECONDS.convert(elapsedNanos, TimeUnit.NANOSECONDS);

    if (!future.isSuccess()) {
        LOG.debug("Request {} failed after {} ms", request, elapsedMillis, future.cause());
        stats.reportFailedRequest();
    } else {
        LOG.debug("Request {} was successfully answered after {} ms.", request, elapsedMillis);
        stats.reportSuccessfulRequest(elapsedMillis);
    }
}
Callback after query result has been written. <p>Gathers stats and logs errors.
operationComplete
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/AbstractServerHandler.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/AbstractServerHandler.java
Apache-2.0
private InternalConnection<REQ, RESP> createEstablishedConnection(Channel channel) { if (failureCause != null || !running) { // Close the channel and we are done. Any queued requests // are removed on the close/failure call and after that no // new ones can be enqueued. channel.close(); return this; } else { final EstablishedConnection<REQ, RESP> establishedConnection = connectionFactory.apply(channel); while (!queuedRequests.isEmpty()) { final PendingConnection.PendingRequest<REQ, RESP> pending = queuedRequests.poll(); FutureUtils.forward( establishedConnection.sendRequest(pending.getRequest()), pending); } return establishedConnection; } }
Creates an established connection from the given channel. @param channel Channel to create an established connection from
createEstablishedConnection
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/ServerConnection.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/ServerConnection.java
Apache-2.0
/** Closes the connecting channel, failing pending work with a {@link ClosedChannelException}. */
@Override
public CompletableFuture<Void> close() {
    final ClosedChannelException cause = new ClosedChannelException();
    return close(cause);
}
Close the connecting channel with a ClosedChannelException.
close
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/ServerConnection.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/ServerConnection.java
Apache-2.0
/**
 * Closes the connecting channel with the given cause, failing every queued request.
 *
 * <p>Idempotent: only the first call flips {@code running} and fails the requests;
 * later calls just return the (already completed) close future.
 *
 * @param cause failure reason propagated to queued requests and the close future
 * @return the close future, completed exceptionally with {@code cause}
 */
private CompletableFuture<Void> close(Throwable cause) {
    if (running) {
        running = false;
        failureCause = cause;

        queuedRequests.forEach(pending -> pending.completeExceptionally(cause));
        queuedRequests.clear();

        closeFuture.completeExceptionally(cause);
    }
    return closeFuture;
}
Close the connecting channel with an Exception (can be {@code null}) or forward to the established channel.
close
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/ServerConnection.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/ServerConnection.java
Apache-2.0
/** Returns the request that was queued while the channel was connecting. */
public REQ getRequest() {
    return this.request;
}
Returns the request that was queued while the channel was connecting. @return The queued request
getRequest
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/ServerConnection.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/ServerConnection.java
Apache-2.0
/**
 * Closes the channel with a cause.
 *
 * <p>Fails every in-flight request with the given cause once Netty has closed the channel,
 * then completes the close future exceptionally — with the provided cause on a clean close,
 * or with Netty's own failure if the close itself failed.
 *
 * @param cause The cause to close the channel with.
 * @return Channel close future
 */
private CompletableFuture<Void> close(final Throwable cause) {
    synchronized (lock) {
        // Only the first close flips the flag; later calls just return the future.
        if (running) {
            running = false;
            channel.close()
                    .addListener(
                            finished -> {
                                stats.reportInactiveConnection();
                                // Fail every request still awaiting a response.
                                for (long requestId : pendingRequests.keySet()) {
                                    EstablishedConnection.TimestampedCompletableFuture<RESP>
                                            pending = pendingRequests.remove(requestId);
                                    if (pending != null
                                            && pending.completeExceptionally(cause)) {
                                        stats.reportFailedRequest();
                                    }
                                }
                                // when finishing, if netty successfully closes the channel,
                                // then the provided exception is used
                                // as the reason for the closing. If there was something
                                // wrong
                                // at the netty side, then that exception
                                // is prioritized over the provided one.
                                if (finished.isSuccess()) {
                                    closeFuture.completeExceptionally(cause);
                                } else {
                                    LOG.warn(
                                            "Something went wrong when trying to close connection due to : ",
                                            cause);
                                    closeFuture.completeExceptionally(finished.cause());
                                }
                            });
        }
    }
    return closeFuture;
}
Close the channel with a cause. @param cause The cause to close the channel with. @return Channel close future
close
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/ServerConnection.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/ServerConnection.java
Apache-2.0
/**
 * Serializes the message header into the buffer.
 *
 * <p>Header layout: [version:int][message type ordinal:int].
 *
 * @param buf The {@link ByteBuf} to serialize the header into.
 * @param messageType The {@link MessageType} of the message this header refers to.
 */
private static void writeHeader(final ByteBuf buf, final MessageType messageType) {
    buf.writeInt(VERSION).writeInt(messageType.ordinal());
}
Helper for serializing the header. @param buf The {@link ByteBuf} to serialize the header into. @param messageType The {@link MessageType} of the message this header refers to.
writeHeader
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/messages/MessageSerializer.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/messages/MessageSerializer.java
Apache-2.0
/**
 * Serializes a full message frame.
 *
 * <p>Frame layout: [frame length:int][header][request id:long][payload bytes].
 *
 * @param alloc The {@link ByteBufAllocator} used to allocate the buffer to serialize the
 *     message into.
 * @param requestId The id of the request to which the message refers to.
 * @param messageType The {@link MessageType type of the message}.
 * @param payload The serialized version of the message.
 * @return A {@link ByteBuf} containing the serialized message.
 */
private static ByteBuf writePayload(
        final ByteBufAllocator alloc,
        final long requestId,
        final MessageType messageType,
        final byte[] payload) {

    // Frame length counts header + request id + payload, but not the length field itself.
    final int bodyLength = HEADER_LENGTH + REQUEST_ID_SIZE + payload.length;
    final ByteBuf target = alloc.ioBuffer(bodyLength + Integer.BYTES);

    target.writeInt(bodyLength);
    writeHeader(target, messageType);
    target.writeLong(requestId);
    target.writeBytes(payload);
    return target;
}
Helper for serializing the messages. @param alloc The {@link ByteBufAllocator} used to allocate the buffer to serialize the message into. @param requestId The id of the request to which the message refers to. @param messageType The {@link MessageType type of the message}. @param payload The serialized version of the message. @return A {@link ByteBuf} containing the serialized message.
writePayload
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/messages/MessageSerializer.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/messages/MessageSerializer.java
Apache-2.0
public static MessageType deserializeHeader(final ByteBuf buf) { // checking the version int version = buf.readInt(); Preconditions.checkState( version == VERSION, "Version Mismatch: Found " + version + ", Expected: " + VERSION + '.'); // fetching the message type int msgType = buf.readInt(); MessageType[] values = MessageType.values(); Preconditions.checkState( msgType >= 0 && msgType < values.length, "Illegal message type with index " + msgType + '.'); return values[msgType]; }
De-serializes the header and returns the {@link MessageType}. <pre> <b>The buffer is expected to be at the header position.</b> </pre> @param buf The {@link ByteBuf} containing the serialized header. @return The message type. @throws IllegalStateException If unexpected message version or message type.
deserializeHeader
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/messages/MessageSerializer.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/messages/MessageSerializer.java
Apache-2.0
/**
 * De-serializes and returns the request id.
 *
 * <p><b>The buffer is expected to be at the request id position.</b>
 *
 * @param buf The {@link ByteBuf} containing the serialized request id.
 * @return The request id.
 */
public static long getRequestId(final ByteBuf buf) {
    final long requestId = buf.readLong();
    return requestId;
}
De-serializes and returns the request id. <pre> <b>The buffer is expected to be at the request id position.</b> </pre> @param buf The {@link ByteBuf} containing the serialized request id. @return The request id.
getRequestId
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/messages/MessageSerializer.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/messages/MessageSerializer.java
Apache-2.0
/**
 * Returns the id of the request this failure responds to.
 *
 * @return Request ID responding to
 */
public long getRequestId() {
    return this.requestId;
}
Returns the request ID responding to. @return Request ID responding to
getRequestId
java
apache/flink
flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/messages/RequestFailure.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-client-java/src/main/java/org/apache/flink/queryablestate/network/messages/RequestFailure.java
Apache-2.0
/**
 * Deserializes a {@link KvStateInternalRequest} from the buffer.
 *
 * <p>Wire layout: kv-state id (two longs), key/namespace length + bytes.
 *
 * @param buf buffer positioned at the start of the serialized request
 * @return the deserialized request
 */
@Override
public KvStateInternalRequest deserializeMessage(ByteBuf buf) {
    final KvStateID kvStateId = new KvStateID(buf.readLong(), buf.readLong());

    final int dataLength = buf.readInt();
    Preconditions.checkArgument(
            dataLength >= 0,
            "Negative length for key and namespace. This indicates a serialization error.");

    final byte[] serializedKeyAndNamespace = new byte[dataLength];
    if (dataLength > 0) {
        buf.readBytes(serializedKeyAndNamespace);
    }
    return new KvStateInternalRequest(kvStateId, serializedKeyAndNamespace);
}
A {@link MessageDeserializer deserializer} for {@link KvStateInternalRequest}.
deserializeMessage
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/main/java/org/apache/flink/queryablestate/messages/KvStateInternalRequest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/main/java/org/apache/flink/queryablestate/messages/KvStateInternalRequest.java
Apache-2.0
/** Verifies registration, lookup and de-registration of {@link KvStateLocationOracle}s. */
@Test
void testKvStateLocationOracle() {
    final JobID firstJobId = new JobID();
    final TestingKvStateLocationOracle firstOracle = new TestingKvStateLocationOracle();
    kvStateClientProxy.updateKvStateLocationOracle(firstJobId, firstOracle);

    final JobID secondJobId = new JobID();
    final TestingKvStateLocationOracle secondOracle = new TestingKvStateLocationOracle();
    kvStateClientProxy.updateKvStateLocationOracle(secondJobId, secondOracle);

    // An unknown job id resolves to no oracle.
    assertThat(kvStateClientProxy.getKvStateLocationOracle(new JobID())).isNull();

    assertThat(kvStateClientProxy.getKvStateLocationOracle(firstJobId)).isEqualTo(firstOracle);
    assertThat(kvStateClientProxy.getKvStateLocationOracle(secondJobId))
            .isEqualTo(secondOracle);

    // Registering null removes the oracle again.
    kvStateClientProxy.updateKvStateLocationOracle(firstJobId, null);
    assertThat(kvStateClientProxy.getKvStateLocationOracle(firstJobId)).isNull();
}
Tests that we can set and retrieve the {@link KvStateLocationOracle}.
testKvStateLocationOracle
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/client/proxy/KvStateClientProxyImplTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/client/proxy/KvStateClientProxyImplTest.java
Apache-2.0
/**
 * Verifies that an oracle registered under {@link HighAvailabilityServices#DEFAULT_JOB_ID}
 * takes precedence for all job ids.
 */
@Test
void testLegacyCodePathPreference() {
    final TestingKvStateLocationOracle defaultOracle = new TestingKvStateLocationOracle();
    kvStateClientProxy.updateKvStateLocationOracle(
            HighAvailabilityServices.DEFAULT_JOB_ID, defaultOracle);

    final JobID jobId = new JobID();
    kvStateClientProxy.updateKvStateLocationOracle(jobId, new TestingKvStateLocationOracle());

    // The default-job-id registration wins over the job-specific one.
    assertThat(kvStateClientProxy.getKvStateLocationOracle(jobId)).isEqualTo(defaultOracle);
}
Tests that {@link KvStateLocationOracle} registered under {@link HighAvailabilityServices#DEFAULT_JOB_ID} will be used for all requests.
testLegacyCodePathPreference
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/client/proxy/KvStateClientProxyImplTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/client/proxy/KvStateClientProxyImplTest.java
Apache-2.0
/** Creates the per-test environment; the shared cluster client must already be available. */
@BeforeEach
void setUp() throws Exception {
    env = createEnv();
    assertThat(clusterClient).isNotNull();
    maxParallelism = 4;
}
Client shared between all the tests.
setUp
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/itcases/AbstractQueryableStateTestBase.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/itcases/AbstractQueryableStateTestBase.java
Apache-2.0
/**
 * Tests that a port collision when starting a second server on an already-bound port fails
 * with a FlinkRuntimeException carrying the expected "all ports occupied" message.
 */
@Test
void testServerInitializationFailure() throws Throwable {
    // Port 0 lets the OS pick any free port for the first server.
    List<Integer> portList = Collections.singletonList(0);

    try (TestServer server1 =
            new TestServer(
                    "Test Server 1", new DisabledKvStateRequestStats(), portList.iterator())) {
        server1.start();

        // The second server is given exactly the port the first one bound,
        // so the collision is guaranteed.
        try (TestServer server2 =
                new TestServer(
                        "Test Server 2",
                        new DisabledKvStateRequestStats(),
                        Collections.singletonList(server1.getServerAddress().getPort())
                                .iterator())) {

            // the expected exception along with the adequate message
            assertThatThrownBy(() -> server2.start())
                    .hasMessage(
                            "Unable to start Test Server 2. All ports in provided range are occupied.");
        }
    }
}
Tests that in case of port collision, a FlinkRuntimeException is thrown with a specific message.
testServerInitializationFailure
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/AbstractServerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/AbstractServerTest.java
Apache-2.0
/**
 * Deserializes a {@link TestMessage} from the buffer.
 *
 * <p>Wire layout: message length (int) followed by that many UTF bytes; a non-positive
 * length yields an empty message.
 *
 * @param buf buffer positioned at the start of the serialized message
 * @return the deserialized test message
 */
@Override
public TestMessage deserializeMessage(ByteBuf buf) {
    final int length = buf.readInt();
    if (length <= 0) {
        return new TestMessage("");
    }

    final byte[] payload = new byte[length];
    buf.readBytes(payload);
    return new TestMessage(new String(payload, ConfigConstants.DEFAULT_CHARSET));
}
The deserializer for our {@link TestMessage test messages}.
deserializeMessage
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/AbstractServerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/AbstractServerTest.java
Apache-2.0
/**
 * Tests simple queries against a data-collecting test server. 1024 requests are sent; the
 * test server answers even-numbered requests with a real response and odd-numbered ones with
 * a request failure. Afterwards the client statistics must show exactly half successful and
 * half failed requests, and no leaked connections.
 */
@Test
void testSimpleRequests() throws Exception {
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();

    MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
            new MessageSerializer<>(
                    new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
                    new KvStateResponse.KvStateResponseDeserializer());

    Client<KvStateInternalRequest, KvStateResponse> client = null;
    Channel serverChannel = null;

    try {
        client = new Client<>("Test Client", 1, serializer, stats);

        // Random result payload that every successful response will carry.
        final byte[] expected = new byte[1024];
        ThreadLocalRandom.current().nextBytes(expected);

        // The fake server collects every inbound buffer here for manual inspection.
        final LinkedBlockingQueue<ByteBuf> received = new LinkedBlockingQueue<>();
        final AtomicReference<Channel> channel = new AtomicReference<>();

        serverChannel = createServerChannel(new ChannelDataCollectingHandler(channel, received));
        InetSocketAddress serverAddress = getKvStateServerAddress(serverChannel);

        long numQueries = 1024L;

        // Fire all requests up front; responses are produced below.
        List<CompletableFuture<KvStateResponse>> futures = new ArrayList<>();
        for (long i = 0L; i < numQueries; i++) {
            KvStateInternalRequest request =
                    new KvStateInternalRequest(new KvStateID(), new byte[0]);
            futures.add(client.sendRequest(serverAddress, request));
        }

        // Respond to messages: even request index -> success, odd -> failure.
        Exception testException = new RuntimeException("Expected test Exception");

        for (long i = 0L; i < numQueries; i++) {
            ByteBuf buf = received.take();
            assertThat(buf).withFailMessage("Receive timed out").isNotNull();

            Channel ch = channel.get();
            assertThat(ch).withFailMessage("Channel not active").isNotNull();

            // NOTE(review): actual/expected are reversed here (expected should be the argument
            // of isEqualTo) — harmless for equality but produces confusing failure messages.
            assertThat(MessageType.REQUEST).isEqualTo(MessageSerializer.deserializeHeader(buf));
            long requestId = MessageSerializer.getRequestId(buf);
            // The inbound buffer is no longer needed once header and request id are read.
            buf.release();

            if (i % 2L == 0L) {
                ByteBuf response =
                        MessageSerializer.serializeResponse(
                                serverChannel.alloc(), requestId, new KvStateResponse(expected));
                ch.writeAndFlush(response);
            } else {
                ByteBuf response =
                        MessageSerializer.serializeRequestFailure(
                                serverChannel.alloc(), requestId, testException);
                ch.writeAndFlush(response);
            }
        }

        // Verify each future: even ones complete with the payload, odd ones fail.
        for (long i = 0L; i < numQueries; i++) {
            if (i % 2L == 0L) {
                KvStateResponse serializedResult = futures.get((int) i).get();
                assertThat(expected).containsExactly(serializedResult.getContent());
            } else {
                CompletableFuture<KvStateResponse> future = futures.get((int) i);
                FlinkAssertions.assertThatFuture(future)
                        .eventuallyFailsWith(ExecutionException.class)
                        .satisfies(FlinkAssertions.anyCauseMatches(RuntimeException.class));
            }
        }

        assertThat(numQueries).isEqualTo(stats.getNumRequests());

        long expectedRequests = numQueries / 2L;

        // Counts can take some time to propagate
        while (stats.getNumSuccessful() != expectedRequests
                || stats.getNumFailed() != expectedRequests) {
            Thread.sleep(100L);
        }

        assertThat(expectedRequests).isEqualTo(stats.getNumSuccessful());
        assertThat(expectedRequests).isEqualTo(stats.getNumFailed());
    } finally {
        if (client != null) {
            Exception exc = null;
            try {
                // todo here we were seeing this problem:
                // https://github.com/netty/netty/issues/4357 if we do a get().
                // this is why we now simply wait a bit so that everything is
                // shut down and then we check
                client.shutdown().get();
            } catch (Exception e) {
                exc = e;
                LOG.error("An exception occurred while shutting down netty.", e);
            }
            assertThat(client.isEventGroupShutdown())
                    .withFailMessage(ExceptionUtils.stringifyException(exc))
                    .isTrue();
        }

        if (serverChannel != null) {
            serverChannel.close();
        }

        // Every established connection must have been torn down by now.
        assertThat(stats.getNumConnections()).withFailMessage("Channel leak").isZero();
    }
}
Tests simple queries, of which half succeed and half fail.
testSimpleRequests
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/ClientTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/ClientTest.java
Apache-2.0
@Test void testRequestUnavailableHost() { AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats(); MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer = new MessageSerializer<>( new KvStateInternalRequest.KvStateInternalRequestDeserializer(), new KvStateResponse.KvStateResponseDeserializer()); Client<KvStateInternalRequest, KvStateResponse> client = null; try { client = new Client<>("Test Client", 1, serializer, stats); // Since no real servers are created based on the server address, the given fixed port // is enough. InetSocketAddress serverAddress = new InetSocketAddress("flink-qs-client-test-unavailable-host", 12345); KvStateInternalRequest request = new KvStateInternalRequest(new KvStateID(), new byte[0]); CompletableFuture<KvStateResponse> future = client.sendRequest(serverAddress, request); assertThat(future).isNotNull(); assertThatThrownBy(future::get).hasRootCauseInstanceOf(ConnectException.class); } finally { if (client != null) { try { client.shutdown().get(); } catch (Exception e) { e.printStackTrace(); } assertThat(client.isEventGroupShutdown()).isTrue(); } assertThat(stats.getNumConnections()).withFailMessage("Channel leak").isZero(); } }
Tests that a request to an unavailable host is failed with ConnectException.
testRequestUnavailableHost
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/ClientTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/ClientTest.java
Apache-2.0
/**
 * Tests that a server failure response closes the client connection and removes it from the
 * set of established connections. Two requests share one connection; a single serialized
 * server failure must fail both pending futures and drop the connection count to zero.
 */
@Test
void testFailureClosesChannel() throws Exception {
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();

    final MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
            new MessageSerializer<>(
                    new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
                    new KvStateResponse.KvStateResponseDeserializer());

    Client<KvStateInternalRequest, KvStateResponse> client = null;
    Channel serverChannel = null;

    try {
        client = new Client<>("Test Client", 1, serializer, stats);

        // Collects the raw request buffers arriving at the fake server.
        final LinkedBlockingQueue<ByteBuf> received = new LinkedBlockingQueue<>();
        final AtomicReference<Channel> channel = new AtomicReference<>();

        serverChannel = createServerChannel(new ChannelDataCollectingHandler(channel, received));
        InetSocketAddress serverAddress = getKvStateServerAddress(serverChannel);

        // Requests
        List<CompletableFuture<KvStateResponse>> futures = new ArrayList<>();
        KvStateInternalRequest request = new KvStateInternalRequest(new KvStateID(), new byte[0]);

        futures.add(client.sendRequest(serverAddress, request));
        futures.add(client.sendRequest(serverAddress, request));

        // Drain (and release) both request buffers so the channel reference is populated.
        ByteBuf buf = received.take();
        assertThat(buf).withFailMessage("Receive timed out").isNotNull();
        buf.release();

        buf = received.take();
        assertThat(buf).withFailMessage("Receive timed out").isNotNull();
        buf.release();

        // Both requests are multiplexed over a single connection.
        assertThat(stats.getNumConnections()).isEqualTo(1L);

        Channel ch = channel.get();
        assertThat(ch).withFailMessage("Channel not active").isNotNull();

        // Respond with failure
        ch.writeAndFlush(
                MessageSerializer.serializeServerFailure(
                        serverChannel.alloc(),
                        new RuntimeException("Expected test server failure")));

        // A server failure fails ALL pending requests on the connection.
        CompletableFuture<KvStateResponse> removedFuture = futures.remove(0);
        FlinkAssertions.assertThatFuture(removedFuture)
                .eventuallyFailsWith(ExecutionException.class)
                .satisfies(FlinkAssertions.anyCauseMatches(RuntimeException.class));

        removedFuture = futures.remove(0);
        FlinkAssertions.assertThatFuture(removedFuture)
                .eventuallyFailsWith(ExecutionException.class)
                .satisfies(FlinkAssertions.anyCauseMatches(RuntimeException.class));

        // The failed connection must no longer be tracked as established.
        assertThat(stats.getNumConnections()).isZero();

        // Counts can take some time to propagate
        while (stats.getNumSuccessful() != 0L || stats.getNumFailed() != 2L) {
            Thread.sleep(100L);
        }

        assertThat(stats.getNumRequests()).isEqualTo(2L);
        assertThat(stats.getNumSuccessful()).isZero();
        assertThat(stats.getNumFailed()).isEqualTo(2L);
    } finally {
        if (client != null) {
            try {
                client.shutdown().get();
            } catch (Exception e) {
                e.printStackTrace();
            }
            assertThat(client.isEventGroupShutdown()).isTrue();
        }

        if (serverChannel != null) {
            serverChannel.close();
        }

        assertThat(stats.getNumConnections()).withFailMessage("Channel leak").isZero();
    }
}
Tests that a server failure closes the connection and removes it from the established connections.
testFailureClosesChannel
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/ClientTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/ClientTest.java
Apache-2.0
/**
 * Tests that a server-side channel close fails the pending request with a
 * {@link ClosedChannelException} cause and removes the connection from the established set.
 */
@Test
void testServerClosesChannel() throws Exception {
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();

    final MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
            new MessageSerializer<>(
                    new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
                    new KvStateResponse.KvStateResponseDeserializer());

    Client<KvStateInternalRequest, KvStateResponse> client = null;
    Channel serverChannel = null;

    try {
        client = new Client<>("Test Client", 1, serializer, stats);

        final LinkedBlockingQueue<ByteBuf> received = new LinkedBlockingQueue<>();
        final AtomicReference<Channel> channel = new AtomicReference<>();

        serverChannel = createServerChannel(new ChannelDataCollectingHandler(channel, received));
        InetSocketAddress serverAddress = getKvStateServerAddress(serverChannel);

        // Requests
        KvStateInternalRequest request = new KvStateInternalRequest(new KvStateID(), new byte[0]);
        CompletableFuture<KvStateResponse> future = client.sendRequest(serverAddress, request);

        // Wait until the request actually arrived at the fake server.
        received.take();

        assertThat(stats.getNumConnections()).isEqualTo(1);

        // Close the channel from the server side before any response is sent.
        channel.get().close().await();

        FlinkAssertions.assertThatFuture(future)
                .eventuallyFailsWith(ExecutionException.class)
                .satisfies(FlinkAssertions.anyCauseMatches(ClosedChannelException.class));

        assertThat(stats.getNumConnections()).isZero();

        // Counts can take some time to propagate
        while (stats.getNumSuccessful() != 0L || stats.getNumFailed() != 1L) {
            Thread.sleep(100L);
        }

        assertThat(stats.getNumRequests()).isEqualTo(1L);
        assertThat(stats.getNumSuccessful()).isZero();
        assertThat(stats.getNumFailed()).isEqualTo(1L);
    } finally {
        if (client != null) {
            try {
                client.shutdown().get();
            } catch (Exception e) {
                e.printStackTrace();
            }
            assertThat(client.isEventGroupShutdown()).isTrue();
        }

        if (serverChannel != null) {
            serverChannel.close();
        }

        assertThat(stats.getNumConnections()).withFailMessage("Channel leak").isZero();
    }
}
Tests that a server channel close, closes the connection and removes it from the established connections.
testServerClosesChannel
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/ClientTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/ClientTest.java
Apache-2.0
/**
 * Runs the backend-agnostic list (de)serialization round-trip check
 * ({@link KvStateRequestSerializerTest#testListSerialization}) against a RocksDB-backed
 * keyed state backend.
 */
@Test
void testListSerialization() throws Exception {
    final long testKey = 0L;

    final RocksDBKeyedStateBackend<Long> rocksDbBackend =
            RocksDBTestUtils.builderForTestDefaults(tmpFile, LongSerializer.INSTANCE).build();
    rocksDbBackend.setCurrentKey(testKey);

    final InternalListState<Long, VoidNamespace, Long> listState =
            rocksDbBackend.createOrUpdateInternalState(
                    VoidNamespaceSerializer.INSTANCE,
                    new ListStateDescriptor<>("test", LongSerializer.INSTANCE));

    // Delegate the actual round-trip verification to the shared test helper.
    KvStateRequestSerializerTest.testListSerialization(testKey, listState);
    rocksDbBackend.dispose();
}
Tests that list serialization and deserialization match. @see KvStateRequestSerializerTest#testListSerialization() using the RocksDB state back-end
testListSerialization
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java
Apache-2.0
@Test void testMapSerialization() throws Exception { final long key = 0L; // objects for RocksDB state list serialisation final RocksDBKeyedStateBackend<Long> longHeapKeyedStateBackend = RocksDBTestUtils.builderForTestDefaults(tmpFile, LongSerializer.INSTANCE).build(); longHeapKeyedStateBackend.setCurrentKey(key); final InternalMapState<Long, VoidNamespace, Long, String> mapState = (InternalMapState<Long, VoidNamespace, Long, String>) longHeapKeyedStateBackend.getPartitionedState( VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, new MapStateDescriptor<>( "test", LongSerializer.INSTANCE, StringSerializer.INSTANCE)); KvStateRequestSerializerTest.testMapSerialization(key, mapState); longHeapKeyedStateBackend.dispose(); }
Tests that map serialization and deserialization match. @see KvStateRequestSerializerTest#testMapSerialization() using the RocksDB state back-end
testMapSerialization
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KVStateRequestSerializerRocksDBTest.java
Apache-2.0
/** Round-trips a (key, namespace) pair through the {@link KvStateSerializer} utilities. */
@Test
void testKeyAndNamespaceSerialization() throws Exception {
    final TypeSerializer<Long> keySer = LongSerializer.INSTANCE;
    final TypeSerializer<String> namespaceSer = StringSerializer.INSTANCE;

    // Key deliberately exceeds Integer.MAX_VALUE to exercise the full long range.
    final long key = Integer.MAX_VALUE + 12323L;
    final String namespace = "knilf";

    byte[] serialized =
            KvStateSerializer.serializeKeyAndNamespace(key, keySer, namespace, namespaceSer);
    Tuple2<Long, String> roundTripped =
            KvStateSerializer.deserializeKeyAndNamespace(serialized, keySer, namespaceSer);

    assertThat(roundTripped.f0.longValue()).isEqualTo(key);
    assertThat(roundTripped.f1).isEqualTo(namespace);
}
Tests key and namespace serialization utils.
testKeyAndNamespaceSerialization
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateRequestSerializerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateRequestSerializerTest.java
Apache-2.0
public static void testListSerialization( final long key, final InternalListState<Long, VoidNamespace, Long> listState) throws Exception { TypeSerializer<Long> valueSerializer = LongSerializer.INSTANCE; listState.setCurrentNamespace(VoidNamespace.INSTANCE); // List final int numElements = 10; final List<Long> expectedValues = new ArrayList<>(); for (int i = 0; i < numElements; i++) { final long value = ThreadLocalRandom.current().nextLong(); expectedValues.add(value); listState.add(value); } final byte[] serializedKey = KvStateSerializer.serializeKeyAndNamespace( key, LongSerializer.INSTANCE, VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE); final byte[] serializedValues = listState.getSerializedValue( serializedKey, listState.getKeySerializer(), listState.getNamespaceSerializer(), listState.getValueSerializer()); List<Long> actualValues = KvStateSerializer.deserializeList(serializedValues, valueSerializer); assertThat(actualValues).isEqualTo(expectedValues); // Single value long expectedValue = ThreadLocalRandom.current().nextLong(); byte[] serializedValue = KvStateSerializer.serializeValue(expectedValue, valueSerializer); List<Long> actualValue = KvStateSerializer.deserializeList(serializedValue, valueSerializer); assertThat(actualValue).containsExactly(expectedValue); }
Verifies that the serialization of a list using the given list state matches the deserialization with {@link KvStateSerializer#deserializeList}. @param key key of the list state @param listState list state using the {@link VoidNamespace}, must also be a {@link InternalKvState} instance @throws Exception
testListSerialization
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateRequestSerializerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateRequestSerializerTest.java
Apache-2.0
/**
 * Verifies that the serialization of a map using the given map state matches the
 * deserialization with {@link KvStateSerializer#deserializeMap}, including a {@code null}
 * value entry and a hand-assembled single-entry byte layout.
 *
 * @param key key of the map state
 * @param mapState map state using the {@link VoidNamespace}, must also be a {@link
 *     InternalKvState} instance
 * @throws Exception on serialization failures
 */
public static void testMapSerialization(
        final long key, final InternalMapState<Long, VoidNamespace, Long, String> mapState)
        throws Exception {

    TypeSerializer<Long> userKeySerializer = LongSerializer.INSTANCE;
    TypeSerializer<String> userValueSerializer = StringSerializer.INSTANCE;
    mapState.setCurrentNamespace(VoidNamespace.INSTANCE);

    // Map: fill with random key -> string-of-key entries.
    final int numElements = 10;

    final Map<Long, String> expectedValues = new HashMap<>();
    for (int i = 1; i <= numElements; i++) {
        final long value = ThreadLocalRandom.current().nextLong();
        expectedValues.put(value, Long.toString(value));
        mapState.put(value, Long.toString(value));
    }

    // Also cover a null value, which the wire format marks with a "is null" flag byte.
    expectedValues.put(0L, null);
    mapState.put(0L, null);

    final byte[] serializedKey =
            KvStateSerializer.serializeKeyAndNamespace(
                    key,
                    LongSerializer.INSTANCE,
                    VoidNamespace.INSTANCE,
                    VoidNamespaceSerializer.INSTANCE);

    final byte[] serializedValues =
            mapState.getSerializedValue(
                    serializedKey,
                    mapState.getKeySerializer(),
                    mapState.getNamespaceSerializer(),
                    mapState.getValueSerializer());

    Map<Long, String> actualValues =
            KvStateSerializer.deserializeMap(
                    serializedValues, userKeySerializer, userValueSerializer);
    assertThat(actualValues).hasSize(expectedValues.size());
    for (Map.Entry<Long, String> actualEntry : actualValues.entrySet()) {
        assertThat(actualEntry.getValue()).isEqualTo(expectedValues.get(actualEntry.getKey()));
    }

    // Single value: hand-assemble "key bytes | 0x00 (not-null flag) | value bytes" and
    // check it deserializes to a one-entry map.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    long expectedKey = ThreadLocalRandom.current().nextLong();
    String expectedValue = Long.toString(expectedKey);
    byte[] isNull = {0};

    baos.write(KvStateSerializer.serializeValue(expectedKey, userKeySerializer));
    baos.write(isNull);
    baos.write(KvStateSerializer.serializeValue(expectedValue, userValueSerializer));
    byte[] serializedValue = baos.toByteArray();

    Map<Long, String> actualValue =
            KvStateSerializer.deserializeMap(
                    serializedValue, userKeySerializer, userValueSerializer);
    assertThat(actualValue).hasSize(1);
    assertThat(actualValue.get(expectedKey)).isEqualTo(expectedValue);
}
Verifies that the serialization of a map using the given map state matches the deserialization with {@link KvStateSerializer#deserializeMap}. @param key key of the map state @param mapState map state using the {@link VoidNamespace}, must also be a {@link InternalKvState} instance @throws Exception
testMapSerialization
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateRequestSerializerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateRequestSerializerTest.java
Apache-2.0
@Test void testSimpleQuery() throws Exception { KvStateRegistry registry = new KvStateRegistry(); AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats(); MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer = new MessageSerializer<>( new KvStateInternalRequest.KvStateInternalRequestDeserializer(), new KvStateResponse.KvStateResponseDeserializer()); KvStateServerHandler handler = new KvStateServerHandler(testServer, registry, serializer, stats); EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler); // Register state ValueStateDescriptor<Integer> desc = new ValueStateDescriptor<>("any", IntSerializer.INSTANCE); desc.setQueryable("vanilla"); int numKeyGroups = 1; AbstractStateBackend abstractBackend = new HashMapStateBackend(); DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0); dummyEnv.setKvStateRegistry(registry); AbstractKeyedStateBackend<Integer> backend = createKeyedStateBackend(registry, numKeyGroups, abstractBackend, dummyEnv); final TestRegistryListener registryListener = new TestRegistryListener(); registry.registerListener(dummyEnv.getJobID(), registryListener); // Update the KvState and request it int expectedValue = 712828289; int key = 99812822; backend.setCurrentKey(key); ValueState<Integer> state = backend.getPartitionedState( VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, desc); state.update(expectedValue); byte[] serializedKeyAndNamespace = KvStateSerializer.serializeKeyAndNamespace( key, IntSerializer.INSTANCE, VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE); long requestId = Integer.MAX_VALUE + 182828L; assertThat(registryListener.registrationName).isEqualTo("vanilla"); KvStateInternalRequest request = new KvStateInternalRequest(registryListener.kvStateId, serializedKeyAndNamespace); ByteBuf serRequest = MessageSerializer.serializeRequest(channel.alloc(), requestId, request); // Write the request and wait for the response channel.writeInbound(serRequest); ByteBuf buf 
= (ByteBuf) readInboundBlocking(channel); buf.skipBytes(4); // skip frame length // Verify the response assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.REQUEST_RESULT); long deserRequestId = MessageSerializer.getRequestId(buf); KvStateResponse response = serializer.deserializeResponse(buf); buf.release(); assertThat(deserRequestId).isEqualTo(requestId); int actualValue = KvStateSerializer.deserializeValue(response.getContent(), IntSerializer.INSTANCE); assertThat(actualValue).isEqualTo(expectedValue); assertThat(stats.getNumRequests()).isEqualTo(1).withFailMessage(stats.toString()); // Wait for async successful request report long deadline = System.nanoTime() + TimeUnit.NANOSECONDS.convert(30, TimeUnit.SECONDS); while (stats.getNumSuccessful() != 1L && System.nanoTime() <= deadline) { Thread.sleep(10L); } assertThat(stats.getNumSuccessful()).isEqualTo(1L).withFailMessage(stats.toString()); }
Tests a simple successful query via an EmbeddedChannel.
testSimpleQuery
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
Apache-2.0
@Test void testQueryUnknownKvStateID() throws Exception { KvStateRegistry registry = new KvStateRegistry(); AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats(); MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer = new MessageSerializer<>( new KvStateInternalRequest.KvStateInternalRequestDeserializer(), new KvStateResponse.KvStateResponseDeserializer()); KvStateServerHandler handler = new KvStateServerHandler(testServer, registry, serializer, stats); EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler); long requestId = Integer.MAX_VALUE + 182828L; KvStateInternalRequest request = new KvStateInternalRequest(new KvStateID(), new byte[0]); ByteBuf serRequest = MessageSerializer.serializeRequest(channel.alloc(), requestId, request); // Write the request and wait for the response channel.writeInbound(serRequest); ByteBuf buf = (ByteBuf) readInboundBlocking(channel); buf.skipBytes(4); // skip frame length // Verify the response assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.REQUEST_FAILURE); RequestFailure response = MessageSerializer.deserializeRequestFailure(buf); buf.release(); assertThat(response.getRequestId()).isEqualTo(requestId); assertThat(response.getCause()) .isInstanceOf(UnknownKvStateIdException.class) .withFailMessage("Did not respond with expected failure cause"); assertThat(stats.getNumRequests()).isEqualTo(1L); assertThat(stats.getNumFailed()).isEqualTo(1L); }
Tests the failure response with {@link UnknownKvStateIdException} as cause on queries for unregistered KvStateIDs.
testQueryUnknownKvStateID
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
Apache-2.0
/**
 * Tests the failure response with {@link UnknownKeyOrNamespaceException} as cause on queries
 * for non-existing keys.
 *
 * <p>Fix: AssertJ's {@code withFailMessage} was chained AFTER {@code isInstanceOf}, where it
 * has no effect (the assertion has already executed); it now precedes the assertion.
 */
@Test
void testQueryUnknownKey() throws Exception {
    KvStateRegistry registry = new KvStateRegistry();
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();

    MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
            new MessageSerializer<>(
                    new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
                    new KvStateResponse.KvStateResponseDeserializer());

    KvStateServerHandler handler =
            new KvStateServerHandler(testServer, registry, serializer, stats);
    EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);

    int numKeyGroups = 1;
    AbstractStateBackend abstractBackend = new HashMapStateBackend();
    DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
    dummyEnv.setKvStateRegistry(registry);
    KeyedStateBackend<Integer> backend =
            createKeyedStateBackend(registry, numKeyGroups, abstractBackend, dummyEnv);

    final TestRegistryListener registryListener = new TestRegistryListener();
    registry.registerListener(dummyEnv.getJobID(), registryListener);

    // Register state
    ValueStateDescriptor<Integer> desc =
            new ValueStateDescriptor<>("any", IntSerializer.INSTANCE);
    desc.setQueryable("vanilla");

    backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, desc);

    // Query a key that was never written to the state.
    byte[] serializedKeyAndNamespace =
            KvStateSerializer.serializeKeyAndNamespace(
                    1238283,
                    IntSerializer.INSTANCE,
                    VoidNamespace.INSTANCE,
                    VoidNamespaceSerializer.INSTANCE);

    long requestId = Integer.MAX_VALUE + 22982L;

    assertThat(registryListener.registrationName).isEqualTo("vanilla");

    KvStateInternalRequest request =
            new KvStateInternalRequest(registryListener.kvStateId, serializedKeyAndNamespace);

    ByteBuf serRequest = MessageSerializer.serializeRequest(channel.alloc(), requestId, request);

    // Write the request and wait for the response
    channel.writeInbound(serRequest);

    ByteBuf buf = (ByteBuf) readInboundBlocking(channel);
    buf.skipBytes(4); // skip frame length

    // Verify the response
    assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.REQUEST_FAILURE);
    RequestFailure response = MessageSerializer.deserializeRequestFailure(buf);
    buf.release();

    assertThat(response.getRequestId()).isEqualTo(requestId);

    // withFailMessage() must precede the assertion to take effect.
    assertThat(response.getCause())
            .withFailMessage("Did not respond with expected failure cause")
            .isInstanceOf(UnknownKeyOrNamespaceException.class);

    assertThat(stats.getNumRequests()).isEqualTo(1L);
    assertThat(stats.getNumFailed()).isEqualTo(1L);
}
Tests the failure response with {@link UnknownKeyOrNamespaceException} as cause on queries for non-existing keys.
testQueryUnknownKey
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
Apache-2.0
@Test void testCloseChannelOnExceptionCaught() throws Exception { KvStateRegistry registry = new KvStateRegistry(); AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats(); MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer = new MessageSerializer<>( new KvStateInternalRequest.KvStateInternalRequestDeserializer(), new KvStateResponse.KvStateResponseDeserializer()); KvStateServerHandler handler = new KvStateServerHandler(testServer, registry, serializer, stats); EmbeddedChannel channel = new EmbeddedChannel(handler); channel.pipeline().fireExceptionCaught(new RuntimeException("Expected test Exception")); ByteBuf buf = (ByteBuf) readInboundBlocking(channel); buf.skipBytes(4); // skip frame length // Verify the response assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.SERVER_FAILURE); Throwable response = MessageSerializer.deserializeServerFailure(buf); buf.release(); assertThat(response.getMessage()).contains("Expected test Exception"); channel.closeFuture().await(READ_TIMEOUT_MILLIS); assertThat(channel.isActive()).isFalse(); }
Tests that the channel is closed if an Exception reaches the channel handler.
testCloseChannelOnExceptionCaught
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
Apache-2.0
/**
 * Tests the failure response on a rejected execution: the server's query executor is shut
 * down before a request arrives, so the handler must answer with a REQUEST_FAILURE whose
 * cause mentions the RejectedExecutionException.
 */
@Test
void testQueryExecutorShutDown() throws Throwable {
    KvStateRegistry registry = new KvStateRegistry();
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();

    KvStateServerImpl localTestServer =
            new KvStateServerImpl(
                    InetAddress.getLocalHost().getHostName(),
                    Collections.singletonList(0).iterator(),
                    1,
                    1,
                    new KvStateRegistry(),
                    new DisabledKvStateRequestStats());

    localTestServer.start();
    // Shut down immediately so the query executor rejects any subsequent task.
    localTestServer.shutdown();
    assertThat(localTestServer.getQueryExecutor().isTerminated()).isTrue();

    MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
            new MessageSerializer<>(
                    new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
                    new KvStateResponse.KvStateResponseDeserializer());

    KvStateServerHandler handler =
            new KvStateServerHandler(localTestServer, registry, serializer, stats);
    EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);

    int numKeyGroups = 1;
    AbstractStateBackend abstractBackend = new HashMapStateBackend();
    DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
    dummyEnv.setKvStateRegistry(registry);
    KeyedStateBackend<Integer> backend =
            createKeyedStateBackend(registry, numKeyGroups, abstractBackend, dummyEnv);

    final TestRegistryListener registryListener = new TestRegistryListener();
    registry.registerListener(dummyEnv.getJobID(), registryListener);

    // Register state
    ValueStateDescriptor<Integer> desc =
            new ValueStateDescriptor<>("any", IntSerializer.INSTANCE);
    desc.setQueryable("vanilla");

    backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, desc);

    assertThat(registryListener.registrationName).isEqualTo("vanilla");

    KvStateInternalRequest request =
            new KvStateInternalRequest(registryListener.kvStateId, new byte[0]);

    ByteBuf serRequest = MessageSerializer.serializeRequest(channel.alloc(), 282872L, request);

    // Write the request and wait for the response
    channel.writeInbound(serRequest);

    ByteBuf buf = (ByteBuf) readInboundBlocking(channel);
    buf.skipBytes(4); // skip frame length

    // Verify the response
    assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.REQUEST_FAILURE);
    RequestFailure response = MessageSerializer.deserializeRequestFailure(buf);
    buf.release();

    assertThat(response.getCause().getMessage()).contains("RejectedExecutionException");

    assertThat(stats.getNumRequests()).isEqualTo(1L);
    assertThat(stats.getNumFailed()).isEqualTo(1L);

    // Second shutdown is expected to be a safe no-op on the already-stopped server.
    localTestServer.shutdown();
}
Tests the failure response on a rejected execution, because the query executor has been closed.
testQueryExecutorShutDown
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
Apache-2.0
@Test void testIncomingBufferIsRecycled() throws Exception { KvStateRegistry registry = new KvStateRegistry(); AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats(); MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer = new MessageSerializer<>( new KvStateInternalRequest.KvStateInternalRequestDeserializer(), new KvStateResponse.KvStateResponseDeserializer()); KvStateServerHandler handler = new KvStateServerHandler(testServer, registry, serializer, stats); EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler); KvStateInternalRequest request = new KvStateInternalRequest(new KvStateID(), new byte[0]); ByteBuf serRequest = MessageSerializer.serializeRequest(channel.alloc(), 282872L, request); assertThat(serRequest.refCnt()).isEqualTo(1L); // Write regular request channel.writeInbound(serRequest); assertThat(serRequest.refCnt()).isEqualTo(0L).withFailMessage("Buffer not recycled"); // Write unexpected msg ByteBuf unexpected = channel.alloc().buffer(8); unexpected.writeInt(4); unexpected.writeInt(4); assertThat(unexpected.refCnt()).isEqualTo(1L); channel.writeInbound(unexpected); assertThat(unexpected.refCnt()).isEqualTo(0L).withFailMessage("Buffer not recycled"); channel.finishAndReleaseAll(); }
Tests that incoming buffer instances are recycled.
testIncomingBufferIsRecycled
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
Apache-2.0
/**
 * Tests the failure response if the serializers don't match: a query whose serialized key
 * and/or namespace were written with the wrong serializers must yield a {@link
 * MessageType#REQUEST_FAILURE} whose cause mentions an {@code IOException}, and both attempts
 * must be counted as failed requests.
 */
@Test
void testSerializerMismatch() throws Exception {
    KvStateRegistry registry = new KvStateRegistry();
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
    MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
            new MessageSerializer<>(
                    new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
                    new KvStateResponse.KvStateResponseDeserializer());
    KvStateServerHandler handler =
            new KvStateServerHandler(testServer, registry, serializer, stats);
    EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);
    int numKeyGroups = 1;
    AbstractStateBackend abstractBackend = new HashMapStateBackend();
    DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
    dummyEnv.setKvStateRegistry(registry);
    AbstractKeyedStateBackend<Integer> backend =
            createKeyedStateBackend(registry, numKeyGroups, abstractBackend, dummyEnv);
    final TestRegistryListener registryListener = new TestRegistryListener();
    registry.registerListener(dummyEnv.getJobID(), registryListener);
    // Register state
    ValueStateDescriptor<Integer> desc =
            new ValueStateDescriptor<>("any", IntSerializer.INSTANCE);
    desc.setQueryable("vanilla");
    ValueState<Integer> state =
            backend.getPartitionedState(
                    VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, desc);
    int key = 99812822;
    // Update the KvState
    backend.setCurrentKey(key);
    state.update(712828289);
    // Both key and namespace serialized with the wrong (String) serializers.
    byte[] wrongKeyAndNamespace =
            KvStateSerializer.serializeKeyAndNamespace(
                    "wrong-key-type",
                    StringSerializer.INSTANCE,
                    "wrong-namespace-type",
                    StringSerializer.INSTANCE);
    // Correct key serializer, wrong namespace serializer.
    byte[] wrongNamespace =
            KvStateSerializer.serializeKeyAndNamespace(
                    key, IntSerializer.INSTANCE, "wrong-namespace-type", StringSerializer.INSTANCE);
    assertThat(registryListener.registrationName).isEqualTo("vanilla");
    KvStateInternalRequest request =
            new KvStateInternalRequest(registryListener.kvStateId, wrongKeyAndNamespace);
    ByteBuf serRequest = MessageSerializer.serializeRequest(channel.alloc(), 182828L, request);
    // Write the request and wait for the response
    channel.writeInbound(serRequest);
    ByteBuf buf = (ByteBuf) readInboundBlocking(channel);
    buf.skipBytes(4); // skip frame length
    // Verify the response
    assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.REQUEST_FAILURE);
    RequestFailure response = MessageSerializer.deserializeRequestFailure(buf);
    buf.release();
    assertThat(response.getRequestId()).isEqualTo(182828L);
    assertThat(response.getCause().getMessage()).contains("IOException");
    // Repeat with wrong namespace only
    request = new KvStateInternalRequest(registryListener.kvStateId, wrongNamespace);
    serRequest = MessageSerializer.serializeRequest(channel.alloc(), 182829L, request);
    // Write the request and wait for the response
    channel.writeInbound(serRequest);
    buf = (ByteBuf) readInboundBlocking(channel);
    buf.skipBytes(4); // skip frame length
    // Verify the response
    assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.REQUEST_FAILURE);
    response = MessageSerializer.deserializeRequestFailure(buf);
    buf.release();
    assertThat(response.getRequestId()).isEqualTo(182829L);
    assertThat(response.getCause().getMessage()).contains("IOException");
    assertThat(stats.getNumRequests()).isEqualTo(2L);
    assertThat(stats.getNumFailed()).isEqualTo(2L);
}
Tests the failure response if the serializers don't match.
testSerializerMismatch
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
Apache-2.0
@Test void testChunkedResponse() throws Exception { KvStateRegistry registry = new KvStateRegistry(); KvStateRequestStats stats = new AtomicKvStateRequestStats(); MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer = new MessageSerializer<>( new KvStateInternalRequest.KvStateInternalRequestDeserializer(), new KvStateResponse.KvStateResponseDeserializer()); KvStateServerHandler handler = new KvStateServerHandler(testServer, registry, serializer, stats); EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler); int numKeyGroups = 1; AbstractStateBackend abstractBackend = new HashMapStateBackend(); DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0); dummyEnv.setKvStateRegistry(registry); AbstractKeyedStateBackend<Integer> backend = createKeyedStateBackend(registry, numKeyGroups, abstractBackend, dummyEnv); final TestRegistryListener registryListener = new TestRegistryListener(); registry.registerListener(dummyEnv.getJobID(), registryListener); // Register state ValueStateDescriptor<byte[]> desc = new ValueStateDescriptor<>("any", BytePrimitiveArraySerializer.INSTANCE); desc.setQueryable("vanilla"); ValueState<byte[]> state = backend.getPartitionedState( VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, desc); // Update KvState byte[] bytes = new byte[2 * channel.config().getWriteBufferHighWaterMark()]; byte current = 0; for (int i = 0; i < bytes.length; i++) { bytes[i] = current++; } int key = 99812822; backend.setCurrentKey(key); state.update(bytes); // Request byte[] serializedKeyAndNamespace = KvStateSerializer.serializeKeyAndNamespace( key, IntSerializer.INSTANCE, VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE); long requestId = Integer.MAX_VALUE + 182828L; assertThat(registryListener.registrationName).isEqualTo("vanilla"); KvStateInternalRequest request = new KvStateInternalRequest(registryListener.kvStateId, serializedKeyAndNamespace); ByteBuf serRequest = 
MessageSerializer.serializeRequest(channel.alloc(), requestId, request); // Write the request and wait for the response channel.writeInbound(serRequest); Object msg = readInboundBlocking(channel); assertThat(msg).isInstanceOf(ChunkedByteBuf.class).withFailMessage("Not ChunkedByteBuf"); ((ChunkedByteBuf) msg).close(); }
Tests that large responses are chunked.
testChunkedResponse
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
Apache-2.0
/**
 * Polls the embedded channel's outbound queue until a message becomes available or {@code
 * READ_TIMEOUT_MILLIS} has elapsed.
 *
 * @param channel the channel to poll
 * @return the first outbound message
 * @throws InterruptedException if interrupted while waiting between polls
 * @throws TimeoutException if no message arrived within the timeout
 */
private Object readInboundBlocking(EmbeddedChannel channel)
        throws InterruptedException, TimeoutException {
    final long pollIntervalMillis = 50L;
    for (long waitedMillis = 0L;
            waitedMillis < READ_TIMEOUT_MILLIS;
            waitedMillis += pollIntervalMillis) {
        final Object received = channel.readOutbound();
        if (received != null) {
            return received;
        }
        Thread.sleep(pollIntervalMillis);
    }
    throw new TimeoutException();
}
Queries the embedded channel for data.
readInboundBlocking
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
Apache-2.0
/**
 * Creates the frame-length decoder expected by the serialized messages: a 4-byte length field
 * at offset 0 that is stripped from the decoded frame.
 *
 * @return frame decoder handler for the test channel
 */
private ChannelHandler getFrameDecoder() {
    final int lengthFieldLength = 4;
    return new LengthFieldBasedFrameDecoder(
            Integer.MAX_VALUE, 0, lengthFieldLength, 0, lengthFieldLength);
}
Frame length decoder (expected by the serialized messages).
getFrameDecoder
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
Apache-2.0
/**
 * Tests request serialization round-trip with a zero-length serialized key and namespace: the
 * header, request id, and (empty) payload must all survive serialize/deserialize, and the
 * reader index must end exactly at frame length + 4 (the length field itself).
 */
@Test
void testRequestSerializationWithZeroLengthKeyAndNamespace() throws Exception {
    long requestId = Integer.MAX_VALUE + 1337L;
    KvStateID kvStateId = new KvStateID();
    byte[] serializedKeyAndNamespace = new byte[0];
    final KvStateInternalRequest request =
            new KvStateInternalRequest(kvStateId, serializedKeyAndNamespace);
    final MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
            new MessageSerializer<>(
                    new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
                    new KvStateResponse.KvStateResponseDeserializer());
    ByteBuf buf = MessageSerializer.serializeRequest(alloc, requestId, request);
    int frameLength = buf.readInt();
    assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.REQUEST);
    assertThat(MessageSerializer.getRequestId(buf)).isEqualTo(requestId);
    KvStateInternalRequest requestDeser = serializer.deserializeRequest(buf);
    // +4 accounts for the frame-length field consumed by readInt() above.
    assertThat(buf.readerIndex()).isEqualTo(frameLength + 4);
    assertThat(requestDeser.getKvStateId()).isEqualTo(kvStateId);
    assertThat(requestDeser.getSerializedKeyAndNamespace())
            .isEqualTo(serializedKeyAndNamespace);
}
Tests request serialization with zero-length serialized key and namespace.
testRequestSerializationWithZeroLengthKeyAndNamespace
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/MessageSerializerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/MessageSerializerTest.java
Apache-2.0
/**
 * Tests that we don't try to be smart about {@code null} key and namespace: constructing a
 * request with a {@code null} serialized key/namespace must fail fast with an NPE.
 */
@Test
void testNullPointerExceptionOnNullSerializedKeyAndNamepsace() throws Exception {
    final KvStateID stateId = new KvStateID();
    assertThatThrownBy(() -> new KvStateInternalRequest(stateId, null))
            .isInstanceOf(NullPointerException.class);
}
Tests that we don't try to be smart about <code>null</code> key and namespace. They should be treated explicitly.
testNullPointerExceptionOnNullSerializedKeyAndNamepsace
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/MessageSerializerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/MessageSerializerTest.java
Apache-2.0
/**
 * Tests response serialization round-trip with a zero-length serialized result: header, request
 * id, and empty content must survive, and the reader index must end exactly at frame length + 4
 * (the length field itself).
 */
@Test
void testResponseSerializationWithZeroLengthSerializedResult() throws Exception {
    byte[] serializedResult = new byte[0];
    final KvStateResponse response = new KvStateResponse(serializedResult);
    final MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
            new MessageSerializer<>(
                    new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
                    new KvStateResponse.KvStateResponseDeserializer());
    ByteBuf buf = MessageSerializer.serializeResponse(alloc, 72727278L, response);
    int frameLength = buf.readInt();
    assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.REQUEST_RESULT);
    assertThat(MessageSerializer.getRequestId(buf)).isEqualTo(72727278L);
    KvStateResponse responseDeser = serializer.deserializeResponse(buf);
    // +4 accounts for the frame-length field consumed by readInt() above.
    assertThat(buf.readerIndex()).isEqualTo(frameLength + 4);
    assertThat(responseDeser.getContent()).isEqualTo(serializedResult);
}
Tests response serialization with zero-length serialized result.
testResponseSerializationWithZeroLengthSerializedResult
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/MessageSerializerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/MessageSerializerTest.java
Apache-2.0
/**
 * Tests that we don't try to be smart about {@code null} results: constructing a response with
 * a {@code null} serialized result must fail fast with an NPE.
 */
@Test
void testNullPointerExceptionOnNullSerializedResult() throws Exception {
    final byte[] noResult = null;
    assertThatThrownBy(() -> new KvStateResponse(noResult))
            .isInstanceOf(NullPointerException.class);
}
Tests that we don't try to be smart about <code>null</code> results. They should be treated explicitly.
testNullPointerExceptionOnNullSerializedResult
java
apache/flink
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/MessageSerializerTest.java
https://github.com/apache/flink/blob/master/flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/MessageSerializerTest.java
Apache-2.0
/**
 * Starts a remote ActorSystem at the given address within a port range, using the default Flink
 * actor system name, the wildcard bind address, and a fork-join executor derived from the
 * configuration.
 *
 * @param configuration the Flink configuration
 * @param externalAddress the external address to access the ActorSystem
 * @param externalPortRange the range of external ports to try
 * @param logger the logger to output log information
 * @return the ActorSystem which has been started
 * @throws Exception when the actor system cannot be started in the specified port range
 */
@VisibleForTesting
public static ActorSystem startRemoteActorSystem(
        Configuration configuration,
        String externalAddress,
        String externalPortRange,
        Logger logger)
        throws Exception {
    // Delegate with defaults: wildcard bind address, bind port == external port
    // (empty Optional), and no custom Pekko config (null).
    return startRemoteActorSystem(
            configuration,
            PekkoUtils.getFlinkActorSystemName(),
            externalAddress,
            externalPortRange,
            NetUtils.getWildcardIPAddress(),
            Optional.empty(),
            logger,
            PekkoUtils.getForkJoinExecutorConfig(
                    getForkJoinExecutorConfiguration(configuration)),
            null);
}
Starts a remote ActorSystem at given address and specific port range. @param configuration The Flink configuration @param externalAddress The external address to access the ActorSystem. @param externalPortRange The choosing range of the external port to access the ActorSystem. @param logger The logger to output log information. @return The ActorSystem which has been started @throws Exception Thrown when actor system cannot be started in specified port range
startRemoteActorSystem
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/ActorSystemBootstrapTools.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/ActorSystemBootstrapTools.java
Apache-2.0
/**
 * Starts a remote ActorSystem at the given address, trying each port in the given range until
 * one succeeds. Port-binding failures (netty ChannelException or BindException as the direct
 * cause) trigger a retry on the next port; any other failure is rethrown immediately.
 *
 * @param configuration the Flink configuration
 * @param actorSystemName name of the started {@link ActorSystem}
 * @param externalAddress the external address to access the ActorSystem
 * @param externalPortRange the range of external ports to try
 * @param bindAddress the local address to bind to
 * @param bindPort the local port to bind to; if absent, the external port is used
 * @param logger the logger to output log information
 * @param actorSystemExecutorConfiguration configuration for the ActorSystem's underlying
 *     executor
 * @param customConfig custom Pekko config combined with the config derived from the Flink
 *     configuration; may be {@code null}
 * @return the ActorSystem which has been started
 * @throws IllegalArgumentException if the port range definition cannot be parsed
 * @throws BindException if no port in the range could be bound
 * @throws Exception for any non-bind-related startup failure
 */
public static ActorSystem startRemoteActorSystem(
        Configuration configuration,
        String actorSystemName,
        String externalAddress,
        String externalPortRange,
        String bindAddress,
        @SuppressWarnings("OptionalUsedAsFieldOrParameterType") Optional<Integer> bindPort,
        Logger logger,
        Config actorSystemExecutorConfiguration,
        Config customConfig)
        throws Exception {
    // parse port range definition and create port iterator
    Iterator<Integer> portsIterator;
    try {
        portsIterator = NetUtils.getPortRangeFromString(externalPortRange);
    } catch (Exception e) {
        throw new IllegalArgumentException(
                "Invalid port range definition: " + externalPortRange);
    }
    while (portsIterator.hasNext()) {
        final int externalPort = portsIterator.next();
        try {
            return startRemoteActorSystem(
                    configuration,
                    actorSystemName,
                    externalAddress,
                    externalPort,
                    bindAddress,
                    bindPort.orElse(externalPort),
                    logger,
                    actorSystemExecutorConfiguration,
                    customConfig);
        } catch (Exception e) {
            // we can continue to try if this contains a netty channel exception
            Throwable cause = e.getCause();
            if (!(cause
                            instanceof
                            org.apache.flink.shaded.netty4.io.netty.channel.ChannelException
                    || cause instanceof java.net.BindException)) {
                throw e;
            } // else fall through the loop and try the next port
        }
    }
    // if we come here, we have exhausted the port range
    throw new BindException(
            "Could not start actor system on any port in port range " + externalPortRange);
}
Starts a remote ActorSystem at given address and specific port range. @param configuration The Flink configuration @param actorSystemName Name of the started {@link ActorSystem} @param externalAddress The external address to access the ActorSystem. @param externalPortRange The choosing range of the external port to access the ActorSystem. @param bindAddress The local address to bind to. @param bindPort The local port to bind to. If not present, then the external port will be used. @param logger The logger to output log information. @param actorSystemExecutorConfiguration configuration for the ActorSystem's underlying executor @param customConfig Custom Pekko config to be combined with the config derived from Flink configuration. @return The ActorSystem which has been started @throws Exception Thrown when actor system cannot be started in specified port range
startRemoteActorSystem
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/ActorSystemBootstrapTools.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/ActorSystemBootstrapTools.java
Apache-2.0
/**
 * Starts a remote Actor System at the given address and specific port.
 *
 * <p>Cleanup: the catch block previously re-evaluated {@code t.getCause()} instead of using the
 * already-cached {@code cause} variable, and carried a redundant {@code cause != null} check
 * ({@code null instanceof X} is always {@code false}). Behavior is unchanged.
 *
 * @param configuration the Flink configuration
 * @param actorSystemName name of the started {@link ActorSystem}
 * @param externalAddress the external address to access the ActorSystem
 * @param externalPort the external port to access the ActorSystem
 * @param bindAddress the local address to bind to
 * @param bindPort the local port to bind to
 * @param logger the logger to output log information
 * @param actorSystemExecutorConfiguration configuration for the ActorSystem's underlying
 *     executor
 * @param customConfig custom Pekko config combined with the config derived from the Flink
 *     configuration; may be {@code null}
 * @return the ActorSystem which has been started
 * @throws IOException if the actor system could not bind to the requested address/port
 * @throws Exception for any other startup failure
 */
private static ActorSystem startRemoteActorSystem(
        Configuration configuration,
        String actorSystemName,
        String externalAddress,
        int externalPort,
        String bindAddress,
        int bindPort,
        Logger logger,
        Config actorSystemExecutorConfiguration,
        Config customConfig)
        throws Exception {
    String externalHostPortUrl =
            NetUtils.unresolvedHostAndPortToNormalizedString(externalAddress, externalPort);
    String bindHostPortUrl =
            NetUtils.unresolvedHostAndPortToNormalizedString(bindAddress, bindPort);
    logger.info(
            "Trying to start actor system, external address {}, bind address {}.",
            externalHostPortUrl,
            bindHostPortUrl);
    try {
        Config pekkoConfig =
                PekkoUtils.getConfig(
                        configuration,
                        new HostAndPort(externalAddress, externalPort),
                        new HostAndPort(bindAddress, bindPort),
                        actorSystemExecutorConfiguration);
        if (customConfig != null) {
            // Custom settings win; the derived config acts as the fallback.
            pekkoConfig = customConfig.withFallback(pekkoConfig);
        }
        return startActorSystem(pekkoConfig, actorSystemName, logger);
    } catch (Throwable t) {
        if (t instanceof ChannelException) {
            Throwable cause = t.getCause();
            if (cause instanceof BindException) {
                // Surface bind failures as IOException so callers can retry on another port.
                throw new IOException(
                        "Unable to create ActorSystem at address "
                                + bindHostPortUrl
                                + " : "
                                + cause.getMessage(),
                        t);
            }
        }
        throw new Exception("Could not create actor system", t);
    }
}
Starts a remote Actor System at given address and specific port. @param configuration The Flink configuration. @param actorSystemName Name of the started {@link ActorSystem} @param externalAddress The external address to access the ActorSystem. @param externalPort The external port to access the ActorSystem. @param bindAddress The local address to bind to. @param bindPort The local port to bind to. @param logger the logger to output log information. @param actorSystemExecutorConfiguration configuration for the ActorSystem's underlying executor @param customConfig Custom Pekko config to be combined with the config derived from Flink configuration. @return The ActorSystem which has been started. @throws Exception
startRemoteActorSystem
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/ActorSystemBootstrapTools.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/ActorSystemBootstrapTools.java
Apache-2.0
/**
 * Starts a local Actor System (no remoting).
 *
 * @param configuration the Flink configuration
 * @param actorSystemName name of the started ActorSystem
 * @param logger the logger to output log information
 * @param actorSystemExecutorConfiguration configuration for the ActorSystem's underlying
 *     executor
 * @param customConfig custom Pekko config combined with the config derived from the Flink
 *     configuration; may be {@code null}
 * @return the ActorSystem which has been started
 * @throws Exception if the actor system could not be created
 */
public static ActorSystem startLocalActorSystem(
        Configuration configuration,
        String actorSystemName,
        Logger logger,
        Config actorSystemExecutorConfiguration,
        Config customConfig)
        throws Exception {
    logger.info("Trying to start local actor system");
    try {
        final Config baseConfig =
                PekkoUtils.getConfig(
                        configuration, null, null, actorSystemExecutorConfiguration);
        // Custom settings (when present) win; the derived config acts as the fallback.
        final Config effectiveConfig =
                customConfig == null ? baseConfig : customConfig.withFallback(baseConfig);
        return startActorSystem(effectiveConfig, actorSystemName, logger);
    } catch (Throwable t) {
        throw new Exception("Could not create actor system", t);
    }
}
Starts a local Actor System. @param configuration The Flink configuration. @param actorSystemName Name of the started ActorSystem. @param logger The logger to output log information. @param actorSystemExecutorConfiguration Configuration for the ActorSystem's underlying executor. @param customConfig Custom Pekko config to be combined with the config derived from Flink configuration. @return The ActorSystem which has been started. @throws Exception
startLocalActorSystem
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/ActorSystemBootstrapTools.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/ActorSystemBootstrapTools.java
Apache-2.0
/**
 * Starts an Actor System with the given Pekko config and logs its address.
 *
 * @param config config of the started ActorSystem
 * @param actorSystemName name of the started ActorSystem
 * @param logger the logger to output log information
 * @return the ActorSystem which has been started
 */
private static ActorSystem startActorSystem(
        Config config, String actorSystemName, Logger logger) {
    logger.debug("Using pekko configuration\n {}", config);
    final ActorSystem system = PekkoUtils.createActorSystem(actorSystemName, config);
    logger.info("Actor system started at {}", PekkoUtils.getAddress(system));
    return system;
}
Starts an Actor System with given Pekko config. @param config Config of the started ActorSystem. @param actorSystemName Name of the started ActorSystem. @param logger The logger to output log information. @return The ActorSystem which has been started.
startActorSystem
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/ActorSystemBootstrapTools.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/ActorSystemBootstrapTools.java
Apache-2.0
/**
 * Invokes an RPC method by sending the RPC invocation details to the rpc endpoint.
 *
 * <p>Dispatch depends on the declared return type: {@code void} methods are fire-and-forget
 * ({@code tell}); {@link CompletableFuture} methods return the (possibly still pending) result
 * future; any other return type blocks on the result up to the extracted timeout.
 *
 * @param method method to call
 * @param args arguments of the method call
 * @return result of the RPC; the result future is completed with a {@code TimeoutException} if
 *     the request times out; if the recipient is not reachable, the future is completed with a
 *     {@code RecipientUnreachableException}
 * @throws Exception if the RPC invocation fails
 */
private Object invokeRpc(Method method, Object[] args) throws Exception {
    String methodName = method.getName();
    Class<?>[] parameterTypes = method.getParameterTypes();
    // @Local forces this invocation to be sent as a local message.
    final boolean isLocalRpcInvocation = method.getAnnotation(Local.class) != null;
    Annotation[][] parameterAnnotations = method.getParameterAnnotations();
    // A @RpcTimeout-annotated argument overrides the handler's default timeout.
    Duration futureTimeout =
            RpcGatewayUtils.extractRpcTimeout(parameterAnnotations, args, timeout);
    final RpcInvocation rpcInvocation =
            createRpcInvocationMessage(
                    method.getDeclaringClass().getSimpleName(),
                    methodName,
                    isLocalRpcInvocation,
                    parameterTypes,
                    args);
    Class<?> returnType = method.getReturnType();
    final Object result;
    if (Objects.equals(returnType, Void.TYPE)) {
        // Fire-and-forget: no response expected.
        tell(rpcInvocation);
        result = null;
    } else {
        // Capture the call stack. It is significantly faster to do that via an exception than
        // via Thread.getStackTrace(), because exceptions lazily initialize the stack trace,
        // initially only capture a lightweight native pointer, and convert that into the stack
        // trace lazily when needed.
        final Throwable callStackCapture = captureAskCallStack ? new Throwable() : null;
        // execute an asynchronous call
        final CompletableFuture<?> resultFuture =
                ask(rpcInvocation, futureTimeout)
                        .thenApply(
                                resultValue ->
                                        deserializeValueIfNeeded(
                                                resultValue, method, flinkClassLoader));
        final CompletableFuture<Object> completableFuture = new CompletableFuture<>();
        resultFuture.whenComplete(
                (resultValue, failure) -> {
                    if (failure != null) {
                        // Enrich timeouts with the captured call stack and invocation details.
                        completableFuture.completeExceptionally(
                                resolveTimeoutException(
                                        ExceptionUtils.stripCompletionException(failure),
                                        callStackCapture,
                                        address,
                                        rpcInvocation));
                    } else {
                        completableFuture.complete(resultValue);
                    }
                });
        if (Objects.equals(returnType, CompletableFuture.class)) {
            result = completableFuture;
        } else {
            // Synchronous RPC: block until the result arrives or the timeout elapses.
            try {
                result = completableFuture.get(futureTimeout.toMillis(), TimeUnit.MILLISECONDS);
            } catch (ExecutionException ee) {
                throw new RpcException(
                        "Failure while obtaining synchronous RPC result.",
                        ExceptionUtils.stripExecutionException(ee));
            }
        }
    }
    return result;
}
Invokes a RPC method by sending the RPC invocation details to the rpc endpoint. @param method to call @param args of the method call @return result of the RPC; the result future is completed with a {@link TimeoutException} if the requests times out; if the recipient is not reachable, then the result future is completed with a {@link RecipientUnreachableException}. @throws Exception if the RPC invocation fails
invokeRpc
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoInvocationHandler.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoInvocationHandler.java
Apache-2.0
/**
 * Creates the RpcInvocation message for the given RPC: a non-serialized local invocation when
 * the endpoint is local (and serialization is not forced, or the RPC is explicitly local), a
 * serialized remote invocation otherwise.
 *
 * @param declaringClassName simple name of the class declaring the RPC
 * @param methodName name of the RPC method
 * @param isLocalRpcInvocation whether the RPC must be sent as a local message
 * @param parameterTypes parameter types of the RPC
 * @param args arguments of the RPC
 * @return RpcInvocation message which encapsulates the RPC details
 * @throws IOException if the RPC invocation parameters cannot be serialized
 */
private RpcInvocation createRpcInvocationMessage(
        final String declaringClassName,
        final String methodName,
        final boolean isLocalRpcInvocation,
        final Class<?>[] parameterTypes,
        final Object[] args)
        throws IOException {
    final boolean useLocalMessage =
            isLocal && (!forceRpcInvocationSerialization || isLocalRpcInvocation);
    return useLocalMessage
            ? new LocalRpcInvocation(declaringClassName, methodName, parameterTypes, args)
            : new RemoteRpcInvocation(declaringClassName, methodName, parameterTypes, args);
}
Create the RpcInvocation message for the given RPC. @param declaringClassName of the RPC @param methodName of the RPC @param isLocalRpcInvocation whether the RPC must be sent as a local message @param parameterTypes of the RPC @param args of the RPC @return RpcInvocation message which encapsulates the RPC details @throws IOException if we cannot serialize the RPC invocation parameters
createRpcInvocationMessage
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoInvocationHandler.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoInvocationHandler.java
Apache-2.0
/**
 * Sends the message to the RPC endpoint as a fire-and-forget message (no sender, no reply
 * expected).
 *
 * @param message message to send to the RPC endpoint
 */
protected void tell(Object message) {
    rpcEndpoint.tell(message, ActorRef.noSender());
}
Sends the message to the RPC endpoint. @param message to send to the RPC endpoint.
tell
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoInvocationHandler.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoInvocationHandler.java
Apache-2.0
/**
 * Handles an asynchronous {@link Callable}: executes it in the actor thread under the Flink
 * class loader and replies to the sender with either the result or the failure.
 *
 * @param callAsync call async message carrying the callable to run
 */
private void handleCallAsync(CallAsync callAsync) {
    try {
        final Object value =
                runWithContextClassLoader(
                        () -> callAsync.getCallable().call(), flinkClassLoader);
        getSender().tell(new Status.Success(value), getSelf());
    } catch (Throwable t) {
        // Report the failure back to the caller instead of letting the actor crash.
        getSender().tell(new Status.Failure(t), getSelf());
    }
}
Handle asynchronous {@link Callable}. This method simply executes the given {@link Callable} in the context of the actor thread. @param callAsync Call async message
handleCallAsync
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActor.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActor.java
Apache-2.0
/**
 * Handles an asynchronous {@link Runnable}: runs it immediately in the actor thread if its
 * scheduled time has arrived (or none was set), otherwise re-schedules the message to self
 * after the remaining delay.
 *
 * @param runAsync run async message carrying the runnable and its target time in nanos
 */
private void handleRunAsync(RunAsync runAsync) {
    // timeToRun == 0 means "no delay requested"; otherwise it is an absolute nanoTime deadline.
    final long timeToRun = runAsync.getTimeNanos();
    final long delayNanos;
    if (timeToRun == 0 || (delayNanos = timeToRun - System.nanoTime()) <= 0) {
        // run immediately
        try {
            runWithContextClassLoader(() -> runAsync.getRunnable().run(), flinkClassLoader);
        } catch (Throwable t) {
            log.error("Caught exception while executing runnable in main thread.", t);
            ExceptionUtils.rethrowIfFatalErrorOrOOM(t);
        }
    } else {
        // schedule for later. send a new message after the delay, which will then be
        // immediately executed
        FiniteDuration delay = new FiniteDuration(delayNanos, TimeUnit.NANOSECONDS);
        // Keep the original absolute deadline so re-delivery does not accumulate drift.
        RunAsync message = new RunAsync(runAsync.getRunnable(), timeToRun);
        final Object envelopedSelfMessage = envelopeSelfMessage(message);
        getContext()
                .system()
                .scheduler()
                .scheduleOnce(
                        delay,
                        getSelf(),
                        envelopedSelfMessage,
                        getContext().dispatcher(),
                        ActorRef.noSender());
    }
}
Handle asynchronous {@link Runnable}. This method simply executes the given {@link Runnable} in the context of the actor thread. @param runAsync Run async message
handleRunAsync
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActor.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActor.java
Apache-2.0
/**
 * Looks up the rpc method on the rpc endpoint instance via reflection.
 *
 * @param methodName name of the method
 * @param parameterTypes parameter types of the method
 * @return the matching public method of the rpc endpoint
 * @throws NoSuchMethodException if no method with the given name and parameter types exists on
 *     the rpc endpoint
 */
private Method lookupRpcMethod(final String methodName, final Class<?>[] parameterTypes)
        throws NoSuchMethodException {
    final Class<?> endpointClass = rpcEndpoint.getClass();
    return endpointClass.getMethod(methodName, parameterTypes);
}
Look up the rpc method on the given {@link RpcEndpoint} instance. @param methodName Name of the method @param parameterTypes Parameter types of the method @return Method of the rpc endpoint @throws NoSuchMethodException Thrown if the method with the given name and parameter types cannot be found at the rpc endpoint
lookupRpcMethod
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActor.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActor.java
Apache-2.0
/**
 * Sends the throwable back to the sender as a {@link Status.Failure}, but only if a sender is
 * specified (fire-and-forget messages have no one to notify).
 *
 * @param throwable throwable to send to the sender
 */
protected void sendErrorIfSender(Throwable throwable) {
    final ActorRef sender = getSender();
    if (sender.equals(ActorRef.noSender())) {
        return; // no sender: nothing to report
    }
    sender.tell(new Status.Failure(throwable), getSelf());
}
Send throwable to sender if the sender is specified. @param throwable to send to the sender
sendErrorIfSender
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActor.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActor.java
Apache-2.0
/**
 * Hook to envelope self messages; the base implementation is the identity. Subclasses may
 * override to wrap messages sent by the actor to itself.
 *
 * @param message message to envelope
 * @return enveloped message
 */
protected Object envelopeSelfMessage(Object message) {
    return message;
}
Hook to envelope self messages. @param message to envelope @return enveloped message
envelopeSelfMessage
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActor.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActor.java
Apache-2.0
/**
 * Builds the RPC URL for the given endpoint, choosing SSL_TCP or plain TCP based on the
 * configuration.
 *
 * @param hostname the hostname or address where the target RPC service is listening
 * @param port the port where the target RPC service is listening
 * @param endpointName the name of the RPC endpoint
 * @param addressResolution whether to try address resolution of the given hostname, allowing
 *     fail-fast when the hostname cannot be resolved
 * @param config the configuration from which to deduce further settings
 * @return the RPC URL of the specified RPC endpoint
 * @throws UnknownHostException if address resolution is requested and fails
 */
public static String getRpcUrl(
        String hostname,
        int port,
        String endpointName,
        AddressResolution addressResolution,
        Configuration config)
        throws UnknownHostException {
    checkNotNull(config, "config is null");
    // SSL is used only when both the RPC SSL flag and internal SSL are enabled.
    final Protocol protocol;
    if (config.get(RpcOptions.SSL_ENABLED) && SecurityOptions.isInternalSSLEnabled(config)) {
        protocol = Protocol.SSL_TCP;
    } else {
        protocol = Protocol.TCP;
    }
    return getRpcUrl(hostname, port, endpointName, addressResolution, protocol);
}
@param hostname The hostname or address where the target RPC service is listening. @param port The port where the target RPC service is listening. @param endpointName The name of the RPC endpoint. @param addressResolution Whether to try address resolution of the given hostname or not. This allows to fail fast in case that the hostname cannot be resolved. @param config The configuration from which to deduce further settings. @return The RPC URL of the specified RPC endpoint.
getRpcUrl
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcServiceUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcServiceUtils.java
Apache-2.0
/**
 * Extracts the maximum framesize (in bytes) from the configuration by parsing the configured
 * size string through a minimal Pekko config, so that Pekko's size-unit syntax is honored.
 *
 * @param configuration the Flink configuration holding the framesize setting
 * @return the maximum framesize in bytes
 */
public static long extractMaximumFramesize(Configuration configuration) {
    final String frameSizeValue = configuration.get(RpcOptions.FRAMESIZE);
    final Config parsed =
            ConfigFactory.parseString(String.format(SIMPLE_CONFIG_TEMPLATE, frameSizeValue));
    return parsed.getBytes(MAXIMUM_FRAME_SIZE_PATH);
}
Extracts the maximum framesize (in bytes) from the given Flink configuration by parsing the configured size string through a Pekko {@code Config}.
extractMaximumFramesize
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcServiceUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcServiceUtils.java
Apache-2.0
public static String getFlinkActorSystemName() { return FLINK_ACTOR_SYSTEM_NAME; }
This class contains utility functions for pekko. It contains methods to start an actor system with a given Pekko configuration. Furthermore, the Pekko configuration used for starting the different actor systems resides in this class.
getFlinkActorSystemName
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
Apache-2.0
public static ActorSystem createLocalActorSystem(Configuration configuration) { return createActorSystem(getConfig(configuration, null)); }
Creates a local actor system without remoting. @param configuration instance containing the user provided configuration values @return The created actor system
createLocalActorSystem
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
Apache-2.0
private static ActorSystem createActorSystem(Config config) { return createActorSystem(getFlinkActorSystemName(), config); }
Creates an actor system with the given pekko config. @param config configuration for the actor system @return created actor system
createActorSystem
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
Apache-2.0
public static ActorSystem createActorSystem(String actorSystemName, Config config) { // Initialize slf4j as logger of Pekko's Netty instead of java.util.logging (FLINK-1650) InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory()); return RobustActorSystem.create(actorSystemName, config); }
Creates an actor system with the given pekko config. @param actorSystemName name of the actor system @param config configuration for the actor system @return created actor system
createActorSystem
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
Apache-2.0
@VisibleForTesting public static ActorSystem createDefaultActorSystem() { return createActorSystem(getDefaultConfig()); }
Creates an actor system with the default config and listening on a random port of the localhost. @return default actor system listening on a random port of the localhost
createDefaultActorSystem
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
Apache-2.0
private static Config getDefaultConfig() { return getConfig(new Configuration(), new HostAndPort("", 0)); }
Creates the default pekko configuration which listens on a random port on the local machine. All configuration values are set to default values. @return Flink's Pekko default config
getDefaultConfig
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
Apache-2.0
public static Config getConfig( Configuration configuration, @Nullable HostAndPort externalAddress) { return getConfig( configuration, externalAddress, null, PekkoUtils.getForkJoinExecutorConfig( ActorSystemBootstrapTools.getForkJoinExecutorConfiguration(configuration))); }
Creates a pekko config with the provided configuration values. If the listening address is specified, then the actor system will listen on the respective address. @param configuration instance containing the user provided configuration values @param externalAddress optional tuple of bindAddress and port to be reachable at. If null is given, then a Pekko config for local actor system will be returned @return Pekko config
getConfig
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
Apache-2.0
public static Config getConfig( Configuration configuration, @Nullable HostAndPort externalAddress, @Nullable HostAndPort bindAddress, Config executorConfig) { final Config defaultConfig = PekkoUtils.getBasicConfig(configuration).withFallback(executorConfig); if (externalAddress != null) { if (bindAddress != null) { final Config remoteConfig = PekkoUtils.getRemoteConfig( configuration, bindAddress.getHost(), bindAddress.getPort(), externalAddress.getHost(), externalAddress.getPort()); return remoteConfig.withFallback(defaultConfig); } else { final Config remoteConfig = PekkoUtils.getRemoteConfig( configuration, NetUtils.getWildcardIPAddress(), externalAddress.getPort(), externalAddress.getHost(), externalAddress.getPort()); return remoteConfig.withFallback(defaultConfig); } } return defaultConfig; }
Creates a pekko config with the provided configuration values. If the listening address is specified, then the actor system will listen on the respective address. @param configuration instance containing the user provided configuration values @param externalAddress optional tuple of external address and port to be reachable at. If null is given, then a Pekko config for local actor system will be returned @param bindAddress optional tuple of bind address and port to be used locally. If null is given, wildcard IP address and the external port wil be used. Takes effect only if externalAddress is not null. @param executorConfig config defining the used executor by the default dispatcher @return Pekko config
getConfig
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
Apache-2.0
public static Address getAddress(ActorSystem system) { return RemoteAddressExtension.INSTANCE.apply(system).getAddress(); }
Returns the address of the given {@link ActorSystem}. The {@link Address} object contains the port and the host under which the actor system is reachable. @param system {@link ActorSystem} for which the {@link Address} shall be retrieved @return {@link Address} of the given {@link ActorSystem}
getAddress
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
Apache-2.0
public static String getRpcURL(ActorSystem system, ActorRef actor) { final Address address = getAddress(system); return actor.path().toStringWithAddress(address); }
Returns the given {@link ActorRef}'s path string representation with host and port of the {@link ActorSystem} in which the actor is running. @param system {@link ActorSystem} in which the given {@link ActorRef} is running @param actor {@link ActorRef} of the actor for which the URL has to be generated @return String containing the {@link ActorSystem} independent URL of the actor
getRpcURL
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
Apache-2.0
@SuppressWarnings("RedundantThrows") // hidden checked exception coming from Pekko public static Address getAddressFromRpcURL(String rpcURL) throws MalformedURLException { return AddressFromURIString.apply(rpcURL); }
Extracts the {@link Address} from the given pekko URL. @param rpcURL to extract the {@link Address} from @throws MalformedURLException if the {@link Address} could not be parsed from the given pekko URL @return Extracted {@link Address} from the given rpc URL
getAddressFromRpcURL
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
Apache-2.0
public static InetSocketAddress getInetSocketAddressFromRpcURL(String rpcURL) throws Exception { // Pekko URLs have the form schema://systemName@host:port/.... if it's a remote Pekko URL try { final Address address = getAddressFromRpcURL(rpcURL); if (address.host().isDefined() && address.port().isDefined()) { return new InetSocketAddress(address.host().get(), (int) address.port().get()); } else { throw new MalformedURLException(); } } catch (MalformedURLException e) { throw new Exception("Could not retrieve InetSocketAddress from Pekko URL " + rpcURL); } }
Extracts the hostname and the port of the remote actor system from the given Pekko URL. The result is an {@link InetSocketAddress} instance containing the extracted hostname and port. If the Pekko URL does not contain the hostname and port information, e.g. a local Pekko URL is provided, then an {@link Exception} is thrown. @param rpcURL The URL to extract the host and port from. @throws java.lang.Exception Thrown, if the given string does not represent a proper url @return The InetSocketAddress with the extracted host and port.
getInetSocketAddressFromRpcURL
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PekkoUtils.java
Apache-2.0
@Override public Thread newThread(@Nonnull Runnable r) { final Thread thread = backingThreadFactory.newThread(r); thread.setPriority(newThreadPriority); return thread; }
Wrapper around a {@link ThreadFactory} that configures the thread priority.
newThread
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PrioritySettingThreadFactory.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PrioritySettingThreadFactory.java
Apache-2.0
private static DispatcherPrerequisites createPriorityThreadDispatcherPrerequisites( DispatcherPrerequisites prerequisites, int newThreadPriority) { return new DefaultDispatcherPrerequisites( new PrioritySettingThreadFactory(prerequisites.threadFactory(), newThreadPriority), prerequisites.eventStream(), prerequisites.scheduler(), prerequisites.dynamicAccess(), prerequisites.settings(), prerequisites.mailboxes(), prerequisites.defaultExecutionContext()); }
@param config passed automatically by Pekko, should contain information about threads priority @param prerequisites passed automatically by Pekko
createPriorityThreadDispatcherPrerequisites
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PriorityThreadsDispatcher.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/PriorityThreadsDispatcher.java
Apache-2.0
@Nullable public byte[] getSerializedData() { return serializedData; }
A self-contained serialized value to decouple from user values and transfer on wire.
getSerializedData
java
apache/flink
flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/RpcSerializedValue.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/main/java/org/apache/flink/runtime/rpc/pekko/RpcSerializedValue.java
Apache-2.0
@Test void testConcurrentActorSystemCreation() throws Exception { final int concurrentCreations = 10; final ExecutorService executorService = Executors.newFixedThreadPool(concurrentCreations); final CyclicBarrier cyclicBarrier = new CyclicBarrier(concurrentCreations); try { final List<CompletableFuture<Void>> actorSystemFutures = IntStream.range(0, concurrentCreations) .mapToObj( ignored -> CompletableFuture.supplyAsync( CheckedSupplier.unchecked( () -> { cyclicBarrier.await(); return ActorSystemBootstrapTools .startRemoteActorSystem( new Configuration(), "localhost", "0", LOG); }), executorService)) .map( // terminate ActorSystems actorSystemFuture -> actorSystemFuture.thenCompose( PekkoUtils::terminateActorSystem)) .collect(Collectors.toList()); FutureUtils.completeAll(actorSystemFutures).get(); } finally { ExecutorUtils.gracefulShutdown(10000L, TimeUnit.MILLISECONDS, executorService); } }
Tests that we can concurrently create two {@link ActorSystem} without port conflicts. This effectively tests that we don't open a socket to check for a ports availability. See FLINK-10580 for more details.
testConcurrentActorSystemCreation
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/ActorSystemBootstrapToolsTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/ActorSystemBootstrapToolsTest.java
Apache-2.0
@Test void testActorSystemInstantiationFailureWhenPortOccupied() throws Exception { final ServerSocket portOccupier = new ServerSocket(0, 10, InetAddress.getByName("0.0.0.0")); try { final int port = portOccupier.getLocalPort(); assertThatThrownBy( () -> ActorSystemBootstrapTools.startRemoteActorSystem( new Configuration(), "0.0.0.0", String.valueOf(port), LOG)) .satisfies(FlinkAssertions.anyCauseMatches(BindException.class)); } finally { portOccupier.close(); } }
Tests that the {@link ActorSystem} fails with an expressive exception if it cannot be instantiated due to an occupied port.
testActorSystemInstantiationFailureWhenPortOccupied
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/ActorSystemBootstrapToolsTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/ActorSystemBootstrapToolsTest.java
Apache-2.0
@Test void testNonSerializableLocalMessageTransfer() throws Exception { LinkedBlockingQueue<Object> linkedBlockingQueue = new LinkedBlockingQueue<>(); TestEndpoint testEndpoint = new TestEndpoint(rpcService1, linkedBlockingQueue); testEndpoint.start(); TestGateway testGateway = testEndpoint.getSelfGateway(TestGateway.class); NonSerializableObject expected = new NonSerializableObject(42); testGateway.foobar(expected); assertThat(linkedBlockingQueue.take()).isSameAs(expected); }
Tests that a local rpc call with a non serializable argument can be executed.
testNonSerializableLocalMessageTransfer
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/MessageSerializationTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/MessageSerializationTest.java
Apache-2.0
@Test void testNonSerializableRemoteMessageTransfer() throws Exception { LinkedBlockingQueue<Object> linkedBlockingQueue = new LinkedBlockingQueue<>(); TestEndpoint testEndpoint = new TestEndpoint(rpcService1, linkedBlockingQueue); testEndpoint.start(); String address = testEndpoint.getAddress(); TestGateway remoteGateway = rpcService2.connect(address, TestGateway.class).get(); assertThatThrownBy(() -> remoteGateway.foobar(new Object())) .isInstanceOf(IOException.class); }
Tests that a remote rpc call with a non-serializable argument fails with an {@link IOException} (or an {@link java.lang.reflect.UndeclaredThrowableException} if the method declaration does not include the {@link IOException} as throwable).
testNonSerializableRemoteMessageTransfer
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/MessageSerializationTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/MessageSerializationTest.java
Apache-2.0
@Test void testSerializableRemoteMessageTransfer() throws Exception { LinkedBlockingQueue<Object> linkedBlockingQueue = new LinkedBlockingQueue<>(); TestEndpoint testEndpoint = new TestEndpoint(rpcService1, linkedBlockingQueue); testEndpoint.start(); String address = testEndpoint.getAddress(); CompletableFuture<TestGateway> remoteGatewayFuture = rpcService2.connect(address, TestGateway.class); TestGateway remoteGateway = remoteGatewayFuture.get(); int expected = 42; remoteGateway.foobar(expected); assertThat(linkedBlockingQueue.take()).isEqualTo(expected); }
Tests that a remote rpc call with a serializable argument can be successfully executed.
testSerializableRemoteMessageTransfer
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/MessageSerializationTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/MessageSerializationTest.java
Apache-2.0
@Test void testAddressResolution() throws Exception { DummyRpcEndpoint rpcEndpoint = new DummyRpcEndpoint(pekkoRpcService); CompletableFuture<DummyRpcGateway> futureRpcGateway = pekkoRpcService.connect(rpcEndpoint.getAddress(), DummyRpcGateway.class); DummyRpcGateway rpcGateway = futureRpcGateway.get(); assertThat(rpcGateway.getAddress()).isEqualTo(rpcEndpoint.getAddress()); }
Tests that the rpc endpoint and the associated rpc gateway have the same addresses. @throws Exception
testAddressResolution
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
Apache-2.0
@Test void testFailingAddressResolution() throws Exception { CompletableFuture<DummyRpcGateway> futureRpcGateway = pekkoRpcService.connect("foobar", DummyRpcGateway.class); assertThatThrownBy(() -> futureRpcGateway.get()) .hasCauseInstanceOf(RpcConnectionException.class); }
Tests that a {@link RpcConnectionException} is thrown if the rpc endpoint cannot be connected to.
testFailingAddressResolution
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
Apache-2.0
@Test void testMessageDiscarding() throws Exception { int expectedValue = 1337; DummyRpcEndpoint rpcEndpoint = new DummyRpcEndpoint(pekkoRpcService); DummyRpcGateway rpcGateway = rpcEndpoint.getSelfGateway(DummyRpcGateway.class); // this message should be discarded and complete with an exception assertThatThrownBy(() -> rpcGateway.foobar().get()) .hasCauseInstanceOf(EndpointNotStartedException.class); // set a new value which we expect to be returned rpcEndpoint.setFoobar(expectedValue); // start the endpoint so that it can process messages rpcEndpoint.start(); try { // send the rpc again CompletableFuture<Integer> result = rpcGateway.foobar(); // now we should receive a result :-) Integer actualValue = result.get(); assertThat(actualValue).isEqualTo(expectedValue); } finally { RpcUtils.terminateRpcEndpoint(rpcEndpoint); } }
Tests that the {@link PekkoRpcActor} discards messages until the corresponding {@link RpcEndpoint} has been started.
testMessageDiscarding
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
Apache-2.0
@Test void testResultFutureFailsOnDeserializationError() throws Exception { // setup 2 actor systems and rpc services that support remote connections (for which RPCs go // through serialization) final PekkoRpcService serverPekkoRpcService = new PekkoRpcService( PekkoUtils.createActorSystem( "serverActorSystem", PekkoUtils.getConfig( new Configuration(), new HostAndPort("localhost", 0))), PekkoRpcServiceConfiguration.defaultConfiguration()); final PekkoRpcService clientPekkoRpcService = new PekkoRpcService( PekkoUtils.createActorSystem( "clientActorSystem", PekkoUtils.getConfig( new Configuration(), new HostAndPort("localhost", 0))), PekkoRpcServiceConfiguration.defaultConfiguration()); try { final DeserializatonFailingEndpoint rpcEndpoint = new DeserializatonFailingEndpoint(serverPekkoRpcService); rpcEndpoint.start(); final DeserializatonFailingGateway rpcGateway = rpcEndpoint.getSelfGateway(DeserializatonFailingGateway.class); final DeserializatonFailingGateway connect = clientPekkoRpcService .connect(rpcGateway.getAddress(), DeserializatonFailingGateway.class) .get(); assertThatFuture(connect.doStuff()) .eventuallyFailsWith(ExecutionException.class) .withCauseInstanceOf(RpcException.class); } finally { RpcUtils.terminateRpcService(clientPekkoRpcService); RpcUtils.terminateRpcService(serverPekkoRpcService); } }
Tests that the {@link PekkoInvocationHandler} properly fails the returned future if the response cannot be deserialized.
testResultFutureFailsOnDeserializationError
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
Apache-2.0
@Test void testOnStopExceptionPropagation() throws Exception { FailingOnStopEndpoint rpcEndpoint = new FailingOnStopEndpoint(pekkoRpcService, "FailingOnStopEndpoint"); rpcEndpoint.start(); CompletableFuture<Void> terminationFuture = rpcEndpoint.closeAsync(); assertThatThrownBy(terminationFuture::get) .hasCauseInstanceOf(FailingOnStopEndpoint.OnStopException.class); }
Tests that exception thrown in the onStop method are returned by the termination future.
testOnStopExceptionPropagation
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
Apache-2.0
@Test void testOnStopExecutedByMainThread() throws Exception { SimpleRpcEndpoint simpleRpcEndpoint = new SimpleRpcEndpoint(pekkoRpcService, "SimpleRpcEndpoint"); simpleRpcEndpoint.start(); CompletableFuture<Void> terminationFuture = simpleRpcEndpoint.closeAsync(); // check that we executed the onStop method in the main thread, otherwise an exception // would be thrown here. terminationFuture.get(); }
Checks that the onStop callback is executed within the main thread.
testOnStopExecutedByMainThread
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
Apache-2.0
@Test void testActorTerminationWhenServiceShutdown() throws Exception { final ActorSystem rpcActorSystem = PekkoUtils.createDefaultActorSystem(); final RpcService rpcService = new PekkoRpcService( rpcActorSystem, PekkoRpcServiceConfiguration.defaultConfiguration()); try { SimpleRpcEndpoint rpcEndpoint = new SimpleRpcEndpoint(rpcService, SimpleRpcEndpoint.class.getSimpleName()); rpcEndpoint.start(); CompletableFuture<Void> terminationFuture = rpcEndpoint.getTerminationFuture(); rpcService.closeAsync(); terminationFuture.get(); } finally { rpcActorSystem.terminate(); ScalaFutureUtils.toJava(rpcActorSystem.whenTerminated()).get(); } }
Tests that actors are properly terminated when the {@link PekkoRpcService} is shut down.
testActorTerminationWhenServiceShutdown
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
Apache-2.0
@Test void testActorTerminationWithAsynchronousOnStopAction() throws Exception { final CompletableFuture<Void> onStopFuture = new CompletableFuture<>(); final AsynchronousOnStopEndpoint endpoint = new AsynchronousOnStopEndpoint(pekkoRpcService, onStopFuture); try { endpoint.start(); final CompletableFuture<Void> terminationFuture = endpoint.closeAsync(); assertThat(terminationFuture).isNotDone(); onStopFuture.complete(null); // the onStopFuture completion should allow the endpoint to terminate terminationFuture.get(); } finally { RpcUtils.terminateRpcEndpoint(endpoint); } }
Tests that the {@link PekkoRpcActor} only completes after the asynchronous post stop action has completed.
testActorTerminationWithAsynchronousOnStopAction
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
Apache-2.0
@Test void testMainThreadExecutionOnStop() throws Exception { final MainThreadExecutorOnStopEndpoint endpoint = new MainThreadExecutorOnStopEndpoint(pekkoRpcService); try { endpoint.start(); CompletableFuture<Void> terminationFuture = endpoint.closeAsync(); terminationFuture.get(); } finally { RpcUtils.terminateRpcEndpoint(endpoint); } }
Tests that we can still run commands via the main thread executor when the onStop method is called.
testMainThreadExecutionOnStop
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
Apache-2.0
@Test void testOnStopFutureCompletionDirectlyTerminatesRpcActor() throws Exception { final CompletableFuture<Void> onStopFuture = new CompletableFuture<>(); final TerminatingAfterOnStopFutureCompletionEndpoint endpoint = new TerminatingAfterOnStopFutureCompletionEndpoint(pekkoRpcService, onStopFuture); try { endpoint.start(); final AsyncOperationGateway asyncOperationGateway = endpoint.getSelfGateway(AsyncOperationGateway.class); final CompletableFuture<Void> terminationFuture = endpoint.closeAsync(); assertThat(terminationFuture).isNotDone(); final CompletableFuture<Integer> firstAsyncOperationFuture = asyncOperationGateway.asyncOperation(timeout); final CompletableFuture<Integer> secondAsyncOperationFuture = asyncOperationGateway.asyncOperation(timeout); endpoint.awaitEnterAsyncOperation(); // complete stop operation which should prevent the second async operation from being // executed onStopFuture.complete(null); // we can only complete the termination after the first async operation has been // completed assertThat(terminationFuture).isNotDone(); endpoint.triggerUnblockAsyncOperation(); assertThat(firstAsyncOperationFuture.get()).isEqualTo(42); terminationFuture.get(); assertThat(endpoint.getNumberAsyncOperationCalls()).isEqualTo(1); assertThatFuture(secondAsyncOperationFuture) .eventuallyFailsWith(ExecutionException.class) .withCauseInstanceOf(RecipientUnreachableException.class); } finally { RpcUtils.terminateRpcEndpoint(endpoint); } }
Tests that when the onStop future completes that no other messages will be processed.
testOnStopFutureCompletionDirectlyTerminatesRpcActor
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
Apache-2.0
@Test void testOnStartIsCalledWhenRpcEndpointStarts() throws Exception { final OnStartEndpoint onStartEndpoint = new OnStartEndpoint(pekkoRpcService, null); try { onStartEndpoint.start(); onStartEndpoint.awaitUntilOnStartCalled(); } finally { RpcUtils.terminateRpcEndpoint(onStartEndpoint); } }
Tests that the {@link RpcEndpoint#onStart()} method is called when the {@link RpcEndpoint} is started.
testOnStartIsCalledWhenRpcEndpointStarts
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
Apache-2.0
@Test void testOnStartFails() throws Exception { final FlinkException testException = new FlinkException("Test exception"); final OnStartEndpoint onStartEndpoint = new OnStartEndpoint(pekkoRpcService, testException); onStartEndpoint.start(); onStartEndpoint.awaitUntilOnStartCalled(); assertThatThrownBy(() -> onStartEndpoint.getTerminationFuture().get()) .satisfies( FlinkAssertions.anyCauseMatches( testException.getClass(), testException.getMessage())); }
Tests that if onStart fails, then the endpoint terminates.
testOnStartFails
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcActorTest.java
Apache-2.0
@Test void testScheduledExecutorServiceSimpleSchedule() throws Exception { ScheduledExecutor scheduledExecutor = pekkoRpcService.getScheduledExecutor(); final OneShotLatch latch = new OneShotLatch(); ScheduledFuture<?> future = scheduledExecutor.schedule(latch::trigger, 10L, TimeUnit.MILLISECONDS); future.get(); // once the future is completed, then the latch should have been triggered assertThat(latch.isTriggered()).isTrue(); }
Tests a simple scheduled runnable being executed by the RPC services scheduled executor service.
testScheduledExecutorServiceSimpleSchedule
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcServiceTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcServiceTest.java
Apache-2.0
@Test void testScheduledExecutorServicePeriodicSchedule() throws Exception { ScheduledExecutor scheduledExecutor = pekkoRpcService.getScheduledExecutor(); final int tries = 4; final long delay = 10L; final CountDownLatch countDownLatch = new CountDownLatch(tries); long currentTime = System.nanoTime(); ScheduledFuture<?> future = scheduledExecutor.scheduleAtFixedRate( countDownLatch::countDown, delay, delay, TimeUnit.MILLISECONDS); assertThat((Future) future).isNotDone(); countDownLatch.await(); // the future should not complete since we have a periodic task assertThat((Future) future).isNotDone(); long finalTime = System.nanoTime() - currentTime; // the processing should have taken at least delay times the number of count downs. assertThat(finalTime).isGreaterThanOrEqualTo(tries * delay); future.cancel(true); }
Tests that the RPC service's scheduled executor service can execute runnables at a fixed rate.
testScheduledExecutorServicePeriodicSchedule
java
apache/flink
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcServiceTest.java
https://github.com/apache/flink/blob/master/flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/PekkoRpcServiceTest.java
Apache-2.0