comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
nit: consistent use of final.
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("sessionId can not be null or empty")); } ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().thenReturn(new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager)); }
ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(),
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null")); } if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty")); } final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().map(receiveLink -> new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager)); }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Create a link for the next available session and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from that session. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. */ public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager); })); } /** * Create a link for the "sessionId" and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws IllegalArgumentException if {@code sessionId} is null or empty. */ /** * Create a {@link ServiceBusReceiverAsyncClient} that processes at most {@code maxConcurrentSessions} sessions. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The {@link ServiceBusReceiverAsyncClient} object that will be used to receive messages. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. 
*/ public ServiceBusReceiverAsyncClient getReceiverClient(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), null, maxConcurrentSessions, receiverOptions.getMaxLockRenewDuration()); ServiceBusSessionManager newSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, newSessionManager); } @Override public void close() { this.onClientClose.run(); } }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Acquires a session lock for the next available session and create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from the session. It will wait until a session is available if no one is available * immediately. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager); })); } /** * Acquires a session lock for {@code sessionId} and create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. If the session is already locked by another client, an * {@link com.azure.core.amqp.exception.AmqpException} is thrown. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws NullPointerException if {@code sessionId} is null. * @throws IllegalArgumentException if {@code sessionId} is empty. 
* @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. * @throws com.azure.core.amqp.exception.AmqpException if the lock cannot be acquired. */ @ServiceMethod(returns = ReturnType.SINGLE) @Override public void close() { this.onClientClose.run(); } }
Can use map instead of .thenReturn
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("sessionId can not be null or empty")); } ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().thenReturn(new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager)); }
return sessionSpecificManager.getActiveLink().thenReturn(new ServiceBusReceiverAsyncClient(
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null")); } if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty")); } final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().map(receiveLink -> new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager)); }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Create a link for the next available session and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from that session. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. */ public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager); })); } /** * Create a link for the "sessionId" and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws IllegalArgumentException if {@code sessionId} is null or empty. */ /** * Create a {@link ServiceBusReceiverAsyncClient} that processes at most {@code maxConcurrentSessions} sessions. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The {@link ServiceBusReceiverAsyncClient} object that will be used to receive messages. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. 
*/ public ServiceBusReceiverAsyncClient getReceiverClient(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), null, maxConcurrentSessions, receiverOptions.getMaxLockRenewDuration()); ServiceBusSessionManager newSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, newSessionManager); } @Override public void close() { this.onClientClose.run(); } }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Acquires a session lock for the next available session and create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from the session. It will wait until a session is available if no one is available * immediately. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager); })); } /** * Acquires a session lock for {@code sessionId} and create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. If the session is already locked by another client, an * {@link com.azure.core.amqp.exception.AmqpException} is thrown. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws NullPointerException if {@code sessionId} is null. * @throws IllegalArgumentException if {@code sessionId} is empty. 
* @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. * @throws com.azure.core.amqp.exception.AmqpException if the lock cannot be acquired. */ @ServiceMethod(returns = ReturnType.SINGLE) @Override public void close() { this.onClientClose.run(); } }
Is this idempotent? If we call close multiple times, what happens.
public void close() { this.onClientClose.run(); }
this.onClientClose.run();
public void close() { this.onClientClose.run(); }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Create a link for the next available session and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from that session. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. */ public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager); })); } /** * Create a link for the "sessionId" and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws IllegalArgumentException if {@code sessionId} is null or empty. 
*/ public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("sessionId can not be null or empty")); } ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().thenReturn(new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager)); } /** * Create a {@link ServiceBusReceiverAsyncClient} that processes at most {@code maxConcurrentSessions} sessions. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The {@link ServiceBusReceiverAsyncClient} object that will be used to receive messages. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. 
*/ public ServiceBusReceiverAsyncClient getReceiverClient(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), null, maxConcurrentSessions, receiverOptions.getMaxLockRenewDuration()); ServiceBusSessionManager newSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, newSessionManager); } @Override }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Acquires a session lock for the next available session and create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from the session. It will wait until a session is available if no one is available * immediately. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager); })); } /** * Acquires a session lock for {@code sessionId} and create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. If the session is already locked by another client, an * {@link com.azure.core.amqp.exception.AmqpException} is thrown. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws NullPointerException if {@code sessionId} is null. * @throws IllegalArgumentException if {@code sessionId} is empty. 
* @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. * @throws com.azure.core.amqp.exception.AmqpException if the lock cannot be acquired. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null")); } if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty")); } final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().map(receiveLink -> new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager)); } @Override }
```suggestion return monoError(logger, new IllegalArgumentException("sessionId cannot be null or empty")); ```
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("sessionId can not be null or empty")); } ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().thenReturn(new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager)); }
return monoError(logger, new IllegalArgumentException("sessionId can not be null or empty"));
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null")); } if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty")); } final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().map(receiveLink -> new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager)); }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Create a link for the next available session and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from that session. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. */ public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager); })); } /** * Create a link for the "sessionId" and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws IllegalArgumentException if {@code sessionId} is null or empty. */ /** * Create a {@link ServiceBusReceiverAsyncClient} that processes at most {@code maxConcurrentSessions} sessions. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The {@link ServiceBusReceiverAsyncClient} object that will be used to receive messages. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. 
*/ public ServiceBusReceiverAsyncClient getReceiverClient(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } ReceiverOptions newReceiverOptions = new ReceiverOptions(this.receiverOptions.getReceiveMode(), this.receiverOptions.getPrefetchCount(), null, maxConcurrentSessions, this.receiverOptions.getMaxLockRenewDuration()); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose); } @Override public void close() { this.onClientClose.run(); } }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Acquires a session lock for the next available session and create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from the session. It will wait until a session is available if no one is available * immediately. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager); })); } /** * Acquires a session lock for {@code sessionId} and create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. If the session is already locked by another client, an * {@link com.azure.core.amqp.exception.AmqpException} is thrown. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws NullPointerException if {@code sessionId} is null. * @throws IllegalArgumentException if {@code sessionId} is empty. 
* @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. * @throws com.azure.core.amqp.exception.AmqpException if the lock cannot be acquired. */ @ServiceMethod(returns = ReturnType.SINGLE) @Override public void close() { this.onClientClose.run(); } }
Java guidelines specify that if the input is null for an expected non-null value, then we must throw NPE. If the input is invalid, then we should use IllegalArgumentException.
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("sessionId can not be null or empty")); } ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().thenReturn(new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager)); }
if (CoreUtils.isNullOrEmpty(sessionId)) {
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null")); } if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty")); } final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().map(receiveLink -> new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager)); }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Create a link for the next available session and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from that session. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. */ public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager); })); } /** * Create a link for the "sessionId" and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws IllegalArgumentException if {@code sessionId} is null or empty. */ /** * Create a {@link ServiceBusReceiverAsyncClient} that processes at most {@code maxConcurrentSessions} sessions. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The {@link ServiceBusReceiverAsyncClient} object that will be used to receive messages. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. 
*/ public ServiceBusReceiverAsyncClient getReceiverClient(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } ReceiverOptions newReceiverOptions = new ReceiverOptions(this.receiverOptions.getReceiveMode(), this.receiverOptions.getPrefetchCount(), null, maxConcurrentSessions, this.receiverOptions.getMaxLockRenewDuration()); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose); } @Override public void close() { this.onClientClose.run(); } }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Acquires a session lock for the next available session and create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from the session. It will wait until a session is available if no one is available * immediately. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager); })); } /** * Acquires a session lock for {@code sessionId} and create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. If the session is already locked by another client, an * {@link com.azure.core.amqp.exception.AmqpException} is thrown. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws NullPointerException if {@code sessionId} is null. * @throws IllegalArgumentException if {@code sessionId} is empty. 
* @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. * @throws com.azure.core.amqp.exception.AmqpException if the lock cannot be acquired. */ @ServiceMethod(returns = ReturnType.SINGLE) @Override public void close() { this.onClientClose.run(); } }
Don't throw NPE here. Return `monoError(logger, new NPE())` instead.
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { sessionId = Objects.requireNonNull(sessionId, "'sessionId' cannot be null"); if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty")); } final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().map(receiveLink -> new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager)); }
sessionId = Objects.requireNonNull(sessionId, "'sessionId' cannot be null");
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null")); } if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty")); } final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().map(receiveLink -> new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager)); }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Acquires a session lock for the next available session and create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from the session. * <p>Accept next available session</p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * </p> * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. */ public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager); })); } /** * Acquires a session lock for {@code sessionId} and create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. * <p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * </p> * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws NullPointerException if {@code sessionId} is null. * @throws IllegalArgumentException if {@code sessionId} is empty. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. * @throws com.azure.core.amqp.exception.AmqpException if the session has been locked by another session receiver. */ @Override public void close() { this.onClientClose.run(); } }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Acquires a session lock for the next available session and create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from the session. It will wait until a session is available if no one is available * immediately. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager); })); } /** * Acquires a session lock for {@code sessionId} and create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. If the session is already locked by another client, an * {@link com.azure.core.amqp.exception.AmqpException} is thrown. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws NullPointerException if {@code sessionId} is null. * @throws IllegalArgumentException if {@code sessionId} is empty. 
* @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. * @throws com.azure.core.amqp.exception.AmqpException if the lock cannot be acquired. */ @ServiceMethod(returns = ReturnType.SINGLE) @Override public void close() { this.onClientClose.run(); } }
According to our discussion discussion offline, we give the user a Mono so they have full control.
/**
 * Creates a link for the next available session and uses that link to build a
 * {@link ServiceBusReceiverAsyncClient} tied to the session the broker assigned.
 *
 * @return A {@link ServiceBusReceiverAsyncClient} scoped to the next available session.
 */
public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() {
    return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId()
        .map(sessionId -> {
            // Narrow the receiver options to the broker-assigned session id.
            // 'final' for consistency with the other locals in this method.
            final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(),
                receiverOptions.getPrefetchCount(), sessionId, null,
                receiverOptions.getMaxLockRenewDuration());
            // Reuse the already-open link so we do not open a second AMQP link for the same session.
            final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath,
                entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions,
                receiveLink);
            return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType,
                newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
                tracerProvider, messageSerializer, onClientClose, sessionSpecificManager);
        }));
}
return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId()
/**
 * Acquires a lock on whichever session becomes available next and returns a
 * {@link ServiceBusReceiverAsyncClient} bound to it. Waits until the broker has a session to hand out.
 *
 * @return A {@link ServiceBusReceiverAsyncClient} tied to the next available session.
 */
public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() {
    return unNamedSessionManager.getActiveLink().flatMap(link -> link.getSessionId()
        .map(sessionId -> {
            // Copy the shared options, pinning them to the session id the broker assigned.
            final ReceiverOptions sessionOptions = new ReceiverOptions(receiverOptions.getReceiveMode(),
                receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(),
                receiverOptions.isAutoLockRenewEnabled(), sessionId, null);
            // Hand the existing link to the manager so it is reused rather than reopened.
            final ServiceBusSessionManager manager = new ServiceBusSessionManager(entityPath, entityType,
                connectionProcessor, tracerProvider, messageSerializer, sessionOptions, link);
            return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType,
                sessionOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider,
                messageSerializer, () -> { }, manager);
        }));
}
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Create a link for the next available session and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from that session. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. */ /** * Create a link for the "sessionId" and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws IllegalArgumentException if {@code sessionId} is null or empty. */ public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("sessionId can not be null or empty")); } ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().thenReturn(new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager)); } /** * Create a {@link ServiceBusReceiverAsyncClient} that processes at most {@code maxConcurrentSessions} sessions. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The {@link ServiceBusReceiverAsyncClient} object that will be used to receive messages. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. 
*/ public ServiceBusReceiverAsyncClient getReceiverClient(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), null, maxConcurrentSessions, receiverOptions.getMaxLockRenewDuration()); ServiceBusSessionManager newSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, newSessionManager); } @Override public void close() { this.onClientClose.run(); } }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Acquires a session lock for the next available session and create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from the session. It will wait until a session is available if no one is available * immediately. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Acquires a session lock for {@code sessionId} and create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. If the session is already locked by another client, an * {@link com.azure.core.amqp.exception.AmqpException} is thrown. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws NullPointerException if {@code sessionId} is null. * @throws IllegalArgumentException if {@code sessionId} is empty. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. * @throws com.azure.core.amqp.exception.AmqpException if the lock cannot be acquired. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null")); } if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty")); } final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().map(receiveLink -> new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager)); } @Override public void close() { this.onClientClose.run(); } }
Good catch. Fixed
/**
 * Creates a link for {@code sessionId} and uses it to build a {@link ServiceBusReceiverAsyncClient}
 * that receives messages only from that session.
 *
 * @param sessionId The session Id.
 * @return A {@link ServiceBusReceiverAsyncClient} tied to the specified session.
 * @throws IllegalArgumentException if {@code sessionId} is null or empty.
 */
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) {
    if (CoreUtils.isNullOrEmpty(sessionId)) {
        return monoError(logger, new IllegalArgumentException("sessionId can not be null or empty"));
    }
    // 'final' for consistency with the other locals in this method.
    final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(),
        receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration());
    final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath,
        entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions);
    // Establishing the link acquires the session lock; the client is only emitted on success.
    return sessionSpecificManager.getActiveLink().thenReturn(new ServiceBusReceiverAsyncClient(
        fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor,
        ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose,
        sessionSpecificManager));
}
ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(),
/**
 * Acquires a session lock for {@code sessionId} and creates a {@link ServiceBusReceiverAsyncClient}
 * bound to that session. Fails if another client already holds the lock.
 *
 * @param sessionId The session Id.
 * @return A {@link ServiceBusReceiverAsyncClient} tied to the specified session.
 * @throws NullPointerException if {@code sessionId} is null.
 * @throws IllegalArgumentException if {@code sessionId} is empty.
 */
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) {
    if (sessionId == null) {
        return monoError(logger, new NullPointerException("'sessionId' cannot be null"));
    }
    if (CoreUtils.isNullOrEmpty(sessionId)) {
        return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty"));
    }
    // Pin the shared receiver options to the requested session.
    final ReceiverOptions sessionOptions = new ReceiverOptions(receiverOptions.getReceiveMode(),
        receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(),
        receiverOptions.isAutoLockRenewEnabled(), sessionId, null);
    final ServiceBusSessionManager manager = new ServiceBusSessionManager(entityPath, entityType,
        connectionProcessor, tracerProvider, messageSerializer, sessionOptions);
    // The client is only emitted once the link — and therefore the session lock — is established.
    return manager.getActiveLink().map(link -> new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace,
        entityPath, entityType, sessionOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
        tracerProvider, messageSerializer, () -> { }, manager));
}
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Create a link for the next available session and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from that session. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. */ public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager); })); } /** * Create a link for the "sessionId" and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws IllegalArgumentException if {@code sessionId} is null or empty. */ /** * Create a {@link ServiceBusReceiverAsyncClient} that processes at most {@code maxConcurrentSessions} sessions. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The {@link ServiceBusReceiverAsyncClient} object that will be used to receive messages. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. 
*/ public ServiceBusReceiverAsyncClient getReceiverClient(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), null, maxConcurrentSessions, receiverOptions.getMaxLockRenewDuration()); ServiceBusSessionManager newSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, newSessionManager); } @Override public void close() { this.onClientClose.run(); } }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Acquires a session lock for the next available session and create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from the session. It will wait until a session is available if no one is available * immediately. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager); })); } /** * Acquires a session lock for {@code sessionId} and create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. If the session is already locked by another client, an * {@link com.azure.core.amqp.exception.AmqpException} is thrown. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws NullPointerException if {@code sessionId} is null. * @throws IllegalArgumentException if {@code sessionId} is empty. 
* @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. * @throws com.azure.core.amqp.exception.AmqpException if the lock cannot be acquired. */ @ServiceMethod(returns = ReturnType.SINGLE) @Override public void close() { this.onClientClose.run(); } }
Updated
/**
 * Creates a link for {@code sessionId} and uses it to build a {@link ServiceBusReceiverAsyncClient}
 * that receives messages only from that session.
 *
 * @param sessionId The session Id.
 * @return A {@link ServiceBusReceiverAsyncClient} tied to the specified session.
 * @throws IllegalArgumentException if {@code sessionId} is null or empty.
 */
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) {
    if (CoreUtils.isNullOrEmpty(sessionId)) {
        return monoError(logger, new IllegalArgumentException("sessionId can not be null or empty"));
    }
    // 'final' for consistency with the other locals in this method.
    final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(),
        receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration());
    final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath,
        entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions);
    // Establishing the link acquires the session lock; the client is only emitted on success.
    return sessionSpecificManager.getActiveLink().thenReturn(new ServiceBusReceiverAsyncClient(
        fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor,
        ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose,
        sessionSpecificManager));
}
return sessionSpecificManager.getActiveLink().thenReturn(new ServiceBusReceiverAsyncClient(
/**
 * Acquires a session lock for {@code sessionId} and creates a {@link ServiceBusReceiverAsyncClient}
 * bound to that session. Fails if another client already holds the lock.
 *
 * @param sessionId The session Id.
 * @return A {@link ServiceBusReceiverAsyncClient} tied to the specified session.
 * @throws NullPointerException if {@code sessionId} is null.
 * @throws IllegalArgumentException if {@code sessionId} is empty.
 */
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) {
    if (sessionId == null) {
        return monoError(logger, new NullPointerException("'sessionId' cannot be null"));
    }
    if (CoreUtils.isNullOrEmpty(sessionId)) {
        return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty"));
    }
    // Pin the shared receiver options to the requested session.
    final ReceiverOptions sessionOptions = new ReceiverOptions(receiverOptions.getReceiveMode(),
        receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(),
        receiverOptions.isAutoLockRenewEnabled(), sessionId, null);
    final ServiceBusSessionManager manager = new ServiceBusSessionManager(entityPath, entityType,
        connectionProcessor, tracerProvider, messageSerializer, sessionOptions);
    // The client is only emitted once the link — and therefore the session lock — is established.
    return manager.getActiveLink().map(link -> new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace,
        entityPath, entityType, sessionOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
        tracerProvider, messageSerializer, () -> { }, manager));
}
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Create a link for the next available session and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from that session. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. */ public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager); })); } /** * Create a link for the "sessionId" and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws IllegalArgumentException if {@code sessionId} is null or empty. */ /** * Create a {@link ServiceBusReceiverAsyncClient} that processes at most {@code maxConcurrentSessions} sessions. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The {@link ServiceBusReceiverAsyncClient} object that will be used to receive messages. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. 
*/ public ServiceBusReceiverAsyncClient getReceiverClient(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), null, maxConcurrentSessions, receiverOptions.getMaxLockRenewDuration()); ServiceBusSessionManager newSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, newSessionManager); } @Override public void close() { this.onClientClose.run(); } }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Acquires a session lock for the next available session and create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from the session. It will wait until a session is available if no one is available * immediately. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager); })); } /** * Acquires a session lock for {@code sessionId} and create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. If the session is already locked by another client, an * {@link com.azure.core.amqp.exception.AmqpException} is thrown. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws NullPointerException if {@code sessionId} is null. * @throws IllegalArgumentException if {@code sessionId} is empty. 
* @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. * @throws com.azure.core.amqp.exception.AmqpException if the lock cannot be acquired. */ @ServiceMethod(returns = ReturnType.SINGLE) @Override public void close() { this.onClientClose.run(); } }
Fixed: null and empty session IDs now raise distinct exceptions, and the receiver is constructed lazily via map once the session link is active.
/**
 * Acquires a session lock for {@code sessionId} and creates a {@link ServiceBusReceiverAsyncClient}
 * bound to that session.
 *
 * @param sessionId The session Id.
 * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session.
 * @throws NullPointerException if {@code sessionId} is null.
 * @throws IllegalArgumentException if {@code sessionId} is empty.
 */
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) {
    // Distinguish a null argument from an empty one so callers get the conventional exception type
    // for each case instead of a single IllegalArgumentException covering both.
    if (sessionId == null) {
        return monoError(logger, new NullPointerException("'sessionId' cannot be null"));
    }
    if (CoreUtils.isNullOrEmpty(sessionId)) {
        return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty"));
    }
    final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(),
        receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(),
        receiverOptions.isAutoLockRenewEnabled(), sessionId, null);
    final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath,
        entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions);
    // map (rather than thenReturn) defers client construction until the session link is actually
    // active; the per-session client gets a no-op close callback so closing it does not tear down
    // this shared session-receiver client.
    return sessionSpecificManager.getActiveLink().map(receiveLink -> new ServiceBusReceiverAsyncClient(
        fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor,
        ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { },
        sessionSpecificManager));
}
return monoError(logger, new IllegalArgumentException("sessionId can not be null or empty"));
/**
 * Acquires a lock on the session identified by {@code sessionId} and returns a receiver client
 * scoped to that session.
 *
 * @param sessionId The session Id.
 * @return A {@link ServiceBusReceiverAsyncClient} bound to {@code sessionId}.
 * @throws NullPointerException if {@code sessionId} is null.
 * @throws IllegalArgumentException if {@code sessionId} is empty.
 */
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) {
    if (sessionId == null) {
        return monoError(logger, new NullPointerException("'sessionId' cannot be null"));
    } else if (CoreUtils.isNullOrEmpty(sessionId)) {
        return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty"));
    }

    final ReceiverOptions sessionOptions = new ReceiverOptions(receiverOptions.getReceiveMode(),
        receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(),
        receiverOptions.isAutoLockRenewEnabled(), sessionId, null);
    final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType,
        connectionProcessor, tracerProvider, messageSerializer, sessionOptions);

    // Wait until the session link is active before handing back the client. The per-session client
    // carries a no-op close callback, so closing it leaves this shared client untouched.
    return sessionManager.getActiveLink()
        .map(receiveLink -> new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath,
            entityType, sessionOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
            tracerProvider, messageSerializer, () -> { }, sessionManager));
}
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Create a link for the next available session and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from that session. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. */ public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager); })); } /** * Create a link for the "sessionId" and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws IllegalArgumentException if {@code sessionId} is null or empty. */ /** * Create a {@link ServiceBusReceiverAsyncClient} that processes at most {@code maxConcurrentSessions} sessions. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The {@link ServiceBusReceiverAsyncClient} object that will be used to receive messages. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. 
*/
public ServiceBusReceiverAsyncClient getReceiverClient(int maxConcurrentSessions) {
    if (maxConcurrentSessions < 1) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Maximum number of concurrent sessions must be positive."));
    }
    // Clone the current options with no pinned session id and the requested concurrency cap.
    // The local is final for consistency with the rest of the class.
    final ReceiverOptions newReceiverOptions = new ReceiverOptions(this.receiverOptions.getReceiveMode(),
        this.receiverOptions.getPrefetchCount(), null, maxConcurrentSessions,
        this.receiverOptions.getMaxLockRenewDuration());
    // This receiver shares the client-wide close callback: disposing it disposes the underlying resources.
    return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType,
        newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider,
        messageSerializer, onClientClose);
}

@Override
public void close() {
    this.onClientClose.run();
}
}
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Acquires a session lock for the next available session and create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from the session. It will wait until a session is available if no one is available * immediately. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager); })); } /** * Acquires a session lock for {@code sessionId} and create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. If the session is already locked by another client, an * {@link com.azure.core.amqp.exception.AmqpException} is thrown. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws NullPointerException if {@code sessionId} is null. * @throws IllegalArgumentException if {@code sessionId} is empty. 
* @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. * @throws com.azure.core.amqp.exception.AmqpException if the lock cannot be acquired. */ @ServiceMethod(returns = ReturnType.SINGLE) @Override public void close() { this.onClientClose.run(); } }
Updated to throw separate errors.
/**
 * Acquires a session lock for {@code sessionId} and creates a {@link ServiceBusReceiverAsyncClient}
 * bound to that session.
 *
 * @param sessionId The session Id.
 * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session.
 * @throws NullPointerException if {@code sessionId} is null.
 * @throws IllegalArgumentException if {@code sessionId} is empty.
 */
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) {
    // Throw the conventional exception type for each invalid case: NPE for null, IAE for empty,
    // rather than one IllegalArgumentException for both.
    if (sessionId == null) {
        return monoError(logger, new NullPointerException("'sessionId' cannot be null"));
    }
    if (CoreUtils.isNullOrEmpty(sessionId)) {
        return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty"));
    }
    final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(),
        receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(),
        receiverOptions.isAutoLockRenewEnabled(), sessionId, null);
    final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath,
        entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions);
    // map (rather than thenReturn) defers client construction until the session link is actually
    // active; the per-session client gets a no-op close callback so closing it does not tear down
    // this shared session-receiver client.
    return sessionSpecificManager.getActiveLink().map(receiveLink -> new ServiceBusReceiverAsyncClient(
        fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor,
        ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { },
        sessionSpecificManager));
}
if (CoreUtils.isNullOrEmpty(sessionId)) {
/**
 * Locks the session named by {@code sessionId} and produces a receiver client tied to it.
 *
 * @param sessionId The session Id.
 * @return A {@link ServiceBusReceiverAsyncClient} bound to {@code sessionId}.
 * @throws NullPointerException if {@code sessionId} is null.
 * @throws IllegalArgumentException if {@code sessionId} is empty.
 */
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) {
    // Validate the argument up front, surfacing failures through the returned Mono.
    if (sessionId == null) {
        return monoError(logger, new NullPointerException("'sessionId' cannot be null"));
    }
    if (CoreUtils.isNullOrEmpty(sessionId)) {
        return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty"));
    }

    final ReceiverOptions options = new ReceiverOptions(receiverOptions.getReceiveMode(),
        receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(),
        receiverOptions.isAutoLockRenewEnabled(), sessionId, null);
    final ServiceBusSessionManager manager = new ServiceBusSessionManager(entityPath, entityType,
        connectionProcessor, tracerProvider, messageSerializer, options);

    // Only once the link is active do we materialize the client; its close callback is a no-op so
    // disposing the per-session client does not dispose this shared one.
    return manager.getActiveLink().map(receiveLink ->
        new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, options,
            connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer,
            () -> { }, manager));
}
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Create a link for the next available session and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from that session. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. */ public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager); })); } /** * Create a link for the "sessionId" and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws IllegalArgumentException if {@code sessionId} is null or empty. */ /** * Create a {@link ServiceBusReceiverAsyncClient} that processes at most {@code maxConcurrentSessions} sessions. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The {@link ServiceBusReceiverAsyncClient} object that will be used to receive messages. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. 
*/
public ServiceBusReceiverAsyncClient getReceiverClient(int maxConcurrentSessions) {
    if (maxConcurrentSessions < 1) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Maximum number of concurrent sessions must be positive."));
    }
    // Clone the current options with no pinned session id and the requested concurrency cap.
    // The local is final for consistency with the rest of the class.
    final ReceiverOptions newReceiverOptions = new ReceiverOptions(this.receiverOptions.getReceiveMode(),
        this.receiverOptions.getPrefetchCount(), null, maxConcurrentSessions,
        this.receiverOptions.getMaxLockRenewDuration());
    // This receiver shares the client-wide close callback: disposing it disposes the underlying resources.
    return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType,
        newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider,
        messageSerializer, onClientClose);
}

@Override
public void close() {
    this.onClientClose.run();
}
}
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Acquires a session lock for the next available session and create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from the session. It will wait until a session is available if no one is available * immediately. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager); })); } /** * Acquires a session lock for {@code sessionId} and create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. If the session is already locked by another client, an * {@link com.azure.core.amqp.exception.AmqpException} is thrown. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws NullPointerException if {@code sessionId} is null. * @throws IllegalArgumentException if {@code sessionId} is empty. 
* @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. * @throws com.azure.core.amqp.exception.AmqpException if the lock cannot be acquired. */ @ServiceMethod(returns = ReturnType.SINGLE) @Override public void close() { this.onClientClose.run(); } }
Updated: marked the local ReceiverOptions as final and gave the per-session client a no-op close callback.
/**
 * Acquires a session lock for the next available session and creates a
 * {@link ServiceBusReceiverAsyncClient} bound to that session.
 *
 * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the next available session.
 */
public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() {
    return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId()
        .map(sessionId -> {
            // Session-specific options: same mode/prefetch/renewal settings as this client,
            // pinned to the session id reported by the active link. Locals are final for
            // consistency with the rest of the class.
            final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(),
                receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(),
                receiverOptions.isAutoLockRenewEnabled(), sessionId, null);
            final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath,
                entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions,
                receiveLink);
            // No-op close callback: closing the per-session client must not close this shared client.
            return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType,
                newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
                tracerProvider, messageSerializer, () -> { }, sessionSpecificManager);
        }));
}
ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(),
/**
 * Locks the next available session and produces a receiver client tied to it.
 *
 * @return A {@link ServiceBusReceiverAsyncClient} bound to the next available session.
 */
public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() {
    return unNamedSessionManager.getActiveLink()
        .flatMap(receiveLink -> receiveLink.getSessionId().map(sessionId -> {
            // Carry over this client's receive settings, pinned to the session the link reported.
            final ReceiverOptions sessionOptions = new ReceiverOptions(receiverOptions.getReceiveMode(),
                receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(),
                receiverOptions.isAutoLockRenewEnabled(), sessionId, null);
            final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath,
                entityType, connectionProcessor, tracerProvider, messageSerializer, sessionOptions,
                receiveLink);
            // The per-session client's close callback is a no-op so disposing it does not dispose
            // this shared client.
            return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType,
                sessionOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
                tracerProvider, messageSerializer, () -> { }, sessionManager);
        }));
}
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Create a link for the next available session and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from that session. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. */ /** * Create a link for the "sessionId" and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws IllegalArgumentException if {@code sessionId} is null or empty. */ public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("sessionId can not be null or empty")); } ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().thenReturn(new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager)); } /** * Create a {@link ServiceBusReceiverAsyncClient} that processes at most {@code maxConcurrentSessions} sessions. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The {@link ServiceBusReceiverAsyncClient} object that will be used to receive messages. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. 
*/
public ServiceBusReceiverAsyncClient getReceiverClient(int maxConcurrentSessions) {
    if (maxConcurrentSessions < 1) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Maximum number of concurrent sessions must be positive."));
    }
    // Clone the current options with no pinned session id and the requested concurrency cap.
    // Locals are final for consistency with the rest of the class.
    final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(),
        receiverOptions.getPrefetchCount(), null, maxConcurrentSessions,
        receiverOptions.getMaxLockRenewDuration());
    final ServiceBusSessionManager newSessionManager = new ServiceBusSessionManager(entityPath, entityType,
        connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions);
    // This receiver shares the client-wide close callback: disposing it disposes the underlying resources.
    return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType,
        newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider,
        messageSerializer, onClientClose, newSessionManager);
}

@Override
public void close() {
    this.onClientClose.run();
}
}
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Acquires a session lock for the next available session and create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from the session. It will wait until a session is available if no one is available * immediately. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Acquires a session lock for {@code sessionId} and create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. If the session is already locked by another client, an * {@link com.azure.core.amqp.exception.AmqpException} is thrown. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws NullPointerException if {@code sessionId} is null. * @throws IllegalArgumentException if {@code sessionId} is empty. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. * @throws com.azure.core.amqp.exception.AmqpException if the lock cannot be acquired. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null")); } if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty")); } final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().map(receiveLink -> new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager)); } @Override public void close() { this.onClientClose.run(); } }
This calls the ServiceBusClientBuilder's close() method.
/**
 * Disposes of this client by running the {@code onClientClose} callback that was supplied at
 * construction time. No other resources are released here; the callback owns the actual cleanup.
 */
public void close() { this.onClientClose.run(); }
this.onClientClose.run();
/**
 * Closes this client. Delegates entirely to the {@code onClientClose} callback injected through
 * the constructor — presumably the builder's shared-connection teardown; confirm against the builder.
 */
public void close() { this.onClientClose.run(); }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Create a link for the next available session and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from that session. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. */ public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager); })); } /** * Create a link for the "sessionId" and use the link to create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws IllegalArgumentException if {@code sessionId} is null or empty. 
*/ public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("sessionId can not be null or empty")); } ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), sessionId, null, receiverOptions.getMaxLockRenewDuration()); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().thenReturn(new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, sessionSpecificManager)); } /** * Create a {@link ServiceBusReceiverAsyncClient} that processes at most {@code maxConcurrentSessions} sessions. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The {@link ServiceBusReceiverAsyncClient} object that will be used to receive messages. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. 
*/ public ServiceBusReceiverAsyncClient getReceiverClient(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("Maximum number of concurrent sessions must be positive.")); } ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), null, maxConcurrentSessions, receiverOptions.getMaxLockRenewDuration()); ServiceBusSessionManager newSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, onClientClose, newSessionManager); } @Override }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Acquires a session lock for the next available session and create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from the session. It will wait until a session is available if no one is available * immediately. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager); })); } /** * Acquires a session lock for {@code sessionId} and create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. If the session is already locked by another client, an * {@link com.azure.core.amqp.exception.AmqpException} is thrown. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws NullPointerException if {@code sessionId} is null. * @throws IllegalArgumentException if {@code sessionId} is empty. 
* @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. * @throws com.azure.core.amqp.exception.AmqpException if the lock cannot be acquired. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) { if (sessionId == null) { return monoError(logger, new NullPointerException("'sessionId' cannot be null")); } if (CoreUtils.isNullOrEmpty(sessionId)) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty")); } final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions); return sessionSpecificManager.getActiveLink().map(receiveLink -> new ServiceBusReceiverAsyncClient( fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager)); } @Override }
Updated: the null check now returns a failed Mono (NullPointerException via monoError) instead of throwing synchronously through Objects.requireNonNull, and the empty check keeps its IllegalArgumentException path.
/**
 * Acquires a session lock for {@code sessionId} and creates a {@link ServiceBusReceiverAsyncClient}
 * that receives messages from that session.
 *
 * @param sessionId The session id to lock onto.
 * @return A {@link ServiceBusReceiverAsyncClient} tied to the specified session.
 * @throws NullPointerException if {@code sessionId} is null (signaled through the returned Mono).
 * @throws IllegalArgumentException if {@code sessionId} is empty (signaled through the returned Mono).
 */
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) {
    // Signal argument errors through the Mono instead of throwing synchronously:
    // Objects.requireNonNull would throw at assembly time, before any subscription,
    // which is inconsistent with the monoError path used for the empty-string case.
    if (sessionId == null) {
        return monoError(logger, new NullPointerException("'sessionId' cannot be null"));
    }
    if (CoreUtils.isNullOrEmpty(sessionId)) {
        return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty"));
    }
    // Clone the receiver options, pinned to this one session (no rollover: maxConcurrentSessions = null).
    final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(),
        receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(),
        receiverOptions.isAutoLockRenewEnabled(), sessionId, null);
    final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath,
        entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions);
    // map (not thenReturn) so the client is constructed only after the session link is active.
    return sessionSpecificManager.getActiveLink().map(receiveLink -> new ServiceBusReceiverAsyncClient(
        fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor,
        ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { },
        sessionSpecificManager));
}
sessionId = Objects.requireNonNull(sessionId, "'sessionId' cannot be null");
/**
 * Acquires a lock on the session identified by {@code sessionId} and builds a
 * {@link ServiceBusReceiverAsyncClient} bound to that single session.
 *
 * @param sessionId The session id to lock onto.
 * @return A {@link ServiceBusReceiverAsyncClient} tied to the specified session.
 * @throws NullPointerException if {@code sessionId} is null (delivered via the returned Mono).
 * @throws IllegalArgumentException if {@code sessionId} is empty (delivered via the returned Mono).
 */
public Mono<ServiceBusReceiverAsyncClient> acceptSession(String sessionId) {
    // Guard clauses: argument failures are surfaced through the Mono, not thrown at call time.
    if (sessionId == null) {
        return monoError(logger, new NullPointerException("'sessionId' cannot be null"));
    }
    if (CoreUtils.isNullOrEmpty(sessionId)) {
        return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty"));
    }

    // Copy the configured options, targeting exactly this session (no concurrent-session rollover).
    final ReceiverOptions sessionOptions = new ReceiverOptions(
        receiverOptions.getReceiveMode(),
        receiverOptions.getPrefetchCount(),
        receiverOptions.getMaxLockRenewDuration(),
        receiverOptions.isAutoLockRenewEnabled(),
        sessionId,
        null);
    final ServiceBusSessionManager singleSessionManager = new ServiceBusSessionManager(
        entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, sessionOptions);

    // Only once the session link is active do we materialize the receiver client.
    return singleSessionManager.getActiveLink()
        .map(receiveLink -> new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath,
            entityType, sessionOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT,
            tracerProvider, messageSerializer, () -> { }, singleSessionManager));
}
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Acquires a session lock for the next available session and create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from the session. * <p>Accept next available session</p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * </p> * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. */ public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager); })); } /** * Acquires a session lock for {@code sessionId} and create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. * <p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * </p> * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws NullPointerException if {@code sessionId} is null. * @throws IllegalArgumentException if {@code sessionId} is empty. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. * @throws com.azure.core.amqp.exception.AmqpException if the session has been locked by another session receiver. */ @Override public void close() { this.onClientClose.run(); } }
class ServiceBusSessionReceiverAsyncClient implements AutoCloseable { private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager unNamedSessionManager; private final ClientLogger logger = new ClientLogger(ServiceBusSessionReceiverAsyncClient.class); ServiceBusSessionReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.unNamedSessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); } /** * Acquires a session lock for the next available session and create a {@link ServiceBusReceiverAsyncClient} * to receive 
messages from the session. It will wait until a session is available if no one is available * immediately. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the available session. * @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<ServiceBusReceiverAsyncClient> acceptNextSession() { return unNamedSessionManager.getActiveLink().flatMap(receiveLink -> receiveLink.getSessionId() .map(sessionId -> { final ReceiverOptions newReceiverOptions = new ReceiverOptions(receiverOptions.getReceiveMode(), receiverOptions.getPrefetchCount(), receiverOptions.getMaxLockRenewDuration(), receiverOptions.isAutoLockRenewEnabled(), sessionId, null); final ServiceBusSessionManager sessionSpecificManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, newReceiverOptions, receiveLink); return new ServiceBusReceiverAsyncClient(fullyQualifiedNamespace, entityPath, entityType, newReceiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, () -> { }, sessionSpecificManager); })); } /** * Acquires a session lock for {@code sessionId} and create a {@link ServiceBusReceiverAsyncClient} * to receive messages from the session. If the session is already locked by another client, an * {@link com.azure.core.amqp.exception.AmqpException} is thrown. * * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.instantiation * * @param sessionId The session Id. * @return A {@link ServiceBusReceiverAsyncClient} that is tied to the specified session. * @throws NullPointerException if {@code sessionId} is null. * @throws IllegalArgumentException if {@code sessionId} is empty. 
* @throws UnsupportedOperationException if the queue or topic subscription is not session-enabled. * @throws com.azure.core.amqp.exception.AmqpException if the lock cannot be acquired. */ @ServiceMethod(returns = ReturnType.SINGLE) @Override public void close() { this.onClientClose.run(); } }
The processor will never build a synchronous client, so this method can be removed.
// Builds a synchronous receiver wrapping the async processor client.
// NOTE(review): the processor appears never to build a synchronous client, so this method looks
// like a removal candidate — confirm there are no remaining callers before deleting.
ServiceBusReceiverClient buildClientForProcessor() { return new ServiceBusReceiverClient(buildAsyncClientForProcessor(false), retryOptions.getTryTimeout()); }
}
// Wraps the async processor client in a blocking ServiceBusReceiverClient, using the retry
// try-timeout as the synchronous operation timeout.
// NOTE(review): presumably dead code — the processor path seems to use only the async client;
// verify and remove if unused.
ServiceBusReceiverClient buildClientForProcessor() { return new ServiceBusReceiverClient(buildAsyncClientForProcessor(false), retryOptions.getTryTimeout()); }
class ServiceBusSessionReceiverClientBuilder { private boolean enableAutoComplete = true; private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK; private String subscriptionName; private String topicName; private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION; private ServiceBusSessionReceiverClientBuilder() { } /** * Disables auto-complete and auto-abandon of received messages. By default, a successfully processed message is * {@link ServiceBusReceiverAsyncClient * the message is processed, it is {@link ServiceBusReceiverAsyncClient * abandoned}. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder disableAutoComplete() { this.enableAutoComplete = false; return this; } /** * Sets the amount of time to continue auto-renewing the session lock. Setting {@link Duration * {@code null} disables auto-renewal. For {@link ReceiveMode * auto-renewal is disabled. * * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock. * {@link Duration * * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException If {code maxAutoLockRenewDuration} is negative. */ public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { validateAndThrow(maxAutoLockRenewDuration); this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. 
*/ ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError(new IllegalArgumentException( "maxConcurrentSessions cannot be less than 1.")); } this.maxConcurrentSessions = maxConcurrentSessions; return this; } /** * Sets the prefetch count of the receiver. For both {@link ReceiveMode * ReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application asks for one using {@link ServiceBusReceiverAsyncClient * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch * off. * * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @throws IllegalArgumentException If {code prefetchCount} is negative. */ public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) { validateAndThrow(prefetchCount); this.prefetchCount = prefetchCount; return this; } /** * Sets the name of the queue to create a receiver for. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the receive mode for the receiver. * * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. */ public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) { this.receiveMode = receiveMode; return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. 
* @see */ public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) { this.subscriptionName = subscriptionName; return this; } /** * Sets the name of the topic. <b>{@link * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object. * @see */ public ServiceBusSessionReceiverClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link * ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() { return buildAsyncClientForProcessor(true); } /** * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link * ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ private ServiceBusReceiverAsyncClient buildAsyncClientForProcessor(boolean isAutoCompleteAllowed) { final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(logger, entityType, queueName, topicName, subscriptionName, SubQueue.NONE); if (!isAutoCompleteAllowed && enableAutoComplete) { logger.warning( "'enableAutoComplete' is not supported in synchronous client except through callback receive."); enableAutoComplete = false; } else if (enableAutoComplete && receiveMode == ReceiveMode.RECEIVE_AND_DELETE) { throw logger.logExceptionAsError(new IllegalStateException( "'enableAutoComplete' is not valid for RECEIVE_AND_DELETE mode.")); } if (receiveMode == ReceiveMode.RECEIVE_AND_DELETE) { maxAutoLockRenewDuration = Duration.ZERO; } final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions); final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath, entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions); return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, sessionManager); } /** * Creates an <b>asynchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link * ServiceBusMessage messages} from a specific queue or topic. 
* * @return An new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusSessionReceiverAsyncClient buildAsyncClient() { return buildAsyncClient(true); } /** * Creates a <b>synchronous</b>, <b>session-aware</b> Service Bus receiver responsible for reading {@link * ServiceBusMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusReceiverClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link */ public ServiceBusSessionReceiverClient buildClient() { return new ServiceBusSessionReceiverClient(buildAsyncClient(false), retryOptions.getTryTimeout()); } private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) { final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(logger, entityType, queueName, topicName, subscriptionName, SubQueue.NONE); if (!isAutoCompleteAllowed && enableAutoComplete) { logger.warning( "'enableAutoComplete' is not supported in synchronous client except through callback receive."); enableAutoComplete = false; } else if (enableAutoComplete && receiveMode == ReceiveMode.RECEIVE_AND_DELETE) { throw logger.logExceptionAsError(new IllegalStateException( "'enableAutoComplete' is not valid for RECEIVE_AND_DELETE mode.")); } if (receiveMode == ReceiveMode.RECEIVE_AND_DELETE) { maxAutoLockRenewDuration = Duration.ZERO; } final 
ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount, maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions); return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(), entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose); } }
/**
 * Builder for session-aware Service Bus receiver clients. Collects the entity (queue, or topic plus
 * subscription), receive mode, prefetch, and lock-renewal settings, and builds asynchronous or
 * synchronous session receivers. Instantiated only by the enclosing ServiceBusClientBuilder.
 */
class ServiceBusSessionReceiverClientBuilder {
    // Successfully processed messages are auto-completed (failed ones auto-abandoned) unless disabled.
    private boolean enableAutoComplete = true;
    // null = session roll-over disabled; set via maxConcurrentSessions(int).
    private Integer maxConcurrentSessions = null;
    private int prefetchCount = DEFAULT_PREFETCH_COUNT;
    private String queueName;
    private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK;
    private String subscriptionName;
    private String topicName;
    private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;

    // Only the enclosing builder creates instances.
    private ServiceBusSessionReceiverClientBuilder() {
    }

    /**
     * Disables auto-complete and auto-abandon of received messages.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder disableAutoComplete() {
        this.enableAutoComplete = false;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the session lock.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock.
     *
     * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is invalid — presumably
     *     negative; validation is delegated to {@code validateAndThrow}, defined in the enclosing
     *     builder (confirm its contract there).
     */
    public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
        return this;
    }

    /**
     * Enables session roll-over by processing at most {@code maxConcurrentSessions} sessions.
     *
     * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
     */
    ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
        if (maxConcurrentSessions < 1) {
            throw logger.logExceptionAsError(new IllegalArgumentException(
                "maxConcurrentSessions cannot be less than 1."));
        }
        this.maxConcurrentSessions = maxConcurrentSessions;
        return this;
    }

    /**
     * Sets the prefetch count of the receiver; zero turns prefetch off.
     *
     * @param prefetchCount The prefetch count.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code prefetchCount} is invalid per {@code validateAndThrow}.
     */
    public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) {
        validateAndThrow(prefetchCount);
        this.prefetchCount = prefetchCount;
        return this;
    }

    /**
     * Sets the name of the queue to create a receiver for.
     *
     * @param queueName Name of the queue.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the receive mode for the receiver.
     *
     * @param receiveMode Mode for receiving messages.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) {
        this.receiveMode = receiveMode;
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to; used together with
     * {@link #topicName(String)}.
     *
     * @param subscriptionName Name of the subscription.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) {
        this.subscriptionName = subscriptionName;
        return this;
    }

    /**
     * Sets the name of the topic; used together with {@link #subscriptionName(String)}.
     *
     * @param topicName Name of the topic.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates the processor-facing asynchronous session receiver with auto-complete allowed.
     *
     * @return A new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic.
     */
    ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() {
        return buildAsyncClientForProcessor(true);
    }

    /**
     * Shared build path for the processor-facing asynchronous session receiver.
     *
     * @param isAutoCompleteAllowed whether the requesting surface supports auto-complete; when false
     *     and auto-complete was requested, a warning is logged and it is switched off.
     *
     * @return A new {@link ServiceBusReceiverAsyncClient}.
     * @throws IllegalStateException if auto-complete is combined with RECEIVE_AND_DELETE mode, or if
     *     the queue/topic configuration fails {@code validateEntityPaths} (defined in the enclosing
     *     builder).
     */
    private ServiceBusReceiverAsyncClient buildAsyncClientForProcessor(boolean isAutoCompleteAllowed) {
        final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName,
            topicName, queueName);
        final String entityPath = getEntityPath(logger, entityType, queueName, topicName,
            subscriptionName, SubQueue.NONE);

        if (!isAutoCompleteAllowed && enableAutoComplete) {
            logger.warning(
                "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
            enableAutoComplete = false;
        } else if (enableAutoComplete && receiveMode == ReceiveMode.RECEIVE_AND_DELETE) {
            throw logger.logExceptionAsError(new IllegalStateException(
                "'enableAutoComplete' is not valid for RECEIVE_AND_DELETE mode."));
        }

        // Message locks do not apply in RECEIVE_AND_DELETE mode, so lock renewal is turned off.
        if (receiveMode == ReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }

        final ServiceBusConnectionProcessor connectionProcessor =
            getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);
        final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath,
            entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions);

        return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
            entityPath, entityType, receiverOptions, connectionProcessor,
            ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer,
            ServiceBusClientBuilder.this::onClientClose, sessionManager);
    }

    /**
     * Creates an asynchronous, session-aware Service Bus receiver.
     *
     * @return A new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a queue
     *     or topic.
     */
    public ServiceBusSessionReceiverAsyncClient buildAsyncClient() {
        return buildAsyncClient(true);
    }

    /**
     * Creates a synchronous, session-aware Service Bus receiver that wraps an asynchronous client
     * built without auto-complete.
     *
     * @return A new {@link ServiceBusSessionReceiverClient}.
     */
    public ServiceBusSessionReceiverClient buildClient() {
        return new ServiceBusSessionReceiverClient(buildAsyncClient(false), retryOptions.getTryTimeout());
    }

    /**
     * Shared build path for the session-aware asynchronous client.
     *
     * @param isAutoCompleteAllowed whether the requesting surface supports auto-complete; when false
     *     and auto-complete was requested, a warning is logged and it is switched off.
     *
     * @return A new {@link ServiceBusSessionReceiverAsyncClient}.
     * @throws IllegalStateException if auto-complete is combined with RECEIVE_AND_DELETE mode, or if
     *     the queue/topic configuration fails {@code validateEntityPaths}.
     */
    private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
        final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName,
            topicName, queueName);
        final String entityPath = getEntityPath(logger, entityType, queueName, topicName,
            subscriptionName, SubQueue.NONE);

        if (!isAutoCompleteAllowed && enableAutoComplete) {
            logger.warning(
                "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
            enableAutoComplete = false;
        } else if (enableAutoComplete && receiveMode == ReceiveMode.RECEIVE_AND_DELETE) {
            throw logger.logExceptionAsError(new IllegalStateException(
                "'enableAutoComplete' is not valid for RECEIVE_AND_DELETE mode."));
        }

        // Message locks do not apply in RECEIVE_AND_DELETE mode, so lock renewal is turned off.
        if (receiveMode == ReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }

        final ServiceBusConnectionProcessor connectionProcessor =
            getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);

        return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
            entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider,
            messageSerializer, ServiceBusClientBuilder.this::onClientClose);
    }
}
This timeout applies to a single attempt, right? If the user has configured the retry policy for 3 attempts, won't this fail to wait for the 2nd attempt after the first attempt fails due to timeout?
public ServiceBusSessionReceiverClient buildClient() { return new ServiceBusSessionReceiverClient(buildAsyncClient(false), retryOptions.getTryTimeout()); }
retryOptions.getTryTimeout());
public ServiceBusSessionReceiverClient buildClient() { return new ServiceBusSessionReceiverClient(buildAsyncClient(false), retryOptions.getTryTimeout()); }
/**
 * Builder for session-aware Service Bus receiver clients. Collects the entity (queue, or topic plus
 * subscription), receive mode, prefetch, and lock-renewal settings, and builds asynchronous or
 * synchronous session receivers. Instantiated only by the enclosing ServiceBusClientBuilder.
 */
class ServiceBusSessionReceiverClientBuilder {
    // Successfully processed messages are auto-completed (failed ones auto-abandoned) unless disabled.
    private boolean enableAutoComplete = true;
    // null = session roll-over disabled; set via maxConcurrentSessions(int).
    private Integer maxConcurrentSessions = null;
    private int prefetchCount = DEFAULT_PREFETCH_COUNT;
    private String queueName;
    private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK;
    private String subscriptionName;
    private String topicName;
    private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;

    // Only the enclosing builder creates instances.
    private ServiceBusSessionReceiverClientBuilder() {
    }

    /**
     * Disables auto-complete and auto-abandon of received messages.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder disableAutoComplete() {
        this.enableAutoComplete = false;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the session lock.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock.
     *
     * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is invalid — presumably
     *     negative; validation is delegated to {@code validateAndThrow}, defined in the enclosing
     *     builder (confirm its contract there).
     */
    public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
        return this;
    }

    /**
     * Enables session roll-over by processing at most {@code maxConcurrentSessions} sessions.
     *
     * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
     */
    ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
        if (maxConcurrentSessions < 1) {
            throw logger.logExceptionAsError(new IllegalArgumentException(
                "maxConcurrentSessions cannot be less than 1."));
        }
        this.maxConcurrentSessions = maxConcurrentSessions;
        return this;
    }

    /**
     * Sets the prefetch count of the receiver; zero turns prefetch off.
     *
     * @param prefetchCount The prefetch count.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code prefetchCount} is invalid per {@code validateAndThrow}.
     */
    public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) {
        validateAndThrow(prefetchCount);
        this.prefetchCount = prefetchCount;
        return this;
    }

    /**
     * Sets the name of the queue to create a receiver for.
     *
     * @param queueName Name of the queue.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the receive mode for the receiver.
     *
     * @param receiveMode Mode for receiving messages.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) {
        this.receiveMode = receiveMode;
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to; used together with
     * {@link #topicName(String)}.
     *
     * @param subscriptionName Name of the subscription.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) {
        this.subscriptionName = subscriptionName;
        return this;
    }

    /**
     * Sets the name of the topic; used together with {@link #subscriptionName(String)}.
     *
     * @param topicName Name of the topic.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates the processor-facing asynchronous session receiver with auto-complete allowed.
     *
     * @return A new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic.
     */
    ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() {
        return buildAsyncClientForProcessor(true);
    }

    /**
     * Creates the processor-facing synchronous session receiver, wrapping an asynchronous client
     * built without auto-complete.
     *
     * @return A new {@link ServiceBusReceiverClient}.
     */
    ServiceBusReceiverClient buildClientForProcessor() {
        return new ServiceBusReceiverClient(buildAsyncClientForProcessor(false),
            retryOptions.getTryTimeout());
    }

    /**
     * Shared build path for the processor-facing asynchronous session receiver.
     *
     * @param isAutoCompleteAllowed whether the requesting surface supports auto-complete; when false
     *     and auto-complete was requested, a warning is logged and it is switched off.
     *
     * @return A new {@link ServiceBusReceiverAsyncClient}.
     * @throws IllegalStateException if auto-complete is combined with RECEIVE_AND_DELETE mode, or if
     *     the queue/topic configuration fails {@code validateEntityPaths} (defined in the enclosing
     *     builder).
     */
    private ServiceBusReceiverAsyncClient buildAsyncClientForProcessor(boolean isAutoCompleteAllowed) {
        final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName,
            topicName, queueName);
        final String entityPath = getEntityPath(logger, entityType, queueName, topicName,
            subscriptionName, SubQueue.NONE);

        if (!isAutoCompleteAllowed && enableAutoComplete) {
            logger.warning(
                "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
            enableAutoComplete = false;
        } else if (enableAutoComplete && receiveMode == ReceiveMode.RECEIVE_AND_DELETE) {
            throw logger.logExceptionAsError(new IllegalStateException(
                "'enableAutoComplete' is not valid for RECEIVE_AND_DELETE mode."));
        }

        // Message locks do not apply in RECEIVE_AND_DELETE mode, so lock renewal is turned off.
        if (receiveMode == ReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }

        final ServiceBusConnectionProcessor connectionProcessor =
            getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);
        final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath,
            entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions);

        return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
            entityPath, entityType, receiverOptions, connectionProcessor,
            ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer,
            ServiceBusClientBuilder.this::onClientClose, sessionManager);
    }

    /**
     * Creates an asynchronous, session-aware Service Bus receiver.
     *
     * @return A new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a queue
     *     or topic.
     */
    public ServiceBusSessionReceiverAsyncClient buildAsyncClient() {
        return buildAsyncClient(true);
    }

    /**
     * Shared build path for the session-aware asynchronous client.
     *
     * @param isAutoCompleteAllowed whether the requesting surface supports auto-complete; when false
     *     and auto-complete was requested, a warning is logged and it is switched off.
     *
     * @return A new {@link ServiceBusSessionReceiverAsyncClient}.
     * @throws IllegalStateException if auto-complete is combined with RECEIVE_AND_DELETE mode, or if
     *     the queue/topic configuration fails {@code validateEntityPaths}.
     */
    private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
        final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName,
            topicName, queueName);
        final String entityPath = getEntityPath(logger, entityType, queueName, topicName,
            subscriptionName, SubQueue.NONE);

        if (!isAutoCompleteAllowed && enableAutoComplete) {
            logger.warning(
                "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
            enableAutoComplete = false;
        } else if (enableAutoComplete && receiveMode == ReceiveMode.RECEIVE_AND_DELETE) {
            throw logger.logExceptionAsError(new IllegalStateException(
                "'enableAutoComplete' is not valid for RECEIVE_AND_DELETE mode."));
        }

        // Message locks do not apply in RECEIVE_AND_DELETE mode, so lock renewal is turned off.
        if (receiveMode == ReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }

        final ServiceBusConnectionProcessor connectionProcessor =
            getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);

        return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
            entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider,
            messageSerializer, ServiceBusClientBuilder.this::onClientClose);
    }
}
/**
 * Builder for session-aware Service Bus receiver clients. Collects the entity (queue, or topic plus
 * subscription), receive mode, prefetch, and lock-renewal settings, and builds asynchronous or
 * synchronous session receivers. Instantiated only by the enclosing ServiceBusClientBuilder.
 */
class ServiceBusSessionReceiverClientBuilder {
    // Successfully processed messages are auto-completed (failed ones auto-abandoned) unless disabled.
    private boolean enableAutoComplete = true;
    // null = session roll-over disabled; set via maxConcurrentSessions(int).
    private Integer maxConcurrentSessions = null;
    private int prefetchCount = DEFAULT_PREFETCH_COUNT;
    private String queueName;
    private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK;
    private String subscriptionName;
    private String topicName;
    private Duration maxAutoLockRenewDuration = MAX_LOCK_RENEW_DEFAULT_DURATION;

    // Only the enclosing builder creates instances.
    private ServiceBusSessionReceiverClientBuilder() {
    }

    /**
     * Disables auto-complete and auto-abandon of received messages.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder disableAutoComplete() {
        this.enableAutoComplete = false;
        return this;
    }

    /**
     * Sets the amount of time to continue auto-renewing the session lock.
     *
     * @param maxAutoLockRenewDuration the amount of time to continue auto-renewing the session lock.
     *
     * @return The updated {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code maxAutoLockRenewDuration} is invalid — presumably
     *     negative; validation is delegated to {@code validateAndThrow}, defined in the enclosing
     *     builder (confirm its contract there).
     */
    public ServiceBusSessionReceiverClientBuilder maxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        validateAndThrow(maxAutoLockRenewDuration);
        this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
        return this;
    }

    /**
     * Enables session roll-over by processing at most {@code maxConcurrentSessions} sessions.
     *
     * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1.
     */
    ServiceBusSessionReceiverClientBuilder maxConcurrentSessions(int maxConcurrentSessions) {
        if (maxConcurrentSessions < 1) {
            throw logger.logExceptionAsError(new IllegalArgumentException(
                "maxConcurrentSessions cannot be less than 1."));
        }
        this.maxConcurrentSessions = maxConcurrentSessions;
        return this;
    }

    /**
     * Sets the prefetch count of the receiver; zero turns prefetch off.
     *
     * @param prefetchCount The prefetch count.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     * @throws IllegalArgumentException If {@code prefetchCount} is invalid per {@code validateAndThrow}.
     */
    public ServiceBusSessionReceiverClientBuilder prefetchCount(int prefetchCount) {
        validateAndThrow(prefetchCount);
        this.prefetchCount = prefetchCount;
        return this;
    }

    /**
     * Sets the name of the queue to create a receiver for.
     *
     * @param queueName Name of the queue.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder queueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * Sets the receive mode for the receiver.
     *
     * @param receiveMode Mode for receiving messages.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder receiveMode(ReceiveMode receiveMode) {
        this.receiveMode = receiveMode;
        return this;
    }

    /**
     * Sets the name of the subscription in the topic to listen to; used together with
     * {@link #topicName(String)}.
     *
     * @param subscriptionName Name of the subscription.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder subscriptionName(String subscriptionName) {
        this.subscriptionName = subscriptionName;
        return this;
    }

    /**
     * Sets the name of the topic; used together with {@link #subscriptionName(String)}.
     *
     * @param topicName Name of the topic.
     *
     * @return The modified {@link ServiceBusSessionReceiverClientBuilder} object.
     */
    public ServiceBusSessionReceiverClientBuilder topicName(String topicName) {
        this.topicName = topicName;
        return this;
    }

    /**
     * Creates the processor-facing asynchronous session receiver with auto-complete allowed.
     *
     * @return A new {@link ServiceBusReceiverAsyncClient} that receives messages from a queue or topic.
     */
    ServiceBusReceiverAsyncClient buildAsyncClientForProcessor() {
        return buildAsyncClientForProcessor(true);
    }

    /**
     * Creates the processor-facing synchronous session receiver, wrapping an asynchronous client
     * built without auto-complete.
     *
     * @return A new {@link ServiceBusReceiverClient}.
     */
    ServiceBusReceiverClient buildClientForProcessor() {
        return new ServiceBusReceiverClient(buildAsyncClientForProcessor(false),
            retryOptions.getTryTimeout());
    }

    /**
     * Shared build path for the processor-facing asynchronous session receiver.
     *
     * @param isAutoCompleteAllowed whether the requesting surface supports auto-complete; when false
     *     and auto-complete was requested, a warning is logged and it is switched off.
     *
     * @return A new {@link ServiceBusReceiverAsyncClient}.
     * @throws IllegalStateException if auto-complete is combined with RECEIVE_AND_DELETE mode, or if
     *     the queue/topic configuration fails {@code validateEntityPaths} (defined in the enclosing
     *     builder).
     */
    private ServiceBusReceiverAsyncClient buildAsyncClientForProcessor(boolean isAutoCompleteAllowed) {
        final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName,
            topicName, queueName);
        final String entityPath = getEntityPath(logger, entityType, queueName, topicName,
            subscriptionName, SubQueue.NONE);

        if (!isAutoCompleteAllowed && enableAutoComplete) {
            logger.warning(
                "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
            enableAutoComplete = false;
        } else if (enableAutoComplete && receiveMode == ReceiveMode.RECEIVE_AND_DELETE) {
            throw logger.logExceptionAsError(new IllegalStateException(
                "'enableAutoComplete' is not valid for RECEIVE_AND_DELETE mode."));
        }

        // Message locks do not apply in RECEIVE_AND_DELETE mode, so lock renewal is turned off.
        if (receiveMode == ReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }

        final ServiceBusConnectionProcessor connectionProcessor =
            getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);
        final ServiceBusSessionManager sessionManager = new ServiceBusSessionManager(entityPath,
            entityType, connectionProcessor, tracerProvider, messageSerializer, receiverOptions);

        return new ServiceBusReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
            entityPath, entityType, receiverOptions, connectionProcessor,
            ServiceBusConstants.OPERATION_TIMEOUT, tracerProvider, messageSerializer,
            ServiceBusClientBuilder.this::onClientClose, sessionManager);
    }

    /**
     * Creates an asynchronous, session-aware Service Bus receiver.
     *
     * @return A new {@link ServiceBusSessionReceiverAsyncClient} that receives messages from a queue
     *     or topic.
     */
    public ServiceBusSessionReceiverAsyncClient buildAsyncClient() {
        return buildAsyncClient(true);
    }

    /**
     * Shared build path for the session-aware asynchronous client.
     *
     * @param isAutoCompleteAllowed whether the requesting surface supports auto-complete; when false
     *     and auto-complete was requested, a warning is logged and it is switched off.
     *
     * @return A new {@link ServiceBusSessionReceiverAsyncClient}.
     * @throws IllegalStateException if auto-complete is combined with RECEIVE_AND_DELETE mode, or if
     *     the queue/topic configuration fails {@code validateEntityPaths}.
     */
    private ServiceBusSessionReceiverAsyncClient buildAsyncClient(boolean isAutoCompleteAllowed) {
        final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName,
            topicName, queueName);
        final String entityPath = getEntityPath(logger, entityType, queueName, topicName,
            subscriptionName, SubQueue.NONE);

        if (!isAutoCompleteAllowed && enableAutoComplete) {
            logger.warning(
                "'enableAutoComplete' is not supported in synchronous client except through callback receive.");
            enableAutoComplete = false;
        } else if (enableAutoComplete && receiveMode == ReceiveMode.RECEIVE_AND_DELETE) {
            throw logger.logExceptionAsError(new IllegalStateException(
                "'enableAutoComplete' is not valid for RECEIVE_AND_DELETE mode."));
        }

        // Message locks do not apply in RECEIVE_AND_DELETE mode, so lock renewal is turned off.
        if (receiveMode == ReceiveMode.RECEIVE_AND_DELETE) {
            maxAutoLockRenewDuration = Duration.ZERO;
        }

        final ServiceBusConnectionProcessor connectionProcessor =
            getOrCreateConnectionProcessor(messageSerializer);
        final ReceiverOptions receiverOptions = new ReceiverOptions(receiveMode, prefetchCount,
            maxAutoLockRenewDuration, enableAutoComplete, null, maxConcurrentSessions);

        return new ServiceBusSessionReceiverAsyncClient(connectionProcessor.getFullyQualifiedNamespace(),
            entityPath, entityType, receiverOptions, connectionProcessor, tracerProvider,
            messageSerializer, ServiceBusClientBuilder.this::onClientClose);
    }
}
The `actual.toString()` call can be deleted: its return value is discarded, so the statement has no effect on the test.
public void createFromString() { String socketString = "Amqpwebsockets"; AmqpTransportType actual = AmqpTransportType.fromString(socketString); actual.toString(); Assertions.assertEquals(AmqpTransportType.AMQP_WEB_SOCKETS, actual); }
actual.toString();
public void createFromString() { String socketString = "Amqpwebsockets"; AmqpTransportType actual = AmqpTransportType.fromString(socketString); Assertions.assertEquals(AmqpTransportType.AMQP_WEB_SOCKETS, actual); }
class AmqpTransportTypeTest { /** * Verifies that we can parse the transport type from string */ @Test /** * Verifies that an exception is thrown when an unknown transport type string is passed. */ @Test public void illegalTransportTypeString() { String socketString = "AmqpNonExistent"; assertThrows(IllegalArgumentException.class, () -> { AmqpTransportType actual = AmqpTransportType.fromString(socketString); }); } }
class AmqpTransportTypeTest { /** * Verifies that we can parse the transport type from string */ @Test /** * Verifies that an exception is thrown when an unknown transport type string is passed. */ @Test public void illegalTransportTypeString() { String socketString = "AmqpNonExistent"; assertThrows(IllegalArgumentException.class, () -> { AmqpTransportType actual = AmqpTransportType.fromString(socketString); }); } }
```suggestion final String lockToken = UUID.randomUUID().toString(); ```
void errorSourceOnReceiveMessage() { final String lockToken1 = UUID.randomUUID().toString(); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class))).thenReturn(Mono.error(new AmqpException(false, "some receive link Error.", null))); StepVerifier.create(receiver.receiveMessages().take(1)) .verifyErrorMatches(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RECEIVE, actual); return true; }); verify(amqpReceiveLink, never()).updateDisposition(eq(lockToken1), any(DeliveryState.class)); }
final String lockToken1 = UUID.randomUUID().toString();
void errorSourceOnReceiveMessage() { final String lockToken = UUID.randomUUID().toString(); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class))).thenReturn(Mono.error(new AmqpException(false, "some receive link Error.", null))); StepVerifier.create(receiver.receiveMessages().take(1)) .verifyErrorSatisfies(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RECEIVE, actual); }); verify(amqpReceiveLink, never()).updateDisposition(eq(lockToken), any(DeliveryState.class)); }
class ServiceBusReceiverAsyncClientTest { private static final ClientOptions CLIENT_OPTIONS = new ClientOptions(); private static final String PAYLOAD = "hello"; private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8); private static final int PREFETCH = 5; private static final String NAMESPACE = "my-namespace-foo.net"; private static final String ENTITY_PATH = "queue-name"; private static final MessagingEntityType ENTITY_TYPE = MessagingEntityType.QUEUE; private static final String NAMESPACE_CONNECTION_STRING = String.format( "Endpoint=sb: NAMESPACE, "some-name", "something-else"); private static final Duration CLEANUP_INTERVAL = Duration.ofSeconds(10); private static final String SESSION_ID = "my-session-id"; private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientTest.class); private final String messageTrackingUUID = UUID.randomUUID().toString(); private final ReplayProcessor<AmqpEndpointState> endpointProcessor = ReplayProcessor.cacheLast(); private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER); private final DirectProcessor<Message> messageProcessor = DirectProcessor.create(); private final FluxSink<Message> messageSink = messageProcessor.sink(FluxSink.OverflowStrategy.BUFFER); private ServiceBusConnectionProcessor connectionProcessor; private ServiceBusReceiverAsyncClient receiver; private ServiceBusReceiverAsyncClient sessionReceiver; @Mock private ServiceBusReactorReceiver amqpReceiveLink; @Mock private ServiceBusReactorReceiver sessionReceiveLink; @Mock private ServiceBusAmqpConnection connection; @Mock private TokenCredential tokenCredential; @Mock private MessageSerializer messageSerializer; @Mock private TracerProvider tracerProvider; @Mock private ServiceBusManagementNode managementNode; @Mock private ServiceBusReceivedMessage receivedMessage; @Mock private ServiceBusReceivedMessage receivedMessage2; @Mock private Runnable onClientClose; @BeforeAll static 
void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(100)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } @BeforeEach void setup(TestInfo testInfo) { logger.info("[{}] Setting up.", testInfo.getDisplayName()); MockitoAnnotations.initMocks(this); when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single())); when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor); when(sessionReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single())); when(sessionReceiveLink.getEndpointStates()).thenReturn(endpointProcessor); ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential, CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(), ProxyOptions.SYSTEM_DEFAULTS, Schedulers.boundedElastic(), CLIENT_OPTIONS, SslDomain.VerifyMode.VERIFY_PEER_NAME); when(connection.getEndpointStates()).thenReturn(endpointProcessor); endpointSink.next(AmqpEndpointState.ACTIVE); when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)) .thenReturn(Mono.just(managementNode)); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class))).thenReturn(Mono.just(amqpReceiveLink)); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class), anyString())).thenReturn(Mono.just(sessionReceiveLink)); connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection)) .subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry())); receiver = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, false), connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); sessionReceiver = new 
ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, false, "Some-Session", false, null), connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); } @AfterEach void teardown(TestInfo testInfo) { logger.info("[{}] Tearing down.", testInfo.getDisplayName()); receiver.close(); Mockito.framework().clearInlineMocks(); } /** * Verifies that when user calls peek more than one time, It returns different object. */ @SuppressWarnings("unchecked") @Test void peekTwoMessages() { final long sequence1 = 10; final long sequence2 = 12; final ArgumentCaptor<Long> captor = ArgumentCaptor.forClass(Long.class); when(receivedMessage.getSequenceNumber()).thenReturn(sequence1); when(receivedMessage2.getSequenceNumber()).thenReturn(sequence2); when(managementNode.peek(anyLong(), isNull(), isNull())) .thenReturn(Mono.just(receivedMessage), Mono.just(receivedMessage2)); StepVerifier.create(receiver.peekMessage()) .expectNext(receivedMessage) .verifyComplete(); StepVerifier.create(receiver.peekMessage()) .expectNext(receivedMessage2) .verifyComplete(); verify(managementNode, times(2)).peek(captor.capture(), isNull(), isNull()); final List<Long> allValues = captor.getAllValues(); Assertions.assertEquals(2, allValues.size()); Assertions.assertTrue(allValues.contains(0L)); Assertions.assertTrue(allValues.contains(11L)); } /** * Verifies that when no messages are returned, that it does not error. */ @Test void peekEmptyEntity() { when(managementNode.peek(0, null, null)) .thenReturn(Mono.empty()); StepVerifier.create(receiver.peekMessage()) .verifyComplete(); } /** * Verifies that this peek one messages from a sequence Number. 
*/ @Test void peekWithSequenceOneMessage() { final int fromSequenceNumber = 10; final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(managementNode.peek(fromSequenceNumber, null, null)).thenReturn(Mono.just(receivedMessage)); StepVerifier.create(receiver.peekMessageAt(fromSequenceNumber)) .expectNext(receivedMessage) .verifyComplete(); } /** * Verifies that this receives a number of messages. Verifies that the initial credits we add are equal to the * prefetch value. */ @Test void receivesNumberOfEvents() { final int numberOfEvents = 1; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(receivedMessage.getLockedUntil()).thenReturn(OffsetDateTime.now()); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); verify(amqpReceiveLink).addCredits(PREFETCH); verify(amqpReceiveLink, never()).updateDisposition(eq(lockToken), any()); } /** * Verifies that we error if we try to settle a message with null transaction-id. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void settleWithNullTransactionId(DispositionStatus dispositionStatus) { ServiceBusTransactionContext nullTransactionId = new ServiceBusTransactionContext(null); when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)).thenReturn(Mono.just(managementNode)); when(managementNode.updateDisposition(any(), eq(dispositionStatus), isNull(), isNull(), isNull(), isNull(), isNull(), isNull())) .thenReturn(Mono.delay(Duration.ofMillis(250)).then()); when(receivedMessage.getLockToken()).thenReturn("mylockToken"); final Mono<Void> operation; switch (dispositionStatus) { case DEFERRED: operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(nullTransactionId)); break; case ABANDONED: operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(nullTransactionId)); break; case COMPLETED: operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(nullTransactionId)); break; case SUSPENDED: operation = receiver.deadLetter(receivedMessage, new DeadLetterOptions().setTransactionContext(nullTransactionId)); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } StepVerifier.create(operation) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).updateDisposition(any(), eq(dispositionStatus), isNull(), isNull(), isNull(), isNull(), isNull(), isNull()); } /** * Verifies that we error if we try to complete a null message. */ @Test void completeNullMessage() { StepVerifier.create(receiver.complete(null)).expectError(NullPointerException.class).verify(); } /** * Verifies that we error if we complete in RECEIVE_AND_DELETE mode. 
*/ @Test void completeInReceiveAndDeleteMode() { final ReceiverOptions options = new ReceiverOptions(ReceiveMode.RECEIVE_AND_DELETE, PREFETCH, null, false); ServiceBusReceiverAsyncClient client = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, options, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); final String lockToken1 = UUID.randomUUID().toString(); when(receivedMessage.getLockToken()).thenReturn(lockToken1); try { StepVerifier.create(client.complete(receivedMessage)) .expectError(UnsupportedOperationException.class) .verify(); } finally { client.close(); } } /** * Verifies that this peek batch of messages. */ @Test void peekMessages() { final int numberOfEvents = 2; when(managementNode.peek(0, null, null, numberOfEvents)) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.peekMessages(numberOfEvents)) .expectNextCount(numberOfEvents) .verifyComplete(); } /** * Verifies that this peek batch of messages. */ @Test void peekMessagesEmptyEntity() { final int numberOfEvents = 2; when(managementNode.peek(0, null, null, numberOfEvents)) .thenReturn(Flux.fromIterable(Collections.emptyList())); StepVerifier.create(receiver.peekMessages(numberOfEvents)) .verifyComplete(); } /** * Verifies that this peek batch of messages from a sequence Number. */ @Test void peekBatchWithSequenceNumberMessages() { final int numberOfEvents = 2; final int fromSequenceNumber = 10; when(managementNode.peek(fromSequenceNumber, null, null, numberOfEvents)) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.peekMessagesAt(numberOfEvents, fromSequenceNumber)) .expectNext(receivedMessage, receivedMessage2) .verifyComplete(); } /** * Verifies that we can deadletter a message with an error and description. 
*/ @Test void deadLetterWithDescription() { final String lockToken1 = UUID.randomUUID().toString(); final String description = "some-dead-letter-description"; final String reason = "dead-letter-reason"; final Map<String, Object> propertiesToModify = new HashMap<>(); propertiesToModify.put("something", true); final DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setDeadLetterReason(reason) .setDeadLetterErrorDescription(description) .setPropertiesToModify(propertiesToModify); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(amqpReceiveLink.updateDisposition(eq(lockToken1), argThat(e -> e.getType() == DeliveryStateType.Rejected))).thenReturn(Mono.empty()); StepVerifier.create(receiver.receiveMessages() .take(1) .flatMap(context -> receiver.deadLetter(context.getMessage(), deadLetterOptions))) .then(() -> messageSink.next(message)) .expectNext() .verifyComplete(); verify(amqpReceiveLink).updateDisposition(eq(lockToken1), isA(Rejected.class)); } /** * Verifies that error source is populated when any error happened while renewing lock. 
*/ @Test void errorSourceOnRenewMessageLock() { final Duration maxDuration = Duration.ofSeconds(8); final String lockToken = "some-token"; when(receivedMessage.getLockToken()).thenReturn(lockToken); when(managementNode.renewMessageLock(lockToken, null)) .thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .verifyErrorMatches(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RENEW_LOCK, actual); return true; }); verify(managementNode, times(1)).renewMessageLock(lockToken, null); } /** * Verifies that error source is populated when any error happened while renewing lock. */ @Test void errorSourceOnSessionLock() { when(managementNode.renewSessionLock(SESSION_ID, null)).thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(sessionReceiver.renewSessionLock(SESSION_ID)) .verifyErrorMatches(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RENEW_LOCK, actual); return true; }); } /** * Verifies that error source is populated when there is any error during message settlement. 
*/ @ParameterizedTest @MethodSource void errorSourceOnSettlement(DispositionStatus dispositionStatus, ServiceBusErrorSource expectedErrorSource, DeliveryStateType expectedDeliveryState) { final String lockToken1 = UUID.randomUUID().toString(); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(amqpReceiveLink.updateDisposition(eq(lockToken1), argThat(e -> e.getType() == expectedDeliveryState))) .thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(receiver.receiveMessages().take(1) .flatMap(context -> { final Mono<Void> operation; switch (dispositionStatus) { case DEFERRED: operation = receiver.defer(receivedMessage); break; case ABANDONED: operation = receiver.abandon(receivedMessage); break; case COMPLETED: operation = receiver.complete(receivedMessage); break; case SUSPENDED: operation = receiver.deadLetter(receivedMessage); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } return operation; })) .then(() -> messageSink.next(message)) .expectNext() .verifyErrorMatches(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(expectedErrorSource, actual); return true; }); verify(amqpReceiveLink).updateDisposition(eq(lockToken1), any(DeliveryState.class)); } /** * Verifies that error source is populated when there is any error during receiving of message. */ @Test /** * Verifies that the user can complete settlement methods on received message. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void settleMessageOnManagement(DispositionStatus dispositionStatus) { final String lockToken1 = UUID.randomUUID().toString(); final String lockToken2 = UUID.randomUUID().toString(); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final long sequenceNumber = 10L; final long sequenceNumber2 = 15L; final MessageWithLockToken message = mock(MessageWithLockToken.class); final MessageWithLockToken message2 = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(messageSerializer.deserialize(message2, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage2); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(receivedMessage2.getLockToken()).thenReturn(lockToken2); when(receivedMessage2.getLockedUntil()).thenReturn(expiration); when(connection.getManagementNode(eq(ENTITY_PATH), eq(ENTITY_TYPE))) .thenReturn(Mono.just(managementNode)); when(managementNode.receiveDeferredMessages(eq(ReceiveMode.PEEK_LOCK), isNull(), isNull(), argThat(arg -> { boolean foundFirst = false; boolean foundSecond = false; for (Long seq : arg) { if (!foundFirst && sequenceNumber == seq) { foundFirst = true; } else if (!foundSecond && sequenceNumber2 == seq) { foundSecond = true; } } return foundFirst && foundSecond; }))) .thenReturn(Flux.just(receivedMessage, receivedMessage2)); when(managementNode.updateDisposition(lockToken1, dispositionStatus, null, null, null, null, null, null)) .thenReturn(Mono.empty()); when(managementNode.updateDisposition(lockToken2, dispositionStatus, null, null, null, null, null, null)) .thenReturn(Mono.empty()); StepVerifier.create(receiver.receiveDeferredMessages(Arrays.asList(sequenceNumber, sequenceNumber2))) .expectNext(receivedMessage, receivedMessage2) .verifyComplete(); final Mono<Void> operation; 
switch (dispositionStatus) { case DEFERRED: operation = receiver.defer(receivedMessage); break; case ABANDONED: operation = receiver.abandon(receivedMessage); break; case COMPLETED: operation = receiver.complete(receivedMessage); break; case SUSPENDED: operation = receiver.deadLetter(receivedMessage); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } StepVerifier.create(operation) .verifyComplete(); verify(managementNode).updateDisposition(lockToken1, dispositionStatus, null, null, null, null, null, null); verify(managementNode, never()).updateDisposition(lockToken2, dispositionStatus, null, null, null, null, null, null); } /** * Verifies that this receive deferred one messages from a sequence Number. */ @Test void receiveDeferredWithSequenceOneMessage() { final int fromSequenceNumber = 10; final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(managementNode.receiveDeferredMessages(any(), any(), any(), any())).thenReturn(Flux.just(receivedMessage)); StepVerifier.create(receiver.receiveDeferredMessage(fromSequenceNumber)) .expectNext(receivedMessage) .verifyComplete(); } /** * Verifies that this receive deferred messages from a sequence Number. */ @Test void receiveDeferredBatchFromSequenceNumber() { final long fromSequenceNumber1 = 10; final long fromSequenceNumber2 = 11; when(managementNode.receiveDeferredMessages(any(), any(), any(), any())) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.receiveDeferredMessages(Arrays.asList(fromSequenceNumber1, fromSequenceNumber2))) .expectNext(receivedMessage) .expectNext(receivedMessage2) .verifyComplete(); } /** * Verifies that the onClientClose is called. */ @Test void callsClientClose() { receiver.close(); verify(onClientClose).run(); } /** * Verifies that the onClientClose is only called once. 
*/ @Test void callsClientCloseOnce() { receiver.close(); receiver.close(); verify(onClientClose).run(); } /** * Tests that invalid options throws and null options. */ @Test void receiveIllegalOptions() { ServiceBusReceiverClientBuilder builder = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver() .topicName("baz").subscriptionName("bar") .receiveMode(ReceiveMode.PEEK_LOCK); Assertions.assertThrows(IllegalArgumentException.class, () -> builder.prefetchCount(-1)); } @Test void topicCorrectEntityPath() { final String topicName = "foo"; final String subscriptionName = "bar"; final String entityPath = String.join("/", topicName, "subscriptions", subscriptionName); final ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver() .topicName(topicName) .subscriptionName(subscriptionName) .receiveMode(ReceiveMode.PEEK_LOCK) .buildAsyncClient(); final String actual = receiver.getEntityPath(); final String actualNamespace = receiver.getFullyQualifiedNamespace(); Assertions.assertEquals(entityPath, actual); Assertions.assertEquals(NAMESPACE, actualNamespace); } /** * Verifies that client can call multiple receiveMessages on same receiver instance. 
*/ @Test void canPerformMultipleReceive() { final int numberOfEvents = 1; final List<Message> messages = getMessages(); ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(receivedMessage.getLockedUntil()).thenReturn(OffsetDateTime.now()); when(receivedMessage.getLockToken()).thenReturn(UUID.randomUUID().toString()); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); verify(amqpReceiveLink).addCredits(PREFETCH); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformGetSessionState() { final String sessionId = "a-session-id"; StepVerifier.create(receiver.getSessionState(sessionId)) .expectError(IllegalStateException.class) .verify(); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformSetSessionState() { final String sessionId = "a-session-id"; final byte[] sessionState = new byte[]{10, 11, 8}; StepVerifier.create(receiver.setSessionState(sessionId, sessionState)) .expectError(IllegalStateException.class) .verify(); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformRenewSessionLock() { final String sessionId = "a-session-id"; StepVerifier.create(receiver.renewSessionLock(sessionId)) .expectError(IllegalStateException.class) .verify(); } /** * Verifies that we can get a session state. 
*/ @SuppressWarnings("unchecked") @Test void getSessionState() { final byte[] bytes = new byte[]{95, 11, 54, 10}; when(managementNode.getSessionState(SESSION_ID, null)) .thenReturn(Mono.just(bytes), Mono.empty()); StepVerifier.create(sessionReceiver.getSessionState(SESSION_ID)) .expectNext(bytes) .expectComplete() .verify(); } /** * Verifies that we can set a session state. */ @Test void setSessionState() { final byte[] bytes = new byte[]{95, 11, 54, 10}; when(managementNode.setSessionState(SESSION_ID, bytes, null)).thenReturn(Mono.empty()); StepVerifier.create(sessionReceiver.setSessionState(SESSION_ID, bytes)) .expectComplete() .verify(); } /** * Verifies that we can renew a session state. */ @Test void renewSessionLock() { final OffsetDateTime expiry = Instant.ofEpochSecond(1588011761L).atOffset(ZoneOffset.UTC); when(managementNode.renewSessionLock(SESSION_ID, null)).thenReturn(Mono.just(expiry)); StepVerifier.create(sessionReceiver.renewSessionLock(SESSION_ID)) .expectNext(expiry) .expectComplete() .verify(); } /** * Verifies that we cannot renew a message lock when using a session receiver. */ @Test void cannotRenewMessageLockInSession() { when(receivedMessage.getLockToken()).thenReturn("lock-token"); when(receivedMessage.getSessionId()).thenReturn("fo"); StepVerifier.create(sessionReceiver.renewMessageLock(receivedMessage)) .expectError(IllegalStateException.class) .verify(); } /** * Verifies that we can auto-renew a message lock. 
*/ @Test void autoRenewMessageLock() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String lockToken = "some-token"; final int atMost = 5; final Duration totalSleepPeriod = maxDuration.plusMillis(500); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(managementNode.renewMessageLock(lockToken, null)) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .thenAwait(totalSleepPeriod) .then(() -> logger.info("Finished renewals for first sleep.")) .expectComplete() .verify(Duration.ofSeconds(5)); verify(managementNode, Mockito.atMost(atMost)).renewMessageLock(lockToken, null); } /** * Verifies that it errors when we try a null lock token. */ @Test void autoRenewMessageLockErrorNull() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); when(receivedMessage.getLockToken()).thenReturn(null); when(managementNode.renewMessageLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).renewMessageLock(anyString(), isNull()); } /** * Verifies that it errors when we try an empty string lock token. 
*/ @Test void autoRenewMessageLockErrorEmptyString() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String lockToken = ""; when(receivedMessage.getLockToken()).thenReturn(""); when(managementNode.renewMessageLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .expectError(IllegalArgumentException.class) .verify(); verify(managementNode, never()).renewMessageLock(anyString(), isNull()); } /** * Verifies that we can auto-renew a session lock. */ @Test void autoRenewSessionLock() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String sessionId = "some-token"; final int atMost = 5; final Duration totalSleepPeriod = maxDuration.plusMillis(500); when(managementNode.renewSessionLock(sessionId, null)) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(sessionId, maxDuration)) .thenAwait(totalSleepPeriod) .then(() -> logger.info("Finished renewals for first sleep.")) .expectComplete() .verify(Duration.ofSeconds(5)); verify(managementNode, Mockito.atMost(atMost)).renewSessionLock(sessionId, null); } /** * Verifies that it errors when we try a null lock token. 
*/ @Test void autoRenewSessionLockErrorNull() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); when(managementNode.renewSessionLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(null, maxDuration)) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).renewSessionLock(anyString(), isNull()); } /** * Verifies that it errors when we try an empty string session id */ @Test void autoRenewSessionLockErrorEmptyString() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String sessionId = ""; when(managementNode.renewSessionLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(sessionId, maxDuration)) .expectError(IllegalArgumentException.class) .verify(); verify(managementNode, never()).renewSessionLock(anyString(), isNull()); } @Test void autoCompleteMessage() { final int numberOfEvents = 3; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); final ReceiverOptions receiverOptions = new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, true); final ServiceBusReceiverAsyncClient receiver2 = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, receiverOptions, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); when(amqpReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.empty()); try { StepVerifier.create(receiver2.receiveMessages().take(numberOfEvents)) .then(() -> 
messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); } finally { receiver2.close(); } verify(amqpReceiveLink, times(numberOfEvents)).updateDisposition(lockToken, Accepted.getInstance()); } @Test void autoCompleteMessageSessionReceiver() { final int numberOfEvents = 3; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); final ReceiverOptions receiverOptions = new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, true, "Some-Session", false, null); final ServiceBusReceiverAsyncClient sessionReceiver2 = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, receiverOptions, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); when(sessionReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.empty()); try { StepVerifier.create(sessionReceiver2.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); } finally { sessionReceiver2.close(); } verify(sessionReceiveLink, times(numberOfEvents)).updateDisposition(lockToken, Accepted.getInstance()); } private List<Message> getMessages() { final Map<String, String> map = Collections.singletonMap("SAMPLE_HEADER", "foo"); return IntStream.range(0, 10) .mapToObj(index -> getMessage(PAYLOAD_BYTES, messageTrackingUUID, map)) .collect(Collectors.toList()); } private static Stream<Arguments> errorSourceOnSettlement() { return Stream.of( Arguments.of(DispositionStatus.DEFERRED, ServiceBusErrorSource.DEFER, DeliveryStateType.Modified), Arguments.of(DispositionStatus.COMPLETED, ServiceBusErrorSource.COMPLETE, DeliveryStateType.Accepted), Arguments.of(DispositionStatus.SUSPENDED, 
ServiceBusErrorSource.DEAD_LETTER, DeliveryStateType.Rejected), Arguments.of(DispositionStatus.ABANDONED, ServiceBusErrorSource.ABANDONED, DeliveryStateType.Modified)); } }
class ServiceBusReceiverAsyncClientTest { private static final ClientOptions CLIENT_OPTIONS = new ClientOptions(); private static final String PAYLOAD = "hello"; private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8); private static final int PREFETCH = 5; private static final String NAMESPACE = "my-namespace-foo.net"; private static final String ENTITY_PATH = "queue-name"; private static final MessagingEntityType ENTITY_TYPE = MessagingEntityType.QUEUE; private static final String NAMESPACE_CONNECTION_STRING = String.format( "Endpoint=sb: NAMESPACE, "some-name", "something-else"); private static final Duration CLEANUP_INTERVAL = Duration.ofSeconds(10); private static final String SESSION_ID = "my-session-id"; private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientTest.class); private final String messageTrackingUUID = UUID.randomUUID().toString(); private final ReplayProcessor<AmqpEndpointState> endpointProcessor = ReplayProcessor.cacheLast(); private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER); private final DirectProcessor<Message> messageProcessor = DirectProcessor.create(); private final FluxSink<Message> messageSink = messageProcessor.sink(FluxSink.OverflowStrategy.BUFFER); private ServiceBusConnectionProcessor connectionProcessor; private ServiceBusReceiverAsyncClient receiver; private ServiceBusReceiverAsyncClient sessionReceiver; @Mock private ServiceBusReactorReceiver amqpReceiveLink; @Mock private ServiceBusReactorReceiver sessionReceiveLink; @Mock private ServiceBusAmqpConnection connection; @Mock private TokenCredential tokenCredential; @Mock private MessageSerializer messageSerializer; @Mock private TracerProvider tracerProvider; @Mock private ServiceBusManagementNode managementNode; @Mock private ServiceBusReceivedMessage receivedMessage; @Mock private ServiceBusReceivedMessage receivedMessage2; @Mock private Runnable onClientClose; @BeforeAll static 
void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(100)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } @BeforeEach void setup(TestInfo testInfo) { logger.info("[{}] Setting up.", testInfo.getDisplayName()); MockitoAnnotations.initMocks(this); when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single())); when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor); when(sessionReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single())); when(sessionReceiveLink.getEndpointStates()).thenReturn(endpointProcessor); ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential, CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(), ProxyOptions.SYSTEM_DEFAULTS, Schedulers.boundedElastic(), CLIENT_OPTIONS, SslDomain.VerifyMode.VERIFY_PEER_NAME); when(connection.getEndpointStates()).thenReturn(endpointProcessor); endpointSink.next(AmqpEndpointState.ACTIVE); when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)) .thenReturn(Mono.just(managementNode)); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class))).thenReturn(Mono.just(amqpReceiveLink)); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class), anyString())).thenReturn(Mono.just(sessionReceiveLink)); connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection)) .subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry())); receiver = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, false), connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); sessionReceiver = new 
ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, false, "Some-Session", null), connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); } @AfterEach void teardown(TestInfo testInfo) { logger.info("[{}] Tearing down.", testInfo.getDisplayName()); receiver.close(); Mockito.framework().clearInlineMocks(); } /** * Verifies that when user calls peek more than one time, It returns different object. */ @SuppressWarnings("unchecked") @Test void peekTwoMessages() { final long sequence1 = 10; final long sequence2 = 12; final ArgumentCaptor<Long> captor = ArgumentCaptor.forClass(Long.class); when(receivedMessage.getSequenceNumber()).thenReturn(sequence1); when(receivedMessage2.getSequenceNumber()).thenReturn(sequence2); when(managementNode.peek(anyLong(), isNull(), isNull())) .thenReturn(Mono.just(receivedMessage), Mono.just(receivedMessage2)); StepVerifier.create(receiver.peekMessage()) .expectNext(receivedMessage) .verifyComplete(); StepVerifier.create(receiver.peekMessage()) .expectNext(receivedMessage2) .verifyComplete(); verify(managementNode, times(2)).peek(captor.capture(), isNull(), isNull()); final List<Long> allValues = captor.getAllValues(); Assertions.assertEquals(2, allValues.size()); Assertions.assertTrue(allValues.contains(0L)); Assertions.assertTrue(allValues.contains(11L)); } /** * Verifies that when no messages are returned, that it does not error. */ @Test void peekEmptyEntity() { when(managementNode.peek(0, null, null)) .thenReturn(Mono.empty()); StepVerifier.create(receiver.peekMessage()) .verifyComplete(); } /** * Verifies that this peek one messages from a sequence Number. 
*/ @Test void peekWithSequenceOneMessage() { final int fromSequenceNumber = 10; final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(managementNode.peek(fromSequenceNumber, null, null)).thenReturn(Mono.just(receivedMessage)); StepVerifier.create(receiver.peekMessageAt(fromSequenceNumber)) .expectNext(receivedMessage) .verifyComplete(); } /** * Verifies that this receives a number of messages. Verifies that the initial credits we add are equal to the * prefetch value. */ @Test void receivesNumberOfEvents() { final int numberOfEvents = 1; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(receivedMessage.getLockedUntil()).thenReturn(OffsetDateTime.now()); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); verify(amqpReceiveLink).addCredits(PREFETCH); verify(amqpReceiveLink, never()).updateDisposition(eq(lockToken), any()); } /** * Verifies that we error if we try to settle a message with null transaction-id. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void settleWithNullTransactionId(DispositionStatus dispositionStatus) { ServiceBusTransactionContext nullTransactionId = new ServiceBusTransactionContext(null); when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)).thenReturn(Mono.just(managementNode)); when(managementNode.updateDisposition(any(), eq(dispositionStatus), isNull(), isNull(), isNull(), isNull(), isNull(), isNull())) .thenReturn(Mono.delay(Duration.ofMillis(250)).then()); when(receivedMessage.getLockToken()).thenReturn("mylockToken"); final Mono<Void> operation; switch (dispositionStatus) { case DEFERRED: operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(nullTransactionId)); break; case ABANDONED: operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(nullTransactionId)); break; case COMPLETED: operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(nullTransactionId)); break; case SUSPENDED: operation = receiver.deadLetter(receivedMessage, new DeadLetterOptions().setTransactionContext(nullTransactionId)); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } StepVerifier.create(operation) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).updateDisposition(any(), eq(dispositionStatus), isNull(), isNull(), isNull(), isNull(), isNull(), isNull()); } /** * Verifies that we error if we try to complete a null message. */ @Test void completeNullMessage() { StepVerifier.create(receiver.complete(null)).expectError(NullPointerException.class).verify(); } /** * Verifies that we error if we complete in RECEIVE_AND_DELETE mode. 
*/ @Test void completeInReceiveAndDeleteMode() { final ReceiverOptions options = new ReceiverOptions(ReceiveMode.RECEIVE_AND_DELETE, PREFETCH, null, false); ServiceBusReceiverAsyncClient client = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, options, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); final String lockToken1 = UUID.randomUUID().toString(); when(receivedMessage.getLockToken()).thenReturn(lockToken1); try { StepVerifier.create(client.complete(receivedMessage)) .expectError(UnsupportedOperationException.class) .verify(); } finally { client.close(); } } /** * Verifies that this peek batch of messages. */ @Test void peekMessages() { final int numberOfEvents = 2; when(managementNode.peek(0, null, null, numberOfEvents)) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.peekMessages(numberOfEvents)) .expectNextCount(numberOfEvents) .verifyComplete(); } /** * Verifies that this peek batch of messages. */ @Test void peekMessagesEmptyEntity() { final int numberOfEvents = 2; when(managementNode.peek(0, null, null, numberOfEvents)) .thenReturn(Flux.fromIterable(Collections.emptyList())); StepVerifier.create(receiver.peekMessages(numberOfEvents)) .verifyComplete(); } /** * Verifies that this peek batch of messages from a sequence Number. */ @Test void peekBatchWithSequenceNumberMessages() { final int numberOfEvents = 2; final int fromSequenceNumber = 10; when(managementNode.peek(fromSequenceNumber, null, null, numberOfEvents)) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.peekMessagesAt(numberOfEvents, fromSequenceNumber)) .expectNext(receivedMessage, receivedMessage2) .verifyComplete(); } /** * Verifies that we can deadletter a message with an error and description. 
*/ @Test void deadLetterWithDescription() { final String lockToken1 = UUID.randomUUID().toString(); final String description = "some-dead-letter-description"; final String reason = "dead-letter-reason"; final Map<String, Object> propertiesToModify = new HashMap<>(); propertiesToModify.put("something", true); final DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setDeadLetterReason(reason) .setDeadLetterErrorDescription(description) .setPropertiesToModify(propertiesToModify); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(amqpReceiveLink.updateDisposition(eq(lockToken1), argThat(e -> e.getType() == DeliveryStateType.Rejected))).thenReturn(Mono.empty()); StepVerifier.create(receiver.receiveMessages() .take(1) .flatMap(context -> receiver.deadLetter(context.getMessage(), deadLetterOptions))) .then(() -> messageSink.next(message)) .expectNext() .verifyComplete(); verify(amqpReceiveLink).updateDisposition(eq(lockToken1), isA(Rejected.class)); } /** * Verifies that error source is populated when any error happened while renewing lock. 
*/ @Test void errorSourceOnRenewMessageLock() { final Duration maxDuration = Duration.ofSeconds(8); final String lockToken = "some-token"; when(receivedMessage.getLockToken()).thenReturn(lockToken); when(managementNode.renewMessageLock(lockToken, null)) .thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .verifyErrorSatisfies(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RENEW_LOCK, actual); }); verify(managementNode, times(1)).renewMessageLock(lockToken, null); } /** * Verifies that error source is populated when any error happened while renewing lock. */ @Test void errorSourceOnSessionLock() { when(managementNode.renewSessionLock(SESSION_ID, null)).thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(sessionReceiver.renewSessionLock(SESSION_ID)) .verifyErrorSatisfies(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RENEW_LOCK, actual); }); } /** * Verifies that error source is not populated when there is no autoComplete. Because user wanted to settle on their * own, we do not need to populate ErrorSource. 
*/ @ParameterizedTest @MethodSource void errorSourceNoneOnSettlement(DispositionStatus dispositionStatus, DeliveryStateType expectedDeliveryState) { final UUID lockTokenUuid = UUID.randomUUID(); final String lockToken1 = lockTokenUuid.toString(); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(amqpReceiveLink.updateDisposition(eq(lockToken1), argThat(e -> e.getType() == expectedDeliveryState))) .thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(receiver.receiveMessages().take(1) .flatMap(context -> { final Mono<Void> operation; switch (dispositionStatus) { case ABANDONED: operation = receiver.abandon(context.getMessage()); break; case COMPLETED: operation = receiver.complete(context.getMessage()); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } return operation; }) ) .then(() -> messageSink.next(message)) .expectNext() .verifyErrorSatisfies(throwable -> { Assertions.assertFalse(throwable instanceof ServiceBusAmqpException); Assertions.assertTrue(throwable instanceof AmqpException); }); verify(amqpReceiveLink).updateDisposition(eq(lockToken1), any(DeliveryState.class)); } /** * Ensure that we throw right error source when there is any issue during autocomplete. 
Error source should be * {@link ServiceBusErrorSource */ @Test void errorSourceAutoCompleteMessage() { final int numberOfEvents = 2; final int messagesToReceive = 1; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); final ReceiverOptions receiverOptions = new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, true); final ServiceBusReceiverAsyncClient receiver2 = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, receiverOptions, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); when(amqpReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); try { StepVerifier.create(receiver2.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(messagesToReceive) .verifyErrorSatisfies(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.COMPLETE, actual); }); } finally { receiver2.close(); } verify(amqpReceiveLink, atLeast(messagesToReceive)).updateDisposition(lockToken, Accepted.getInstance()); } /** * Verifies that error source is populated when there is any error during receiving of message. */ @Test /** * Verifies that the user can complete settlement methods on received message. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void settleMessageOnManagement(DispositionStatus dispositionStatus) { final String lockToken1 = UUID.randomUUID().toString(); final String lockToken2 = UUID.randomUUID().toString(); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final long sequenceNumber = 10L; final long sequenceNumber2 = 15L; final MessageWithLockToken message = mock(MessageWithLockToken.class); final MessageWithLockToken message2 = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(messageSerializer.deserialize(message2, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage2); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(receivedMessage2.getLockToken()).thenReturn(lockToken2); when(receivedMessage2.getLockedUntil()).thenReturn(expiration); when(connection.getManagementNode(eq(ENTITY_PATH), eq(ENTITY_TYPE))) .thenReturn(Mono.just(managementNode)); when(managementNode.receiveDeferredMessages(eq(ReceiveMode.PEEK_LOCK), isNull(), isNull(), argThat(arg -> { boolean foundFirst = false; boolean foundSecond = false; for (Long seq : arg) { if (!foundFirst && sequenceNumber == seq) { foundFirst = true; } else if (!foundSecond && sequenceNumber2 == seq) { foundSecond = true; } } return foundFirst && foundSecond; }))) .thenReturn(Flux.just(receivedMessage, receivedMessage2)); when(managementNode.updateDisposition(lockToken1, dispositionStatus, null, null, null, null, null, null)) .thenReturn(Mono.empty()); when(managementNode.updateDisposition(lockToken2, dispositionStatus, null, null, null, null, null, null)) .thenReturn(Mono.empty()); StepVerifier.create(receiver.receiveDeferredMessages(Arrays.asList(sequenceNumber, sequenceNumber2))) .expectNext(receivedMessage, receivedMessage2) .verifyComplete(); final Mono<Void> operation; 
switch (dispositionStatus) { case DEFERRED: operation = receiver.defer(receivedMessage); break; case ABANDONED: operation = receiver.abandon(receivedMessage); break; case COMPLETED: operation = receiver.complete(receivedMessage); break; case SUSPENDED: operation = receiver.deadLetter(receivedMessage); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } StepVerifier.create(operation) .verifyComplete(); verify(managementNode).updateDisposition(lockToken1, dispositionStatus, null, null, null, null, null, null); verify(managementNode, never()).updateDisposition(lockToken2, dispositionStatus, null, null, null, null, null, null); } /** * Verifies that this receive deferred one messages from a sequence Number. */ @Test void receiveDeferredWithSequenceOneMessage() { final int fromSequenceNumber = 10; final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(managementNode.receiveDeferredMessages(any(), any(), any(), any())).thenReturn(Flux.just(receivedMessage)); StepVerifier.create(receiver.receiveDeferredMessage(fromSequenceNumber)) .expectNext(receivedMessage) .verifyComplete(); } /** * Verifies that this receive deferred messages from a sequence Number. */ @Test void receiveDeferredBatchFromSequenceNumber() { final long fromSequenceNumber1 = 10; final long fromSequenceNumber2 = 11; when(managementNode.receiveDeferredMessages(any(), any(), any(), any())) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.receiveDeferredMessages(Arrays.asList(fromSequenceNumber1, fromSequenceNumber2))) .expectNext(receivedMessage) .expectNext(receivedMessage2) .verifyComplete(); } /** * Verifies that the onClientClose is called. */ @Test void callsClientClose() { receiver.close(); verify(onClientClose).run(); } /** * Verifies that the onClientClose is only called once. 
*/ @Test void callsClientCloseOnce() { receiver.close(); receiver.close(); verify(onClientClose).run(); } /** * Tests that invalid options throws and null options. */ @Test void receiveIllegalOptions() { ServiceBusReceiverClientBuilder builder = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver() .topicName("baz").subscriptionName("bar") .receiveMode(ReceiveMode.PEEK_LOCK); Assertions.assertThrows(IllegalArgumentException.class, () -> builder.prefetchCount(-1)); } @Test void topicCorrectEntityPath() { final String topicName = "foo"; final String subscriptionName = "bar"; final String entityPath = String.join("/", topicName, "subscriptions", subscriptionName); final ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver() .topicName(topicName) .subscriptionName(subscriptionName) .receiveMode(ReceiveMode.PEEK_LOCK) .buildAsyncClient(); final String actual = receiver.getEntityPath(); final String actualNamespace = receiver.getFullyQualifiedNamespace(); Assertions.assertEquals(entityPath, actual); Assertions.assertEquals(NAMESPACE, actualNamespace); } /** * Verifies that client can call multiple receiveMessages on same receiver instance. 
*/ @Test void canPerformMultipleReceive() { final int numberOfEvents = 1; final List<Message> messages = getMessages(); ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(receivedMessage.getLockedUntil()).thenReturn(OffsetDateTime.now()); when(receivedMessage.getLockToken()).thenReturn(UUID.randomUUID().toString()); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); verify(amqpReceiveLink).addCredits(PREFETCH); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformGetSessionState() { final String sessionId = "a-session-id"; StepVerifier.create(receiver.getSessionState(sessionId)) .expectError(IllegalStateException.class) .verify(); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformSetSessionState() { final String sessionId = "a-session-id"; final byte[] sessionState = new byte[]{10, 11, 8}; StepVerifier.create(receiver.setSessionState(sessionId, sessionState)) .expectError(IllegalStateException.class) .verify(); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformRenewSessionLock() { final String sessionId = "a-session-id"; StepVerifier.create(receiver.renewSessionLock(sessionId)) .expectError(IllegalStateException.class) .verify(); } /** * Verifies that we can get a session state. 
*/ @SuppressWarnings("unchecked") @Test void getSessionState() { final byte[] bytes = new byte[]{95, 11, 54, 10}; when(managementNode.getSessionState(SESSION_ID, null)) .thenReturn(Mono.just(bytes), Mono.empty()); StepVerifier.create(sessionReceiver.getSessionState(SESSION_ID)) .expectNext(bytes) .expectComplete() .verify(); } /** * Verifies that we can set a session state. */ @Test void setSessionState() { final byte[] bytes = new byte[]{95, 11, 54, 10}; when(managementNode.setSessionState(SESSION_ID, bytes, null)).thenReturn(Mono.empty()); StepVerifier.create(sessionReceiver.setSessionState(SESSION_ID, bytes)) .expectComplete() .verify(); } /** * Verifies that we can renew a session state. */ @Test void renewSessionLock() { final OffsetDateTime expiry = Instant.ofEpochSecond(1588011761L).atOffset(ZoneOffset.UTC); when(managementNode.renewSessionLock(SESSION_ID, null)).thenReturn(Mono.just(expiry)); StepVerifier.create(sessionReceiver.renewSessionLock(SESSION_ID)) .expectNext(expiry) .expectComplete() .verify(); } /** * Verifies that we cannot renew a message lock when using a session receiver. */ @Test void cannotRenewMessageLockInSession() { when(receivedMessage.getLockToken()).thenReturn("lock-token"); when(receivedMessage.getSessionId()).thenReturn("fo"); StepVerifier.create(sessionReceiver.renewMessageLock(receivedMessage)) .expectError(IllegalStateException.class) .verify(); } /** * Verifies that we can auto-renew a message lock. 
*/ @Test void autoRenewMessageLock() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String lockToken = "some-token"; final int atMost = 5; final Duration totalSleepPeriod = maxDuration.plusMillis(500); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(managementNode.renewMessageLock(lockToken, null)) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .thenAwait(totalSleepPeriod) .then(() -> logger.info("Finished renewals for first sleep.")) .expectComplete() .verify(Duration.ofSeconds(5)); verify(managementNode, Mockito.atMost(atMost)).renewMessageLock(lockToken, null); } /** * Verifies that it errors when we try a null lock token. */ @Test void autoRenewMessageLockErrorNull() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); when(receivedMessage.getLockToken()).thenReturn(null); when(managementNode.renewMessageLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).renewMessageLock(anyString(), isNull()); } /** * Verifies that it errors when we try an empty string lock token. 
*/ @Test void autoRenewMessageLockErrorEmptyString() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String lockToken = ""; when(receivedMessage.getLockToken()).thenReturn(""); when(managementNode.renewMessageLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .expectError(IllegalArgumentException.class) .verify(); verify(managementNode, never()).renewMessageLock(anyString(), isNull()); } /** * Verifies that we can auto-renew a session lock. */ @Test void autoRenewSessionLock() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String sessionId = "some-token"; final int atMost = 5; final Duration totalSleepPeriod = maxDuration.plusMillis(500); when(managementNode.renewSessionLock(sessionId, null)) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(sessionId, maxDuration)) .thenAwait(totalSleepPeriod) .then(() -> logger.info("Finished renewals for first sleep.")) .expectComplete() .verify(Duration.ofSeconds(5)); verify(managementNode, Mockito.atMost(atMost)).renewSessionLock(sessionId, null); } /** * Verifies that it errors when we try a null lock token. 
*/ @Test void autoRenewSessionLockErrorNull() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); when(managementNode.renewSessionLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(null, maxDuration)) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).renewSessionLock(anyString(), isNull()); } /** * Verifies that it errors when we try an empty string session id */ @Test void autoRenewSessionLockErrorEmptyString() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String sessionId = ""; when(managementNode.renewSessionLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(sessionId, maxDuration)) .expectError(IllegalArgumentException.class) .verify(); verify(managementNode, never()).renewSessionLock(anyString(), isNull()); } @Test void autoCompleteMessage() { final int numberOfEvents = 3; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); final ReceiverOptions receiverOptions = new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, true); final ServiceBusReceiverAsyncClient receiver2 = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, receiverOptions, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); when(amqpReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.empty()); try { StepVerifier.create(receiver2.receiveMessages().take(numberOfEvents)) .then(() -> 
messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); } finally { receiver2.close(); } verify(amqpReceiveLink, times(numberOfEvents)).updateDisposition(lockToken, Accepted.getInstance()); } @Test void autoCompleteMessageSessionReceiver() { final int numberOfEvents = 3; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); final ReceiverOptions receiverOptions = new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, true, "Some-Session", null); final ServiceBusReceiverAsyncClient sessionReceiver2 = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, receiverOptions, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); when(sessionReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.empty()); try { StepVerifier.create(sessionReceiver2.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); } finally { sessionReceiver2.close(); } verify(sessionReceiveLink, times(numberOfEvents)).updateDisposition(lockToken, Accepted.getInstance()); } private List<Message> getMessages() { final Map<String, String> map = Collections.singletonMap("SAMPLE_HEADER", "foo"); return IntStream.range(0, 10) .mapToObj(index -> getMessage(PAYLOAD_BYTES, messageTrackingUUID, map)) .collect(Collectors.toList()); } private static Stream<Arguments> errorSourceNoneOnSettlement() { return Stream.of( Arguments.of(DispositionStatus.COMPLETED, DeliveryStateType.Accepted), Arguments.of(DispositionStatus.ABANDONED, DeliveryStateType.Modified)); } }
Could we also add tests covering the `UNKNOWN` error source?
void errorSourceOnReceiveMessage() { final String lockToken1 = UUID.randomUUID().toString(); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class))).thenReturn(Mono.error(new AmqpException(false, "some receive link Error.", null))); StepVerifier.create(receiver.receiveMessages().take(1)) .verifyErrorMatches(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RECEIVE, actual); return true; }); verify(amqpReceiveLink, never()).updateDisposition(eq(lockToken1), any(DeliveryState.class)); }
Assertions.assertEquals(ServiceBusErrorSource.RECEIVE, actual);
void errorSourceOnReceiveMessage() { final String lockToken = UUID.randomUUID().toString(); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class))).thenReturn(Mono.error(new AmqpException(false, "some receive link Error.", null))); StepVerifier.create(receiver.receiveMessages().take(1)) .verifyErrorSatisfies(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RECEIVE, actual); }); verify(amqpReceiveLink, never()).updateDisposition(eq(lockToken), any(DeliveryState.class)); }
class ServiceBusReceiverAsyncClientTest { private static final ClientOptions CLIENT_OPTIONS = new ClientOptions(); private static final String PAYLOAD = "hello"; private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8); private static final int PREFETCH = 5; private static final String NAMESPACE = "my-namespace-foo.net"; private static final String ENTITY_PATH = "queue-name"; private static final MessagingEntityType ENTITY_TYPE = MessagingEntityType.QUEUE; private static final String NAMESPACE_CONNECTION_STRING = String.format( "Endpoint=sb: NAMESPACE, "some-name", "something-else"); private static final Duration CLEANUP_INTERVAL = Duration.ofSeconds(10); private static final String SESSION_ID = "my-session-id"; private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientTest.class); private final String messageTrackingUUID = UUID.randomUUID().toString(); private final ReplayProcessor<AmqpEndpointState> endpointProcessor = ReplayProcessor.cacheLast(); private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER); private final DirectProcessor<Message> messageProcessor = DirectProcessor.create(); private final FluxSink<Message> messageSink = messageProcessor.sink(FluxSink.OverflowStrategy.BUFFER); private ServiceBusConnectionProcessor connectionProcessor; private ServiceBusReceiverAsyncClient receiver; private ServiceBusReceiverAsyncClient sessionReceiver; @Mock private ServiceBusReactorReceiver amqpReceiveLink; @Mock private ServiceBusReactorReceiver sessionReceiveLink; @Mock private ServiceBusAmqpConnection connection; @Mock private TokenCredential tokenCredential; @Mock private MessageSerializer messageSerializer; @Mock private TracerProvider tracerProvider; @Mock private ServiceBusManagementNode managementNode; @Mock private ServiceBusReceivedMessage receivedMessage; @Mock private ServiceBusReceivedMessage receivedMessage2; @Mock private Runnable onClientClose; @BeforeAll static 
void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(100)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } @BeforeEach void setup(TestInfo testInfo) { logger.info("[{}] Setting up.", testInfo.getDisplayName()); MockitoAnnotations.initMocks(this); when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single())); when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor); when(sessionReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single())); when(sessionReceiveLink.getEndpointStates()).thenReturn(endpointProcessor); ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential, CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(), ProxyOptions.SYSTEM_DEFAULTS, Schedulers.boundedElastic(), CLIENT_OPTIONS, SslDomain.VerifyMode.VERIFY_PEER_NAME); when(connection.getEndpointStates()).thenReturn(endpointProcessor); endpointSink.next(AmqpEndpointState.ACTIVE); when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)) .thenReturn(Mono.just(managementNode)); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class))).thenReturn(Mono.just(amqpReceiveLink)); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class), anyString())).thenReturn(Mono.just(sessionReceiveLink)); connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection)) .subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry())); receiver = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, false), connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); sessionReceiver = new 
ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, false, "Some-Session", false, null), connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); } @AfterEach void teardown(TestInfo testInfo) { logger.info("[{}] Tearing down.", testInfo.getDisplayName()); receiver.close(); Mockito.framework().clearInlineMocks(); } /** * Verifies that when user calls peek more than one time, It returns different object. */ @SuppressWarnings("unchecked") @Test void peekTwoMessages() { final long sequence1 = 10; final long sequence2 = 12; final ArgumentCaptor<Long> captor = ArgumentCaptor.forClass(Long.class); when(receivedMessage.getSequenceNumber()).thenReturn(sequence1); when(receivedMessage2.getSequenceNumber()).thenReturn(sequence2); when(managementNode.peek(anyLong(), isNull(), isNull())) .thenReturn(Mono.just(receivedMessage), Mono.just(receivedMessage2)); StepVerifier.create(receiver.peekMessage()) .expectNext(receivedMessage) .verifyComplete(); StepVerifier.create(receiver.peekMessage()) .expectNext(receivedMessage2) .verifyComplete(); verify(managementNode, times(2)).peek(captor.capture(), isNull(), isNull()); final List<Long> allValues = captor.getAllValues(); Assertions.assertEquals(2, allValues.size()); Assertions.assertTrue(allValues.contains(0L)); Assertions.assertTrue(allValues.contains(11L)); } /** * Verifies that when no messages are returned, that it does not error. */ @Test void peekEmptyEntity() { when(managementNode.peek(0, null, null)) .thenReturn(Mono.empty()); StepVerifier.create(receiver.peekMessage()) .verifyComplete(); } /** * Verifies that this peek one messages from a sequence Number. 
*/ @Test void peekWithSequenceOneMessage() { final int fromSequenceNumber = 10; final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(managementNode.peek(fromSequenceNumber, null, null)).thenReturn(Mono.just(receivedMessage)); StepVerifier.create(receiver.peekMessageAt(fromSequenceNumber)) .expectNext(receivedMessage) .verifyComplete(); } /** * Verifies that this receives a number of messages. Verifies that the initial credits we add are equal to the * prefetch value. */ @Test void receivesNumberOfEvents() { final int numberOfEvents = 1; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(receivedMessage.getLockedUntil()).thenReturn(OffsetDateTime.now()); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); verify(amqpReceiveLink).addCredits(PREFETCH); verify(amqpReceiveLink, never()).updateDisposition(eq(lockToken), any()); } /** * Verifies that we error if we try to settle a message with null transaction-id. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void settleWithNullTransactionId(DispositionStatus dispositionStatus) { ServiceBusTransactionContext nullTransactionId = new ServiceBusTransactionContext(null); when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)).thenReturn(Mono.just(managementNode)); when(managementNode.updateDisposition(any(), eq(dispositionStatus), isNull(), isNull(), isNull(), isNull(), isNull(), isNull())) .thenReturn(Mono.delay(Duration.ofMillis(250)).then()); when(receivedMessage.getLockToken()).thenReturn("mylockToken"); final Mono<Void> operation; switch (dispositionStatus) { case DEFERRED: operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(nullTransactionId)); break; case ABANDONED: operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(nullTransactionId)); break; case COMPLETED: operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(nullTransactionId)); break; case SUSPENDED: operation = receiver.deadLetter(receivedMessage, new DeadLetterOptions().setTransactionContext(nullTransactionId)); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } StepVerifier.create(operation) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).updateDisposition(any(), eq(dispositionStatus), isNull(), isNull(), isNull(), isNull(), isNull(), isNull()); } /** * Verifies that we error if we try to complete a null message. */ @Test void completeNullMessage() { StepVerifier.create(receiver.complete(null)).expectError(NullPointerException.class).verify(); } /** * Verifies that we error if we complete in RECEIVE_AND_DELETE mode. 
*/ @Test void completeInReceiveAndDeleteMode() { final ReceiverOptions options = new ReceiverOptions(ReceiveMode.RECEIVE_AND_DELETE, PREFETCH, null, false); ServiceBusReceiverAsyncClient client = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, options, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); final String lockToken1 = UUID.randomUUID().toString(); when(receivedMessage.getLockToken()).thenReturn(lockToken1); try { StepVerifier.create(client.complete(receivedMessage)) .expectError(UnsupportedOperationException.class) .verify(); } finally { client.close(); } } /** * Verifies that this peek batch of messages. */ @Test void peekMessages() { final int numberOfEvents = 2; when(managementNode.peek(0, null, null, numberOfEvents)) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.peekMessages(numberOfEvents)) .expectNextCount(numberOfEvents) .verifyComplete(); } /** * Verifies that this peek batch of messages. */ @Test void peekMessagesEmptyEntity() { final int numberOfEvents = 2; when(managementNode.peek(0, null, null, numberOfEvents)) .thenReturn(Flux.fromIterable(Collections.emptyList())); StepVerifier.create(receiver.peekMessages(numberOfEvents)) .verifyComplete(); } /** * Verifies that this peek batch of messages from a sequence Number. */ @Test void peekBatchWithSequenceNumberMessages() { final int numberOfEvents = 2; final int fromSequenceNumber = 10; when(managementNode.peek(fromSequenceNumber, null, null, numberOfEvents)) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.peekMessagesAt(numberOfEvents, fromSequenceNumber)) .expectNext(receivedMessage, receivedMessage2) .verifyComplete(); } /** * Verifies that we can deadletter a message with an error and description. 
*/ @Test void deadLetterWithDescription() { final String lockToken1 = UUID.randomUUID().toString(); final String description = "some-dead-letter-description"; final String reason = "dead-letter-reason"; final Map<String, Object> propertiesToModify = new HashMap<>(); propertiesToModify.put("something", true); final DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setDeadLetterReason(reason) .setDeadLetterErrorDescription(description) .setPropertiesToModify(propertiesToModify); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(amqpReceiveLink.updateDisposition(eq(lockToken1), argThat(e -> e.getType() == DeliveryStateType.Rejected))).thenReturn(Mono.empty()); StepVerifier.create(receiver.receiveMessages() .take(1) .flatMap(context -> receiver.deadLetter(context.getMessage(), deadLetterOptions))) .then(() -> messageSink.next(message)) .expectNext() .verifyComplete(); verify(amqpReceiveLink).updateDisposition(eq(lockToken1), isA(Rejected.class)); } /** * Verifies that error source is populated when any error happened while renewing lock. 
*/ @Test void errorSourceOnRenewMessageLock() { final Duration maxDuration = Duration.ofSeconds(8); final String lockToken = "some-token"; when(receivedMessage.getLockToken()).thenReturn(lockToken); when(managementNode.renewMessageLock(lockToken, null)) .thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .verifyErrorMatches(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RENEW_LOCK, actual); return true; }); verify(managementNode, times(1)).renewMessageLock(lockToken, null); } /** * Verifies that error source is populated when any error happened while renewing lock. */ @Test void errorSourceOnSessionLock() { when(managementNode.renewSessionLock(SESSION_ID, null)).thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(sessionReceiver.renewSessionLock(SESSION_ID)) .verifyErrorMatches(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RENEW_LOCK, actual); return true; }); } /** * Verifies that error source is populated when there is any error during message settlement. 
*/ @ParameterizedTest @MethodSource void errorSourceOnSettlement(DispositionStatus dispositionStatus, ServiceBusErrorSource expectedErrorSource, DeliveryStateType expectedDeliveryState) { final String lockToken1 = UUID.randomUUID().toString(); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(amqpReceiveLink.updateDisposition(eq(lockToken1), argThat(e -> e.getType() == expectedDeliveryState))) .thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(receiver.receiveMessages().take(1) .flatMap(context -> { final Mono<Void> operation; switch (dispositionStatus) { case DEFERRED: operation = receiver.defer(receivedMessage); break; case ABANDONED: operation = receiver.abandon(receivedMessage); break; case COMPLETED: operation = receiver.complete(receivedMessage); break; case SUSPENDED: operation = receiver.deadLetter(receivedMessage); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } return operation; })) .then(() -> messageSink.next(message)) .expectNext() .verifyErrorMatches(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(expectedErrorSource, actual); return true; }); verify(amqpReceiveLink).updateDisposition(eq(lockToken1), any(DeliveryState.class)); } /** * Verifies that error source is populated when there is any error during receiving of message. */ @Test /** * Verifies that the user can complete settlement methods on received message. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void settleMessageOnManagement(DispositionStatus dispositionStatus) { final String lockToken1 = UUID.randomUUID().toString(); final String lockToken2 = UUID.randomUUID().toString(); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final long sequenceNumber = 10L; final long sequenceNumber2 = 15L; final MessageWithLockToken message = mock(MessageWithLockToken.class); final MessageWithLockToken message2 = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(messageSerializer.deserialize(message2, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage2); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(receivedMessage2.getLockToken()).thenReturn(lockToken2); when(receivedMessage2.getLockedUntil()).thenReturn(expiration); when(connection.getManagementNode(eq(ENTITY_PATH), eq(ENTITY_TYPE))) .thenReturn(Mono.just(managementNode)); when(managementNode.receiveDeferredMessages(eq(ReceiveMode.PEEK_LOCK), isNull(), isNull(), argThat(arg -> { boolean foundFirst = false; boolean foundSecond = false; for (Long seq : arg) { if (!foundFirst && sequenceNumber == seq) { foundFirst = true; } else if (!foundSecond && sequenceNumber2 == seq) { foundSecond = true; } } return foundFirst && foundSecond; }))) .thenReturn(Flux.just(receivedMessage, receivedMessage2)); when(managementNode.updateDisposition(lockToken1, dispositionStatus, null, null, null, null, null, null)) .thenReturn(Mono.empty()); when(managementNode.updateDisposition(lockToken2, dispositionStatus, null, null, null, null, null, null)) .thenReturn(Mono.empty()); StepVerifier.create(receiver.receiveDeferredMessages(Arrays.asList(sequenceNumber, sequenceNumber2))) .expectNext(receivedMessage, receivedMessage2) .verifyComplete(); final Mono<Void> operation; 
switch (dispositionStatus) { case DEFERRED: operation = receiver.defer(receivedMessage); break; case ABANDONED: operation = receiver.abandon(receivedMessage); break; case COMPLETED: operation = receiver.complete(receivedMessage); break; case SUSPENDED: operation = receiver.deadLetter(receivedMessage); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } StepVerifier.create(operation) .verifyComplete(); verify(managementNode).updateDisposition(lockToken1, dispositionStatus, null, null, null, null, null, null); verify(managementNode, never()).updateDisposition(lockToken2, dispositionStatus, null, null, null, null, null, null); } /** * Verifies that this receive deferred one messages from a sequence Number. */ @Test void receiveDeferredWithSequenceOneMessage() { final int fromSequenceNumber = 10; final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(managementNode.receiveDeferredMessages(any(), any(), any(), any())).thenReturn(Flux.just(receivedMessage)); StepVerifier.create(receiver.receiveDeferredMessage(fromSequenceNumber)) .expectNext(receivedMessage) .verifyComplete(); } /** * Verifies that this receive deferred messages from a sequence Number. */ @Test void receiveDeferredBatchFromSequenceNumber() { final long fromSequenceNumber1 = 10; final long fromSequenceNumber2 = 11; when(managementNode.receiveDeferredMessages(any(), any(), any(), any())) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.receiveDeferredMessages(Arrays.asList(fromSequenceNumber1, fromSequenceNumber2))) .expectNext(receivedMessage) .expectNext(receivedMessage2) .verifyComplete(); } /** * Verifies that the onClientClose is called. */ @Test void callsClientClose() { receiver.close(); verify(onClientClose).run(); } /** * Verifies that the onClientClose is only called once. 
*/ @Test void callsClientCloseOnce() { receiver.close(); receiver.close(); verify(onClientClose).run(); } /** * Tests that invalid options throws and null options. */ @Test void receiveIllegalOptions() { ServiceBusReceiverClientBuilder builder = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver() .topicName("baz").subscriptionName("bar") .receiveMode(ReceiveMode.PEEK_LOCK); Assertions.assertThrows(IllegalArgumentException.class, () -> builder.prefetchCount(-1)); } @Test void topicCorrectEntityPath() { final String topicName = "foo"; final String subscriptionName = "bar"; final String entityPath = String.join("/", topicName, "subscriptions", subscriptionName); final ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver() .topicName(topicName) .subscriptionName(subscriptionName) .receiveMode(ReceiveMode.PEEK_LOCK) .buildAsyncClient(); final String actual = receiver.getEntityPath(); final String actualNamespace = receiver.getFullyQualifiedNamespace(); Assertions.assertEquals(entityPath, actual); Assertions.assertEquals(NAMESPACE, actualNamespace); } /** * Verifies that client can call multiple receiveMessages on same receiver instance. 
*/ @Test void canPerformMultipleReceive() { final int numberOfEvents = 1; final List<Message> messages = getMessages(); ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(receivedMessage.getLockedUntil()).thenReturn(OffsetDateTime.now()); when(receivedMessage.getLockToken()).thenReturn(UUID.randomUUID().toString()); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); verify(amqpReceiveLink).addCredits(PREFETCH); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformGetSessionState() { final String sessionId = "a-session-id"; StepVerifier.create(receiver.getSessionState(sessionId)) .expectError(IllegalStateException.class) .verify(); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformSetSessionState() { final String sessionId = "a-session-id"; final byte[] sessionState = new byte[]{10, 11, 8}; StepVerifier.create(receiver.setSessionState(sessionId, sessionState)) .expectError(IllegalStateException.class) .verify(); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformRenewSessionLock() { final String sessionId = "a-session-id"; StepVerifier.create(receiver.renewSessionLock(sessionId)) .expectError(IllegalStateException.class) .verify(); } /** * Verifies that we can get a session state. 
*/ @SuppressWarnings("unchecked") @Test void getSessionState() { final byte[] bytes = new byte[]{95, 11, 54, 10}; when(managementNode.getSessionState(SESSION_ID, null)) .thenReturn(Mono.just(bytes), Mono.empty()); StepVerifier.create(sessionReceiver.getSessionState(SESSION_ID)) .expectNext(bytes) .expectComplete() .verify(); } /** * Verifies that we can set a session state. */ @Test void setSessionState() { final byte[] bytes = new byte[]{95, 11, 54, 10}; when(managementNode.setSessionState(SESSION_ID, bytes, null)).thenReturn(Mono.empty()); StepVerifier.create(sessionReceiver.setSessionState(SESSION_ID, bytes)) .expectComplete() .verify(); } /** * Verifies that we can renew a session state. */ @Test void renewSessionLock() { final OffsetDateTime expiry = Instant.ofEpochSecond(1588011761L).atOffset(ZoneOffset.UTC); when(managementNode.renewSessionLock(SESSION_ID, null)).thenReturn(Mono.just(expiry)); StepVerifier.create(sessionReceiver.renewSessionLock(SESSION_ID)) .expectNext(expiry) .expectComplete() .verify(); } /** * Verifies that we cannot renew a message lock when using a session receiver. */ @Test void cannotRenewMessageLockInSession() { when(receivedMessage.getLockToken()).thenReturn("lock-token"); when(receivedMessage.getSessionId()).thenReturn("fo"); StepVerifier.create(sessionReceiver.renewMessageLock(receivedMessage)) .expectError(IllegalStateException.class) .verify(); } /** * Verifies that we can auto-renew a message lock. 
*/ @Test void autoRenewMessageLock() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String lockToken = "some-token"; final int atMost = 5; final Duration totalSleepPeriod = maxDuration.plusMillis(500); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(managementNode.renewMessageLock(lockToken, null)) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .thenAwait(totalSleepPeriod) .then(() -> logger.info("Finished renewals for first sleep.")) .expectComplete() .verify(Duration.ofSeconds(5)); verify(managementNode, Mockito.atMost(atMost)).renewMessageLock(lockToken, null); } /** * Verifies that it errors when we try a null lock token. */ @Test void autoRenewMessageLockErrorNull() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); when(receivedMessage.getLockToken()).thenReturn(null); when(managementNode.renewMessageLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).renewMessageLock(anyString(), isNull()); } /** * Verifies that it errors when we try an empty string lock token. 
*/ @Test void autoRenewMessageLockErrorEmptyString() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String lockToken = ""; when(receivedMessage.getLockToken()).thenReturn(""); when(managementNode.renewMessageLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .expectError(IllegalArgumentException.class) .verify(); verify(managementNode, never()).renewMessageLock(anyString(), isNull()); } /** * Verifies that we can auto-renew a session lock. */ @Test void autoRenewSessionLock() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String sessionId = "some-token"; final int atMost = 5; final Duration totalSleepPeriod = maxDuration.plusMillis(500); when(managementNode.renewSessionLock(sessionId, null)) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(sessionId, maxDuration)) .thenAwait(totalSleepPeriod) .then(() -> logger.info("Finished renewals for first sleep.")) .expectComplete() .verify(Duration.ofSeconds(5)); verify(managementNode, Mockito.atMost(atMost)).renewSessionLock(sessionId, null); } /** * Verifies that it errors when we try a null lock token. 
*/ @Test void autoRenewSessionLockErrorNull() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); when(managementNode.renewSessionLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(null, maxDuration)) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).renewSessionLock(anyString(), isNull()); } /** * Verifies that it errors when we try an empty string session id */ @Test void autoRenewSessionLockErrorEmptyString() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String sessionId = ""; when(managementNode.renewSessionLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(sessionId, maxDuration)) .expectError(IllegalArgumentException.class) .verify(); verify(managementNode, never()).renewSessionLock(anyString(), isNull()); } @Test void autoCompleteMessage() { final int numberOfEvents = 3; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); final ReceiverOptions receiverOptions = new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, true); final ServiceBusReceiverAsyncClient receiver2 = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, receiverOptions, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); when(amqpReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.empty()); try { StepVerifier.create(receiver2.receiveMessages().take(numberOfEvents)) .then(() -> 
messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); } finally { receiver2.close(); } verify(amqpReceiveLink, times(numberOfEvents)).updateDisposition(lockToken, Accepted.getInstance()); } @Test void autoCompleteMessageSessionReceiver() { final int numberOfEvents = 3; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); final ReceiverOptions receiverOptions = new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, true, "Some-Session", false, null); final ServiceBusReceiverAsyncClient sessionReceiver2 = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, receiverOptions, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); when(sessionReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.empty()); try { StepVerifier.create(sessionReceiver2.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); } finally { sessionReceiver2.close(); } verify(sessionReceiveLink, times(numberOfEvents)).updateDisposition(lockToken, Accepted.getInstance()); } private List<Message> getMessages() { final Map<String, String> map = Collections.singletonMap("SAMPLE_HEADER", "foo"); return IntStream.range(0, 10) .mapToObj(index -> getMessage(PAYLOAD_BYTES, messageTrackingUUID, map)) .collect(Collectors.toList()); } private static Stream<Arguments> errorSourceOnSettlement() { return Stream.of( Arguments.of(DispositionStatus.DEFERRED, ServiceBusErrorSource.DEFER, DeliveryStateType.Modified), Arguments.of(DispositionStatus.COMPLETED, ServiceBusErrorSource.COMPLETE, DeliveryStateType.Accepted), Arguments.of(DispositionStatus.SUSPENDED, 
ServiceBusErrorSource.DEAD_LETTER, DeliveryStateType.Rejected), Arguments.of(DispositionStatus.ABANDONED, ServiceBusErrorSource.ABANDONED, DeliveryStateType.Modified)); } }
class ServiceBusReceiverAsyncClientTest { private static final ClientOptions CLIENT_OPTIONS = new ClientOptions(); private static final String PAYLOAD = "hello"; private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8); private static final int PREFETCH = 5; private static final String NAMESPACE = "my-namespace-foo.net"; private static final String ENTITY_PATH = "queue-name"; private static final MessagingEntityType ENTITY_TYPE = MessagingEntityType.QUEUE; private static final String NAMESPACE_CONNECTION_STRING = String.format( "Endpoint=sb: NAMESPACE, "some-name", "something-else"); private static final Duration CLEANUP_INTERVAL = Duration.ofSeconds(10); private static final String SESSION_ID = "my-session-id"; private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientTest.class); private final String messageTrackingUUID = UUID.randomUUID().toString(); private final ReplayProcessor<AmqpEndpointState> endpointProcessor = ReplayProcessor.cacheLast(); private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER); private final DirectProcessor<Message> messageProcessor = DirectProcessor.create(); private final FluxSink<Message> messageSink = messageProcessor.sink(FluxSink.OverflowStrategy.BUFFER); private ServiceBusConnectionProcessor connectionProcessor; private ServiceBusReceiverAsyncClient receiver; private ServiceBusReceiverAsyncClient sessionReceiver; @Mock private ServiceBusReactorReceiver amqpReceiveLink; @Mock private ServiceBusReactorReceiver sessionReceiveLink; @Mock private ServiceBusAmqpConnection connection; @Mock private TokenCredential tokenCredential; @Mock private MessageSerializer messageSerializer; @Mock private TracerProvider tracerProvider; @Mock private ServiceBusManagementNode managementNode; @Mock private ServiceBusReceivedMessage receivedMessage; @Mock private ServiceBusReceivedMessage receivedMessage2; @Mock private Runnable onClientClose; @BeforeAll static 
void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(100)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } @BeforeEach void setup(TestInfo testInfo) { logger.info("[{}] Setting up.", testInfo.getDisplayName()); MockitoAnnotations.initMocks(this); when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single())); when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor); when(sessionReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single())); when(sessionReceiveLink.getEndpointStates()).thenReturn(endpointProcessor); ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential, CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(), ProxyOptions.SYSTEM_DEFAULTS, Schedulers.boundedElastic(), CLIENT_OPTIONS, SslDomain.VerifyMode.VERIFY_PEER_NAME); when(connection.getEndpointStates()).thenReturn(endpointProcessor); endpointSink.next(AmqpEndpointState.ACTIVE); when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)) .thenReturn(Mono.just(managementNode)); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class))).thenReturn(Mono.just(amqpReceiveLink)); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class), anyString())).thenReturn(Mono.just(sessionReceiveLink)); connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection)) .subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry())); receiver = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, false), connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); sessionReceiver = new 
ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, false, "Some-Session", null), connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); } @AfterEach void teardown(TestInfo testInfo) { logger.info("[{}] Tearing down.", testInfo.getDisplayName()); receiver.close(); Mockito.framework().clearInlineMocks(); } /** * Verifies that when user calls peek more than one time, It returns different object. */ @SuppressWarnings("unchecked") @Test void peekTwoMessages() { final long sequence1 = 10; final long sequence2 = 12; final ArgumentCaptor<Long> captor = ArgumentCaptor.forClass(Long.class); when(receivedMessage.getSequenceNumber()).thenReturn(sequence1); when(receivedMessage2.getSequenceNumber()).thenReturn(sequence2); when(managementNode.peek(anyLong(), isNull(), isNull())) .thenReturn(Mono.just(receivedMessage), Mono.just(receivedMessage2)); StepVerifier.create(receiver.peekMessage()) .expectNext(receivedMessage) .verifyComplete(); StepVerifier.create(receiver.peekMessage()) .expectNext(receivedMessage2) .verifyComplete(); verify(managementNode, times(2)).peek(captor.capture(), isNull(), isNull()); final List<Long> allValues = captor.getAllValues(); Assertions.assertEquals(2, allValues.size()); Assertions.assertTrue(allValues.contains(0L)); Assertions.assertTrue(allValues.contains(11L)); } /** * Verifies that when no messages are returned, that it does not error. */ @Test void peekEmptyEntity() { when(managementNode.peek(0, null, null)) .thenReturn(Mono.empty()); StepVerifier.create(receiver.peekMessage()) .verifyComplete(); } /** * Verifies that this peek one messages from a sequence Number. 
*/ @Test void peekWithSequenceOneMessage() { final int fromSequenceNumber = 10; final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(managementNode.peek(fromSequenceNumber, null, null)).thenReturn(Mono.just(receivedMessage)); StepVerifier.create(receiver.peekMessageAt(fromSequenceNumber)) .expectNext(receivedMessage) .verifyComplete(); } /** * Verifies that this receives a number of messages. Verifies that the initial credits we add are equal to the * prefetch value. */ @Test void receivesNumberOfEvents() { final int numberOfEvents = 1; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(receivedMessage.getLockedUntil()).thenReturn(OffsetDateTime.now()); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); verify(amqpReceiveLink).addCredits(PREFETCH); verify(amqpReceiveLink, never()).updateDisposition(eq(lockToken), any()); } /** * Verifies that we error if we try to settle a message with null transaction-id. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void settleWithNullTransactionId(DispositionStatus dispositionStatus) { ServiceBusTransactionContext nullTransactionId = new ServiceBusTransactionContext(null); when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)).thenReturn(Mono.just(managementNode)); when(managementNode.updateDisposition(any(), eq(dispositionStatus), isNull(), isNull(), isNull(), isNull(), isNull(), isNull())) .thenReturn(Mono.delay(Duration.ofMillis(250)).then()); when(receivedMessage.getLockToken()).thenReturn("mylockToken"); final Mono<Void> operation; switch (dispositionStatus) { case DEFERRED: operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(nullTransactionId)); break; case ABANDONED: operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(nullTransactionId)); break; case COMPLETED: operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(nullTransactionId)); break; case SUSPENDED: operation = receiver.deadLetter(receivedMessage, new DeadLetterOptions().setTransactionContext(nullTransactionId)); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } StepVerifier.create(operation) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).updateDisposition(any(), eq(dispositionStatus), isNull(), isNull(), isNull(), isNull(), isNull(), isNull()); } /** * Verifies that we error if we try to complete a null message. */ @Test void completeNullMessage() { StepVerifier.create(receiver.complete(null)).expectError(NullPointerException.class).verify(); } /** * Verifies that we error if we complete in RECEIVE_AND_DELETE mode. 
*/ @Test void completeInReceiveAndDeleteMode() { final ReceiverOptions options = new ReceiverOptions(ReceiveMode.RECEIVE_AND_DELETE, PREFETCH, null, false); ServiceBusReceiverAsyncClient client = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, options, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); final String lockToken1 = UUID.randomUUID().toString(); when(receivedMessage.getLockToken()).thenReturn(lockToken1); try { StepVerifier.create(client.complete(receivedMessage)) .expectError(UnsupportedOperationException.class) .verify(); } finally { client.close(); } } /** * Verifies that this peek batch of messages. */ @Test void peekMessages() { final int numberOfEvents = 2; when(managementNode.peek(0, null, null, numberOfEvents)) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.peekMessages(numberOfEvents)) .expectNextCount(numberOfEvents) .verifyComplete(); } /** * Verifies that this peek batch of messages. */ @Test void peekMessagesEmptyEntity() { final int numberOfEvents = 2; when(managementNode.peek(0, null, null, numberOfEvents)) .thenReturn(Flux.fromIterable(Collections.emptyList())); StepVerifier.create(receiver.peekMessages(numberOfEvents)) .verifyComplete(); } /** * Verifies that this peek batch of messages from a sequence Number. */ @Test void peekBatchWithSequenceNumberMessages() { final int numberOfEvents = 2; final int fromSequenceNumber = 10; when(managementNode.peek(fromSequenceNumber, null, null, numberOfEvents)) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.peekMessagesAt(numberOfEvents, fromSequenceNumber)) .expectNext(receivedMessage, receivedMessage2) .verifyComplete(); } /** * Verifies that we can deadletter a message with an error and description. 
*/ @Test void deadLetterWithDescription() { final String lockToken1 = UUID.randomUUID().toString(); final String description = "some-dead-letter-description"; final String reason = "dead-letter-reason"; final Map<String, Object> propertiesToModify = new HashMap<>(); propertiesToModify.put("something", true); final DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setDeadLetterReason(reason) .setDeadLetterErrorDescription(description) .setPropertiesToModify(propertiesToModify); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(amqpReceiveLink.updateDisposition(eq(lockToken1), argThat(e -> e.getType() == DeliveryStateType.Rejected))).thenReturn(Mono.empty()); StepVerifier.create(receiver.receiveMessages() .take(1) .flatMap(context -> receiver.deadLetter(context.getMessage(), deadLetterOptions))) .then(() -> messageSink.next(message)) .expectNext() .verifyComplete(); verify(amqpReceiveLink).updateDisposition(eq(lockToken1), isA(Rejected.class)); } /** * Verifies that error source is populated when any error happened while renewing lock. 
*/ @Test void errorSourceOnRenewMessageLock() { final Duration maxDuration = Duration.ofSeconds(8); final String lockToken = "some-token"; when(receivedMessage.getLockToken()).thenReturn(lockToken); when(managementNode.renewMessageLock(lockToken, null)) .thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .verifyErrorSatisfies(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RENEW_LOCK, actual); }); verify(managementNode, times(1)).renewMessageLock(lockToken, null); } /** * Verifies that error source is populated when any error happened while renewing lock. */ @Test void errorSourceOnSessionLock() { when(managementNode.renewSessionLock(SESSION_ID, null)).thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(sessionReceiver.renewSessionLock(SESSION_ID)) .verifyErrorSatisfies(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RENEW_LOCK, actual); }); } /** * Verifies that error source is not populated when there is no autoComplete. Because user wanted to settle on their * own, we do not need to populate ErrorSource. 
*/ @ParameterizedTest @MethodSource void errorSourceNoneOnSettlement(DispositionStatus dispositionStatus, DeliveryStateType expectedDeliveryState) { final UUID lockTokenUuid = UUID.randomUUID(); final String lockToken1 = lockTokenUuid.toString(); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(amqpReceiveLink.updateDisposition(eq(lockToken1), argThat(e -> e.getType() == expectedDeliveryState))) .thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(receiver.receiveMessages().take(1) .flatMap(context -> { final Mono<Void> operation; switch (dispositionStatus) { case ABANDONED: operation = receiver.abandon(context.getMessage()); break; case COMPLETED: operation = receiver.complete(context.getMessage()); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } return operation; }) ) .then(() -> messageSink.next(message)) .expectNext() .verifyErrorSatisfies(throwable -> { Assertions.assertFalse(throwable instanceof ServiceBusAmqpException); Assertions.assertTrue(throwable instanceof AmqpException); }); verify(amqpReceiveLink).updateDisposition(eq(lockToken1), any(DeliveryState.class)); } /** * Ensure that we throw right error source when there is any issue during autocomplete. 
Error source should be * {@link ServiceBusErrorSource */ @Test void errorSourceAutoCompleteMessage() { final int numberOfEvents = 2; final int messagesToReceive = 1; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); final ReceiverOptions receiverOptions = new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, true); final ServiceBusReceiverAsyncClient receiver2 = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, receiverOptions, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); when(amqpReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); try { StepVerifier.create(receiver2.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(messagesToReceive) .verifyErrorSatisfies(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.COMPLETE, actual); }); } finally { receiver2.close(); } verify(amqpReceiveLink, atLeast(messagesToReceive)).updateDisposition(lockToken, Accepted.getInstance()); } /** * Verifies that error source is populated when there is any error during receiving of message. */ @Test /** * Verifies that the user can complete settlement methods on received message. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void settleMessageOnManagement(DispositionStatus dispositionStatus) { final String lockToken1 = UUID.randomUUID().toString(); final String lockToken2 = UUID.randomUUID().toString(); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final long sequenceNumber = 10L; final long sequenceNumber2 = 15L; final MessageWithLockToken message = mock(MessageWithLockToken.class); final MessageWithLockToken message2 = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(messageSerializer.deserialize(message2, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage2); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(receivedMessage2.getLockToken()).thenReturn(lockToken2); when(receivedMessage2.getLockedUntil()).thenReturn(expiration); when(connection.getManagementNode(eq(ENTITY_PATH), eq(ENTITY_TYPE))) .thenReturn(Mono.just(managementNode)); when(managementNode.receiveDeferredMessages(eq(ReceiveMode.PEEK_LOCK), isNull(), isNull(), argThat(arg -> { boolean foundFirst = false; boolean foundSecond = false; for (Long seq : arg) { if (!foundFirst && sequenceNumber == seq) { foundFirst = true; } else if (!foundSecond && sequenceNumber2 == seq) { foundSecond = true; } } return foundFirst && foundSecond; }))) .thenReturn(Flux.just(receivedMessage, receivedMessage2)); when(managementNode.updateDisposition(lockToken1, dispositionStatus, null, null, null, null, null, null)) .thenReturn(Mono.empty()); when(managementNode.updateDisposition(lockToken2, dispositionStatus, null, null, null, null, null, null)) .thenReturn(Mono.empty()); StepVerifier.create(receiver.receiveDeferredMessages(Arrays.asList(sequenceNumber, sequenceNumber2))) .expectNext(receivedMessage, receivedMessage2) .verifyComplete(); final Mono<Void> operation; 
switch (dispositionStatus) { case DEFERRED: operation = receiver.defer(receivedMessage); break; case ABANDONED: operation = receiver.abandon(receivedMessage); break; case COMPLETED: operation = receiver.complete(receivedMessage); break; case SUSPENDED: operation = receiver.deadLetter(receivedMessage); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } StepVerifier.create(operation) .verifyComplete(); verify(managementNode).updateDisposition(lockToken1, dispositionStatus, null, null, null, null, null, null); verify(managementNode, never()).updateDisposition(lockToken2, dispositionStatus, null, null, null, null, null, null); } /** * Verifies that this receive deferred one messages from a sequence Number. */ @Test void receiveDeferredWithSequenceOneMessage() { final int fromSequenceNumber = 10; final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(managementNode.receiveDeferredMessages(any(), any(), any(), any())).thenReturn(Flux.just(receivedMessage)); StepVerifier.create(receiver.receiveDeferredMessage(fromSequenceNumber)) .expectNext(receivedMessage) .verifyComplete(); } /** * Verifies that this receive deferred messages from a sequence Number. */ @Test void receiveDeferredBatchFromSequenceNumber() { final long fromSequenceNumber1 = 10; final long fromSequenceNumber2 = 11; when(managementNode.receiveDeferredMessages(any(), any(), any(), any())) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.receiveDeferredMessages(Arrays.asList(fromSequenceNumber1, fromSequenceNumber2))) .expectNext(receivedMessage) .expectNext(receivedMessage2) .verifyComplete(); } /** * Verifies that the onClientClose is called. */ @Test void callsClientClose() { receiver.close(); verify(onClientClose).run(); } /** * Verifies that the onClientClose is only called once. 
*/ @Test void callsClientCloseOnce() { receiver.close(); receiver.close(); verify(onClientClose).run(); } /** * Tests that invalid options throws and null options. */ @Test void receiveIllegalOptions() { ServiceBusReceiverClientBuilder builder = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver() .topicName("baz").subscriptionName("bar") .receiveMode(ReceiveMode.PEEK_LOCK); Assertions.assertThrows(IllegalArgumentException.class, () -> builder.prefetchCount(-1)); } @Test void topicCorrectEntityPath() { final String topicName = "foo"; final String subscriptionName = "bar"; final String entityPath = String.join("/", topicName, "subscriptions", subscriptionName); final ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver() .topicName(topicName) .subscriptionName(subscriptionName) .receiveMode(ReceiveMode.PEEK_LOCK) .buildAsyncClient(); final String actual = receiver.getEntityPath(); final String actualNamespace = receiver.getFullyQualifiedNamespace(); Assertions.assertEquals(entityPath, actual); Assertions.assertEquals(NAMESPACE, actualNamespace); } /** * Verifies that client can call multiple receiveMessages on same receiver instance. 
*/ @Test void canPerformMultipleReceive() { final int numberOfEvents = 1; final List<Message> messages = getMessages(); ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(receivedMessage.getLockedUntil()).thenReturn(OffsetDateTime.now()); when(receivedMessage.getLockToken()).thenReturn(UUID.randomUUID().toString()); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); verify(amqpReceiveLink).addCredits(PREFETCH); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformGetSessionState() { final String sessionId = "a-session-id"; StepVerifier.create(receiver.getSessionState(sessionId)) .expectError(IllegalStateException.class) .verify(); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformSetSessionState() { final String sessionId = "a-session-id"; final byte[] sessionState = new byte[]{10, 11, 8}; StepVerifier.create(receiver.setSessionState(sessionId, sessionState)) .expectError(IllegalStateException.class) .verify(); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformRenewSessionLock() { final String sessionId = "a-session-id"; StepVerifier.create(receiver.renewSessionLock(sessionId)) .expectError(IllegalStateException.class) .verify(); } /** * Verifies that we can get a session state. 
*/ @SuppressWarnings("unchecked") @Test void getSessionState() { final byte[] bytes = new byte[]{95, 11, 54, 10}; when(managementNode.getSessionState(SESSION_ID, null)) .thenReturn(Mono.just(bytes), Mono.empty()); StepVerifier.create(sessionReceiver.getSessionState(SESSION_ID)) .expectNext(bytes) .expectComplete() .verify(); } /** * Verifies that we can set a session state. */ @Test void setSessionState() { final byte[] bytes = new byte[]{95, 11, 54, 10}; when(managementNode.setSessionState(SESSION_ID, bytes, null)).thenReturn(Mono.empty()); StepVerifier.create(sessionReceiver.setSessionState(SESSION_ID, bytes)) .expectComplete() .verify(); } /** * Verifies that we can renew a session state. */ @Test void renewSessionLock() { final OffsetDateTime expiry = Instant.ofEpochSecond(1588011761L).atOffset(ZoneOffset.UTC); when(managementNode.renewSessionLock(SESSION_ID, null)).thenReturn(Mono.just(expiry)); StepVerifier.create(sessionReceiver.renewSessionLock(SESSION_ID)) .expectNext(expiry) .expectComplete() .verify(); } /** * Verifies that we cannot renew a message lock when using a session receiver. */ @Test void cannotRenewMessageLockInSession() { when(receivedMessage.getLockToken()).thenReturn("lock-token"); when(receivedMessage.getSessionId()).thenReturn("fo"); StepVerifier.create(sessionReceiver.renewMessageLock(receivedMessage)) .expectError(IllegalStateException.class) .verify(); } /** * Verifies that we can auto-renew a message lock. 
*/ @Test void autoRenewMessageLock() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String lockToken = "some-token"; final int atMost = 5; final Duration totalSleepPeriod = maxDuration.plusMillis(500); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(managementNode.renewMessageLock(lockToken, null)) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .thenAwait(totalSleepPeriod) .then(() -> logger.info("Finished renewals for first sleep.")) .expectComplete() .verify(Duration.ofSeconds(5)); verify(managementNode, Mockito.atMost(atMost)).renewMessageLock(lockToken, null); } /** * Verifies that it errors when we try a null lock token. */ @Test void autoRenewMessageLockErrorNull() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); when(receivedMessage.getLockToken()).thenReturn(null); when(managementNode.renewMessageLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).renewMessageLock(anyString(), isNull()); } /** * Verifies that it errors when we try an empty string lock token. 
*/ @Test void autoRenewMessageLockErrorEmptyString() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String lockToken = ""; when(receivedMessage.getLockToken()).thenReturn(""); when(managementNode.renewMessageLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .expectError(IllegalArgumentException.class) .verify(); verify(managementNode, never()).renewMessageLock(anyString(), isNull()); } /** * Verifies that we can auto-renew a session lock. */ @Test void autoRenewSessionLock() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String sessionId = "some-token"; final int atMost = 5; final Duration totalSleepPeriod = maxDuration.plusMillis(500); when(managementNode.renewSessionLock(sessionId, null)) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(sessionId, maxDuration)) .thenAwait(totalSleepPeriod) .then(() -> logger.info("Finished renewals for first sleep.")) .expectComplete() .verify(Duration.ofSeconds(5)); verify(managementNode, Mockito.atMost(atMost)).renewSessionLock(sessionId, null); } /** * Verifies that it errors when we try a null lock token. 
*/ @Test void autoRenewSessionLockErrorNull() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); when(managementNode.renewSessionLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(null, maxDuration)) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).renewSessionLock(anyString(), isNull()); } /** * Verifies that it errors when we try an empty string session id */ @Test void autoRenewSessionLockErrorEmptyString() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String sessionId = ""; when(managementNode.renewSessionLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(sessionId, maxDuration)) .expectError(IllegalArgumentException.class) .verify(); verify(managementNode, never()).renewSessionLock(anyString(), isNull()); } @Test void autoCompleteMessage() { final int numberOfEvents = 3; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); final ReceiverOptions receiverOptions = new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, true); final ServiceBusReceiverAsyncClient receiver2 = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, receiverOptions, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); when(amqpReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.empty()); try { StepVerifier.create(receiver2.receiveMessages().take(numberOfEvents)) .then(() -> 
messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); } finally { receiver2.close(); } verify(amqpReceiveLink, times(numberOfEvents)).updateDisposition(lockToken, Accepted.getInstance()); } @Test void autoCompleteMessageSessionReceiver() { final int numberOfEvents = 3; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); final ReceiverOptions receiverOptions = new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, true, "Some-Session", null); final ServiceBusReceiverAsyncClient sessionReceiver2 = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, receiverOptions, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); when(sessionReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.empty()); try { StepVerifier.create(sessionReceiver2.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); } finally { sessionReceiver2.close(); } verify(sessionReceiveLink, times(numberOfEvents)).updateDisposition(lockToken, Accepted.getInstance()); } private List<Message> getMessages() { final Map<String, String> map = Collections.singletonMap("SAMPLE_HEADER", "foo"); return IntStream.range(0, 10) .mapToObj(index -> getMessage(PAYLOAD_BYTES, messageTrackingUUID, map)) .collect(Collectors.toList()); } private static Stream<Arguments> errorSourceNoneOnSettlement() { return Stream.of( Arguments.of(DispositionStatus.COMPLETED, DeliveryStateType.Accepted), Arguments.of(DispositionStatus.ABANDONED, DeliveryStateType.Modified)); } }
* This will mask an auto-complete / auto-renew exception. As far as I recall, we pass it downstream; in that case it would not be a receive error but a complete error. The handling should differ: check the exception type, and if the error already carries a ServiceBusErrorSource, do not re-map it. * It would be good to have a test covering this.
public Flux<ServiceBusReceivedMessageContext> receiveMessages() { final Flux<ServiceBusReceivedMessageContext> messageFlux = sessionManager != null ? sessionManager.receive() : getOrCreateConsumer().receive().map(ServiceBusReceivedMessageContext::new); final Flux<ServiceBusReceivedMessageContext> withAutoLockRenewal; if (receiverOptions.isAutoLockRenewEnabled()) { withAutoLockRenewal = new FluxAutoLockRenew(messageFlux, receiverOptions.getMaxLockRenewDuration(), renewalContainer, this::renewMessageLock); } else { withAutoLockRenewal = messageFlux; } final Flux<ServiceBusReceivedMessageContext> withAutoComplete; if (receiverOptions.isEnableAutoComplete()) { withAutoComplete = new FluxAutoComplete(withAutoLockRenewal, completionLock, context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(), context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty()); } else { withAutoComplete = withAutoLockRenewal; } return withAutoComplete .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); }
.onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    /**
     * Receives an infinite stream of message contexts from the Service Bus entity. The base stream comes
     * from the session manager (session receivers) or the consumer link; it is then optionally wrapped with
     * automatic lock renewal and automatic settlement per the receiver options. Errors are mapped with
     * {@link ServiceBusErrorSource#RECEIVE}.
     */
    public Flux<ServiceBusReceivedMessageContext> receiveMessages() {
        // Base stream: session pump when available, otherwise the plain consumer link wrapped in contexts.
        final Flux<ServiceBusReceivedMessageContext> messageFlux = sessionManager != null
            ? sessionManager.receive()
            : getOrCreateConsumer().receive().map(ServiceBusReceivedMessageContext::new);

        // Layer 1 (optional): keep each message's lock alive while it is processed.
        final Flux<ServiceBusReceivedMessageContext> withAutoLockRenewal;
        if (receiverOptions.isAutoLockRenewEnabled()) {
            withAutoLockRenewal = new FluxAutoLockRenew(messageFlux, receiverOptions.getMaxLockRenewDuration(),
                renewalContainer, this::renewMessageLock);
        } else {
            withAutoLockRenewal = messageFlux;
        }

        // Layer 2 (optional): settle each message downstream — complete on success, abandon on error.
        final Flux<ServiceBusReceivedMessageContext> withAutoComplete;
        if (receiverOptions.isEnableAutoComplete()) {
            withAutoComplete = new FluxAutoComplete(withAutoLockRenewal, completionLock,
                context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(),
                context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty());
        } else {
            withAutoComplete = withAutoLockRenewal;
        }

        // NOTE(review): this maps auto-complete/renewal failures as RECEIVE errors too — see the TODO above
        // about not re-mapping exceptions that already carry a ServiceBusErrorSource.
        return withAutoComplete
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }
class ServiceBusReceiverAsyncClient implements AutoCloseable {
    private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
    private static final String TRANSACTION_LINK_NAME = "coordinator";

    // Tracks in-flight lock-renewal operations so expired ones can be closed and evicted.
    private final LockContainer<LockRenewalOperation> renewalContainer;
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Expiration times for locks obtained/renewed through the management node (vs. the receive link).
    private final LockContainer<OffsetDateTime> managementNodeLocks;
    private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class);
    private final String fullyQualifiedNamespace;
    private final String entityPath;
    private final MessagingEntityType entityType;
    private final ReceiverOptions receiverOptions;
    private final ServiceBusConnectionProcessor connectionProcessor;
    private final TracerProvider tracerProvider;
    private final MessageSerializer messageSerializer;
    private final Runnable onClientClose;
    // Non-null only for session-enabled receivers; see the two constructors below.
    private final ServiceBusSessionManager sessionManager;
    // Serializes settlement performed by the auto-complete flux.
    private final Semaphore completionLock = new Semaphore(1);
    // Highest sequence number seen by peek operations; the next peek starts just after it.
    private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
    private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();

    /**
     * Creates a receiver that listens to a Service Bus resource.
     *
     * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
     * @param entityPath The name of the topic or queue.
     * @param entityType The type of the Service Bus resource.
     * @param receiverOptions Options when receiving messages.
     * @param connectionProcessor The AMQP connection to the Service Bus resource.
     * @param cleanupInterval Interval used to evict expired entries from the management-node lock container.
     * @param tracerProvider Tracer for telemetry.
     * @param messageSerializer Serializes and deserializes Service Bus messages.
     * @param onClientClose Operation to run when the client completes.
     */
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        // NOTE(review): quote placement in this message looks off — compare with the other checks.
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");

        this.managementNodeLocks = new LockContainer<>(cleanupInterval);
        // Renewal operations are evicted after two minutes; close them so their timers stop.
        this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
            logger.info("Closing expired renewal operation. lockToken[{}]. status[{}]. throwable[{}].",
                renewal.getLockToken(), renewal.getStatus(), renewal.getThrowable());
            renewal.close();
        });

        // Non-session receiver: no session manager.
        this.sessionManager = null;
    }

    /**
     * Creates a session-enabled receiver. Message delivery is driven by the given {@code sessionManager}.
     * Same parameters as the non-session constructor, plus the session manager (required).
     */
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose,
        ServiceBusSessionManager sessionManager) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
        this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null.");

        this.managementNodeLocks = new LockContainer<>(cleanupInterval);
        // Session renewals log the session id instead of a lock token.
        this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
            logger.info("Closing expired renewal operation. sessionId[{}]. status[{}]. throwable[{}]",
                renewal.getSessionId(), renewal.getStatus(), renewal.getThrowable());
            renewal.close();
        });
    }

    /**
     * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to
     * {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Service Bus namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }

    /**
     * Gets the Service Bus resource this client interacts with.
     *
     * @return The Service Bus resource this client interacts with.
     */
    public String getEntityPath() {
        return entityPath;
    }

    /**
     * Abandon a {@link ServiceBusReceivedMessage message}. This will make the message available
     * again for processing. Abandoning a message will increase the delivery count on the message.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     *
     * @return A {@link Mono} that completes when the Service Bus abandon operation completes.
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
     * mode.
     */
    public Mono<Void> abandon(ServiceBusReceivedMessage message) {
        // Delegates to updateDisposition with no properties and no transaction.
        return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null);
    }

    /**
     * Abandon a {@link ServiceBusReceivedMessage message} and updates the message's properties.
     * This will make the message available again for processing. Abandoning a message will increase the delivery count
     * on the message.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options to abandon the message. You can specify
     * {@link AbandonOptions#setPropertiesToModify(Map) properties} to modify on the message. The
     * {@code transactionContext} can be set using
     * {@link AbandonOptions#setTransactionContext(ServiceBusTransactionContext)}. The transaction should be
     * created first by {@link ServiceBusReceiverAsyncClient#createTransaction()} or
     * {@link ServiceBusSenderAsyncClient#createTransaction()}.
     *
     * @return A {@link Mono} that completes when the Service Bus operation finishes.
     * @throws NullPointerException if {@code message} or {@code options} is null. Also if
     * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
     * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
     * mode.
*/ public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'settlementOptions' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.ABANDONED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. */ public Mono<Void> complete(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null); } /** * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the * service. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to complete the message. The {@code transactionContext} can be set using * {@link CompleteOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. 
*/ public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, options.getTransactionContext()); } /** * Defers a {@link ServiceBusReceivedMessage message}. This will move message into the deferred subqueue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the Service Bus defer operation finishes. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * @see <a href="https: */ public Mono<Void> defer(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null); } /** * Defers a {@link ServiceBusReceivedMessage message} with modified message property. This will move message into * the deferred subqueue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to defer the message. You can specify {@link DeferOptions * to modify on the Message. The {@code transactionContext} can be set using * {@link DeferOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the defer operation finishes. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. 
     * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
     * mode.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
     */
    public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) {
        if (Objects.isNull(options)) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            // A transaction context without an id cannot be sent to the service.
            return monoError(logger, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }
        return updateDisposition(message, DispositionStatus.DEFERRED, null, null, options.getPropertiesToModify(),
            options.getTransactionContext());
    }

    /**
     * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     *
     * @return A {@link Mono} that completes when the dead letter operation finishes.
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
     * mode.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead-letter
     * queues</a>
     */
    public Mono<Void> deadLetter(ServiceBusReceivedMessage message) {
        // Uses the shared default options; deadLetter(message, options) does the actual work.
        return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS);
    }

    /**
     * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options to deadLetter the message. You can specify
     * {@link DeadLetterOptions#setPropertiesToModify(Map) properties} to modify on the message. The
     * {@code transactionContext} can be set using
     * {@link DeadLetterOptions#setTransactionContext(ServiceBusTransactionContext)}. The transaction should be
     * created first by {@link ServiceBusReceiverAsyncClient#createTransaction()} or
     * {@link ServiceBusSenderAsyncClient#createTransaction()}.
     * @return A {@link Mono} that completes when the dead letter operation finishes.
     * @throws NullPointerException if {@code message} or {@code options} is null. Also if
     * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
     * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
     * mode.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead-letter
     * queues</a>
     */
    public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) {
        if (Objects.isNull(options)) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            // A transaction context without an id cannot be sent to the service.
            return monoError(logger, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }
        // SUSPENDED is the disposition status the service uses for dead-lettering.
        return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(),
            options.getDeadLetterErrorDescription(), options.getPropertiesToModify(),
            options.getTransactionContext());
    }

    /**
     * Gets the state of a session given its identifier.
     *
     * @param sessionId Identifier of session to get.
     *
     * @return The session state or an empty Mono if there is no state set for the session.
     * @throws IllegalStateException if the receiver is a non-session receiver.
     */
    public Mono<byte[]> getSessionState(String sessionId) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState")));
        } else if (!receiverOptions.isSessionReceiver()) {
            return monoError(logger, new IllegalStateException("Cannot get session state on a non-session receiver."));
        }

        // Prefer the session manager (knows the live link name); otherwise go through the management node.
        if (sessionManager != null) {
            return sessionManager.getSessionState(sessionId);
        } else {
            return connectionProcessor
                .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
                .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId)));
        }
    }

    /**
     * Reads the next active message without changing the state of the receiver or the message source. The first call to
     * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
     * message in the entity.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Mono<ServiceBusReceivedMessage> peekMessage() {
        return peekMessage(receiverOptions.getSessionId());
    }

    /**
     * Reads the next active message without changing the state of the receiver or the message source. The first call to
     * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
     * message in the entity.
     *
     * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @throws IllegalStateException if the receiver is disposed.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek")));
        }

        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(channel -> {
                // Peek starts just after the highest sequence number seen so far.
                final long sequence = lastPeekedSequenceNumber.get() + 1;

                logger.verbose("Peek message from sequence number: {}", sequence);

                return channel.peek(sequence, sessionId, getLinkName(sessionId));
            })
            .handle((message, sink) -> {
                // Advance the high-water mark so the next peek does not return the same message.
                final long current = lastPeekedSequenceNumber
                    .updateAndGet(value -> Math.max(value, message.getSequenceNumber()));

                logger.verbose("Updating last peeked sequence number: {}", current);

                sink.next(message);
            });
    }

    /**
     * Starting from the given sequence number, reads next the active message without changing the state of the receiver
     * or the message source.
     *
     * @param sequenceNumber The sequence number from where to read the message.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Mono<ServiceBusReceivedMessage> peekMessageAt(long sequenceNumber) {
        return peekMessageAt(sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Starting from the given sequence number, reads next the active message without changing the state of the receiver
     * or the message source.
     *
     * @param sequenceNumber The sequence number from where to read the message.
     * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @throws IllegalStateException if the receiver is disposed.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Mono<ServiceBusReceivedMessage> peekMessageAt(long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt")));
        }

        // Unlike peekMessage(), an explicit sequence number does not advance lastPeekedSequenceNumber.
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId)));
    }

    /**
     * Reads the next batch of active messages without changing the state of the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     *
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) {
        return peekMessages(maxMessages, receiverOptions.getSessionId());
    }

    /**
     * Reads the next batch of active messages without changing the state of the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
        }

        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> {
                final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
                logger.verbose("Peek batch from sequence number: {}", nextSequenceNumber);

                final Flux<ServiceBusReceivedMessage> messages =
                    node.peek(nextSequenceNumber, sessionId, getLinkName(sessionId), maxMessages);

                // Bookkeeping stream: advances lastPeekedSequenceNumber to the batch's last sequence
                // number without emitting anything (sink.complete()). The switchIfEmpty fallback keeps
                // last() from erroring when the batch is empty, re-using the current high-water mark.
                // NOTE(review): 'messages' is subscribed twice (once for emission, once here) —
                // presumably the management-node Mono/Flux is replayable; confirm.
                final Mono<ServiceBusReceivedMessage> handle = messages
                    .switchIfEmpty(Mono.fromCallable(() -> {
                        ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(new byte[0]);
                        emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                        return emptyMessage;
                    }))
                    .last()
                    .handle((last, sink) -> {
                        final long current = lastPeekedSequenceNumber
                            .updateAndGet(value -> Math.max(value, last.getSequenceNumber()));

                        logger.verbose("Last peeked sequence number in batch: {}", current);
                        sink.complete();
                    });

                // Merge so the bookkeeping runs alongside emission; 'handle' contributes no elements.
                return Flux.merge(messages, handle);
            });
    }

    /**
     * Starting from the given sequence number, reads the next batch of active messages without changing the state of
     * the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sequenceNumber The sequence number from where to start reading messages.
     *
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Flux<ServiceBusReceivedMessage> peekMessagesAt(int maxMessages, long sequenceNumber) {
        return peekMessagesAt(maxMessages, sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Starting from the given sequence number, reads the next batch of active messages without changing the state of
     * the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sequenceNumber The sequence number from where to start reading messages.
     * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Flux<ServiceBusReceivedMessage> peekMessagesAt(int maxMessages, long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt")));
        }

        // Explicit starting sequence number: does not touch lastPeekedSequenceNumber.
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages));
    }

    // NOTE(review): the javadoc below is detached — the receiveMessages() method it describes is defined
    // elsewhere in this file.
    /**
     * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
     * This Flux continuously receives messages from a Service Bus entity until either:
     *
     * <ul>
     * <li>The receiver is closed.</li>
     * <li>The subscription to the Flux is disposed.</li>
     * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux#take(long)} or
     * {@link Flux#take(Duration)}).</li>
     * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
     * </ul>
     *
     * @return An <b>infinite</b> stream of messages from the Service Bus entity.
     */
    /**
     * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
     * sequence number.
* * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * * @return A deferred message with the matching {@code sequenceNumber}. */ public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) { return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId()); } /** * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using * sequence number. * * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * @param sessionId Session id of the deferred message. {@code null} if there is no session. * * @return A deferred message with the matching {@code sequenceNumber}. */ public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last()) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil())); } return receivedMessage; }); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}. 
     */
    public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) {
        return receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId());
    }

    /**
     * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
     * by using sequence number.
     *
     * @param sequenceNumbers The sequence numbers of the deferred messages.
     * @param sessionId Session id of the deferred messages. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}.
     */
    public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch")));
        }

        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId,
                getLinkName(sessionId), sequenceNumbers))
            .map(receivedMessage -> {
                if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                    return receivedMessage;
                }
                // In PEEK_LOCK the lock came from the management node; track it so renewal and
                // settlement for this message route through the management channel.
                if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) {
                    receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                        receivedMessage.getLockedUntil(),
                        receivedMessage.getLockedUntil()));
                }

                return receivedMessage;
            });
    }

    /**
     * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the
     * entity. When a message is received in {@link ReceiveMode#PEEK_LOCK} mode, the message is locked on the server for
     * this receiver instance for a duration as specified during the entity creation (LockDuration). If processing of
     * the message requires longer than this duration, the lock needs to be renewed. For each renewal, the lock is reset
     * to the entity's LockDuration value.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal.
     *
     * @return The new expiration time for the message.
     * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null.
     * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
     * mode.
     * @throws IllegalStateException if the receiver is a session receiver.
     * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
     */
    public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) {
        // Validation order: disposed receiver, null message, null/empty lock token, session receiver.
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
        } else if (Objects.isNull(message)) {
            return monoError(logger, new NullPointerException("'message' cannot be null."));
        } else if (Objects.isNull(message.getLockToken())) {
            return monoError(logger, new NullPointerException("'message.getLockToken()' cannot be null."));
        } else if (message.getLockToken().isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
        } else if (receiverOptions.isSessionReceiver()) {
            // Session receivers renew the session lock, not per-message locks.
            return monoError(logger, new IllegalStateException(
                String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken())));
        }

        // NOTE(review): the package-private renewMessageLock(String) already maps errors to RENEW_LOCK,
        // so this second onErrorMap looks redundant — confirm mapError is idempotent for mapped errors.
        return renewMessageLock(message.getLockToken())
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
    }

    /**
     * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the
     * entity.
     *
     * @param lockToken to be renewed.
     *
     * @return The new expiration time for the message.
     */
    Mono<OffsetDateTime> renewMessageLock(String lockToken) {
        // Renews via the management node and records the new expiration in the lock container so
        // later settlement/renewal for this token routes through the management channel.
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(serviceBusManagementNode ->
                serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null)))
            .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime,
                offsetDateTime))
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
    }

    /**
     * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param maxLockRenewalDuration Maximum duration to keep renewing the lock token.
     *
     * @return A lock renewal operation for the message.
     * @throws NullPointerException if {@code message}, {@code message.getLockToken()} or {@code
     * maxLockRenewalDuration} is null.
     * @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed.
     * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
     */
    public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) {
        // Validation order: disposed receiver, null message, null/empty lock token, session receiver,
        // then the renewal duration. A zero duration is accepted; only negative is rejected.
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock")));
        } else if (Objects.isNull(message)) {
            return monoError(logger, new NullPointerException("'message' cannot be null."));
        } else if (Objects.isNull(message.getLockToken())) {
            return monoError(logger, new NullPointerException("'message.getLockToken()' cannot be null."));
        } else if (message.getLockToken().isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
        } else if (receiverOptions.isSessionReceiver()) {
            // Session receivers renew the session lock, not per-message locks.
            return monoError(logger, new IllegalStateException(
                String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken())));
        } else if (maxLockRenewalDuration == null) {
            return monoError(logger, new NullPointerException("'maxLockRenewalDuration' cannot be null."));
        } else if (maxLockRenewalDuration.isNegative()) {
            return monoError(logger, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative."));
        }

        // 'false' = message-lock renewal (not a session lock). The container closes the operation
        // when it expires; see the constructor's eviction callback.
        final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(),
            maxLockRenewalDuration, false, ignored -> renewMessageLock(message));

        renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration),
            operation);

        return operation.getCompletionOperation()
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
    }

    /**
     * Renews the session lock.
     *
     * @param sessionId Identifier of session to get.
     *
     * @return The next expiration time for the session lock.
     * @throws IllegalStateException if the receiver is a non-session receiver.
*/ public Mono<OffsetDateTime> renewSessionLock(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot renew session lock on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.renewSessionLock(sessionId, linkName)) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Starts the auto lock renewal for a session id. * * @param sessionId Id for the session to renew. * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock. * * @return A lock renewal operation for the message. * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null. * @throws IllegalArgumentException if {@code sessionId} is an empty string. * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed. 
*/ public Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( "Cannot renew session lock on a non-session receiver.")); } else if (maxLockRenewalDuration == null) { return monoError(logger, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(logger, new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } else if (Objects.isNull(sessionId)) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty.")); } final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true, this::renewSessionLock); renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return operation .getCompletionOperation() .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Sets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * @param sessionState State to set on the session. * * @return A Mono that completes when the session is set * @throws IllegalStateException if the receiver is a non-session receiver. 
*/
public Mono<Void> setSessionState(String sessionId, byte[] sessionState) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState")));
    } else if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot set session state on a non-session receiver."));
    }

    // Associate the operation with the session's receive link when one exists.
    final String linkName = sessionManager != null
        ? sessionManager.getLinkName(sessionId)
        : null;

    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName));
}

/**
 * Starts a new service side transaction. The {@link ServiceBusTransactionContext} should be passed to all
 * operations that needs to be in this transaction.
 *
 * <p><strong>Create a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.createTransaction}
 *
 * @return The {@link Mono} that finishes this operation on service bus resource.
 */
public Mono<ServiceBusTransactionContext> createTransaction() {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction")));
    }

    // Transactions are coordinated over a dedicated AMQP session named "coordinator".
    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.createTransaction())
        .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId()));
}

/**
 * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
 * <p><strong>Commit a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.commitTransaction}
 *
 * @param transactionContext to be committed.
 *
 * @return The {@link Mono} that finishes this operation on service bus resource.
 * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is
 *     null.
 */
public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }

    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction(
            transactionContext.getTransactionId())));
}

/**
 * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
 * <p><strong>Rollback a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.rollbackTransaction}
 *
 * @param transactionContext to be rollbacked.
 *
 * @return The {@link Mono} that finishes this operation on service bus resource.
 * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is
 * null.
*/
public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }

    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction(
            transactionContext.getTransactionId())));
}

/**
 * Disposes of the consumer by closing the underlying connection to the service.
 */
@Override
public void close() {
    // getAndSet makes close() idempotent: only the first caller performs cleanup.
    if (isDisposed.getAndSet(true)) {
        return;
    }

    try {
        // Wait for an in-flight settlement operation to finish before tearing down links.
        completionLock.acquire();
    } catch (InterruptedException e) {
        logger.info("Unable to obtain completion lock.", e);
        // Restore the interrupt status so callers/executors can observe the interruption;
        // swallowing it would silently discard the signal. Cleanup still proceeds.
        Thread.currentThread().interrupt();
    }

    logger.info("Removing receiver links.");
    final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null);
    if (disposed != null) {
        disposed.close();
    }

    if (sessionManager != null) {
        sessionManager.close();
    }

    onClientClose.run();
}

/**
 * @return receiver options set by user;
 */
ReceiverOptions getReceiverOptions() {
    return receiverOptions;
}

/**
 * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are
 * held by the management node when they are received from the management node or management operations are
 * performed using that {@code lockToken}.
 *
 * @param lockToken Lock token to check for.
 *
 * @return {@code true} if the management node contains the lock token and false otherwise.
*/
private boolean isManagementToken(String lockToken) {
    return managementNodeLocks.containsUnexpired(lockToken);
}

/**
 * Settles a message (complete/abandon/defer/dead-letter) either over the session manager, the active
 * receive link, or the management node, depending on how the message's lock is held.
 */
private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus,
    String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify,
    ServiceBusTransactionContext transactionContext) {

    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue())));
    } else if (Objects.isNull(message)) {
        return monoError(logger, new NullPointerException("'message' cannot be null."));
    }

    final String lockToken = message.getLockToken();
    final String sessionId = message.getSessionId();

    // Settlement requires a lock, which only exists in PEEK_LOCK mode.
    if (receiverOptions.getReceiveMode() != ReceiveMode.PEEK_LOCK) {
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus))));
    }

    // Fall back to the receiver's configured session id when the message carries none.
    final String sessionIdToUse;
    if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) {
        sessionIdToUse = receiverOptions.getSessionId();
    } else {
        sessionIdToUse = sessionId;
    }

    logger.info("{}: Update started. Disposition: {}. Lock: {}. SessionId: {}.", entityPath, dispositionStatus,
        lockToken, sessionIdToUse);

    // Fallback path: settle via the management node, then release local lock bookkeeping.
    final Mono<Void> performOnManagement = connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
            deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext))
        .then(Mono.fromRunnable(() -> {
            logger.info("{}: Management node Update completed. Disposition: {}. Lock: {}.",
                entityPath, dispositionStatus, lockToken);

            managementNodeLocks.remove(lockToken);
            renewalContainer.remove(lockToken);
        }));

    Mono<Void> updateDispositionOperation;
    if (sessionManager != null) {
        // Session receiver: try the session manager first; on failure, use the management node.
        updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus,
            propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext)
            .flatMap(isSuccess -> {
                if (isSuccess) {
                    renewalContainer.remove(lockToken);
                    return Mono.empty();
                }

                logger.info("Could not perform on session manger. Performing on management node.");
                return performOnManagement;
            });
    } else {
        final ServiceBusAsyncConsumer existingConsumer = consumer.get();
        // Locks obtained via management operations must be settled via the management node.
        if (isManagementToken(lockToken) || existingConsumer == null) {
            updateDispositionOperation = performOnManagement;
        } else {
            updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus,
                deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext)
                .then(Mono.fromRunnable(() -> {
                    logger.info("{}: Update completed. Disposition: {}. Lock: {}.",
                        entityPath, dispositionStatus, lockToken);

                    renewalContainer.remove(lockToken);
                }));
        }
    }

    return updateDispositionOperation
        .onErrorMap(throwable -> {
            // Tag AMQP errors with the settlement operation that caused them.
            ServiceBusErrorSource errorSource;
            if (throwable instanceof AmqpException) {
                switch (dispositionStatus) {
                    case COMPLETED:
                        errorSource = ServiceBusErrorSource.COMPLETE;
                        break;
                    case DEFERRED:
                        errorSource = ServiceBusErrorSource.DEFER;
                        break;
                    case SUSPENDED:
                        errorSource = ServiceBusErrorSource.DEAD_LETTER;
                        break;
                    case ABANDONED:
                        errorSource = ServiceBusErrorSource.ABANDONED;
                        break;
                    default:
                        errorSource = ServiceBusErrorSource.UNKNOWN;
                }
                return new ServiceBusAmqpException((AmqpException) throwable, errorSource);
            } else {
                return throwable;
            }
        });
}

/**
 * Returns the active consumer, creating (and racing to publish) a new one when none exists.
 */
private ServiceBusAsyncConsumer getOrCreateConsumer() {
    final ServiceBusAsyncConsumer existing = consumer.get();
    if (existing != null) {
        return existing;
    }

    final String linkName = StringUtil.getRandomString(entityPath);
    logger.info("{}: Creating consumer for link '{}'", entityPath, linkName);

    // .repeat() recreates the receive link whenever the previous one terminates.
    final Flux<ServiceBusReceiveLink> receiveLink = connectionProcessor.flatMap(connection -> {
        if (receiverOptions.isSessionReceiver()) {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType, receiverOptions.getSessionId());
        } else {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType);
        }
    })
        .doOnNext(next -> {
            final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]"
                + " sessionEnabled? {} transferEntityPath: [{}], entityType: [{}]";
            logger.verbose(format, next.getEntityPath(), receiverOptions.getReceiveMode(),
                CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()), "N/A", entityType);
        })
        .repeat();

    final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
    final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith(
        new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy,
            receiverOptions.getReceiveMode()));
    final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor,
        messageSerializer, receiverOptions);

    // Another thread may have won the race; close the extra consumer and use theirs.
    if (consumer.compareAndSet(null, newConsumer)) {
        return newConsumer;
    } else {
        newConsumer.close();
        return consumer.get();
    }
}

/**
 * Gets the name of the receive link used for the given session, or the active consumer's link name.
 * Returns null when the receiver has not connected via a receive link (operations then go
 * through the management node).
 *
 * @return The name of the receive link, or null of it has not connected via a receive link.
 */
private String getLinkName(String sessionId) {
    if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) {
        return sessionManager.getLinkName(sessionId);
    } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) {
        return null;
    } else {
        final ServiceBusAsyncConsumer existing = consumer.get();
        return existing != null ? existing.getLinkName() : null;
    }
}

/**
 * Map the error to {@link ServiceBusAmqpException}
 */
private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) {
    if (throwable instanceof AmqpException) {
        return new ServiceBusAmqpException((AmqpException) throwable, errorSource);
    } else {
        return throwable;
    }
}
}
class ServiceBusReceiverAsyncClient implements AutoCloseable {
    private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
    // Name of the AMQP session used to coordinate service-side transactions.
    private static final String TRANSACTION_LINK_NAME = "coordinator";

    // Active auto lock-renewal operations, keyed by lock token or session id.
    private final LockContainer<LockRenewalOperation> renewalContainer;
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Unexpired lock tokens obtained through the management node (see isManagementToken).
    private final LockContainer<OffsetDateTime> managementNodeLocks;
    private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class);
    private final String fullyQualifiedNamespace;
    private final String entityPath;
    private final MessagingEntityType entityType;
    private final ReceiverOptions receiverOptions;
    private final ServiceBusConnectionProcessor connectionProcessor;
    private final TracerProvider tracerProvider;
    private final MessageSerializer messageSerializer;
    // Invoked once from close() so the owning builder can release shared resources.
    private final Runnable onClientClose;
    // Non-null only for session-enabled receivers.
    private final ServiceBusSessionManager sessionManager;
    // Guards close() against racing with an in-flight settlement operation.
    private final Semaphore completionLock = new Semaphore(1);

    // Highest sequence number seen by peek operations; the next peek starts after it.
    private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
    private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();

    /**
     * Creates a receiver that listens to a Service Bus resource.
     *
     * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
     * @param entityPath The name of the topic or queue.
     * @param entityType The type of the Service Bus resource.
     * @param receiverOptions Options when receiving messages.
     * @param connectionProcessor The AMQP connection to the Service Bus resource.
     * @param tracerProvider Tracer for telemetry.
     * @param messageSerializer Serializes and deserializes Service Bus messages.
     * @param onClientClose Operation to run when the client completes.
*/
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
    ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
    TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
    this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
    this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
    this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
        "'connectionProcessor' cannot be null.");
    this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
    this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
    this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");

    this.managementNodeLocks = new LockContainer<>(cleanupInterval);
    // Expired renewal operations are closed and logged with their lock token.
    this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
        logger.info("Closing expired renewal operation. lockToken[{}]. status[{}]. throwable[{}].",
            renewal.getLockToken(), renewal.getStatus(), renewal.getThrowable());
        renewal.close();
    });

    // Non-session receiver: no session manager.
    this.sessionManager = null;
}

/**
 * Creates a session-enabled receiver; identical to the other constructor except a non-null
 * {@code sessionManager} is required.
 */
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
    ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
    TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose,
    ServiceBusSessionManager sessionManager) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
    this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
    this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
    this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
        "'connectionProcessor' cannot be null.");
    this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
    this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
    this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
    this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null.");

    this.managementNodeLocks = new LockContainer<>(cleanupInterval);
    this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
        logger.info("Closing expired renewal operation. sessionId[{}]. status[{}]. throwable[{}]",
            renewal.getSessionId(), renewal.getStatus(), renewal.getThrowable());
        renewal.close();
    });
}

/**
 * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to
 * {@code {yournamespace}.servicebus.windows.net}.
 *
 * @return The fully qualified Service Bus namespace that the connection is associated with.
*/
public String getFullyQualifiedNamespace() {
    return fullyQualifiedNamespace;
}

/**
 * Gets the Service Bus resource this client interacts with.
 *
 * @return The Service Bus resource this client interacts with.
 */
public String getEntityPath() {
    return entityPath;
}

/**
 * Abandon a {@link ServiceBusReceivedMessage message}. This will make the message available
 * again for processing. Abandoning a message will increase the delivery count on the message.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 *
 * @return A {@link Mono} that completes when the Service Bus abandon operation completes.
 * @throws NullPointerException if {@code message} is null.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
 */
public Mono<Void> abandon(ServiceBusReceivedMessage message) {
    return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null);
}

/**
 * Abandon a {@link ServiceBusReceivedMessage message} updates the message's properties.
 * This will make the message available again for processing. Abandoning a message will increase the delivery count
 * on the message.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options to abandon the message. You can specify properties to modify on the message.
 *     The {@code transactionContext} can be set using the options; a transaction should be
 *     created first by {@link ServiceBusReceiverAsyncClient#createTransaction()} or a
 *     {@link ServiceBusSenderAsyncClient}.
 *
 * @return A {@link Mono} that completes when the Service Bus operation finishes.
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
*/
public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) {
    if (Objects.isNull(options)) {
        // The parameter is named 'options'; the previous message referenced a non-existent
        // 'settlementOptions' parameter. Keep consistent with complete/defer/deadLetter.
        return monoError(logger, new NullPointerException("'options' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(logger, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }

    return updateDisposition(message, DispositionStatus.ABANDONED, null, null,
        options.getPropertiesToModify(), options.getTransactionContext());
}

/**
 * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 *
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 * @throws NullPointerException if {@code message} is null.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
 */
public Mono<Void> complete(ServiceBusReceivedMessage message) {
    return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null);
}

/**
 * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the
 * service.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options to complete the message. The {@code transactionContext} can be set using
 *     the options; a transaction should be created first by
 *     {@link ServiceBusReceiverAsyncClient#createTransaction()} or a
 *     {@link ServiceBusSenderAsyncClient}.
 *
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
*/
public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) {
    if (Objects.isNull(options)) {
        return monoError(logger, new NullPointerException("'options' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(logger, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }

    return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null,
        options.getTransactionContext());
}

/**
 * Defers a {@link ServiceBusReceivedMessage message}. This will move message into the deferred subqueue.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 *
 * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
 * @throws NullPointerException if {@code message} is null.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
 */
public Mono<Void> defer(ServiceBusReceivedMessage message) {
    return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null);
}

/**
 * Defers a {@link ServiceBusReceivedMessage message} with modified message property. This will move message into
 * the deferred subqueue.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options to defer the message. You can specify properties to modify on the Message.
 *     The {@code transactionContext} can be set using the options; a transaction should be
 *     created first by {@link ServiceBusReceiverAsyncClient#createTransaction()} or a
 *     {@link ServiceBusSenderAsyncClient}.
 *
 * @return A {@link Mono} that completes when the defer operation finishes.
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
 */
public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) {
    if (Objects.isNull(options)) {
        return monoError(logger, new NullPointerException("'options' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(logger, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }

    return updateDisposition(message, DispositionStatus.DEFERRED, null, null,
        options.getPropertiesToModify(), options.getTransactionContext());
}

/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code message} is null.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead letter
 *     queues</a>
 */
public Mono<Void> deadLetter(ServiceBusReceivedMessage message) {
    // Uses default (empty) dead-letter options: no reason/description/properties.
    return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS);
}

/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options to deadLetter the message. You can specify a dead-letter reason, error
 *     description and properties to modify. The {@code transactionContext} can be set using
 *     the options; a transaction should be created first by
 *     {@link ServiceBusReceiverAsyncClient#createTransaction()} or a
 *     {@link ServiceBusSenderAsyncClient}.
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead letter
 *     queues</a>
 */
public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) {
    if (Objects.isNull(options)) {
        return monoError(logger, new NullPointerException("'options' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(logger, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }

    return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(),
        options.getDeadLetterErrorDescription(), options.getPropertiesToModify(),
        options.getTransactionContext());
}

/**
 * Gets the state of the session if this receiver is a session receiver.
 *
 * @return The session state or an empty Mono if there is no state set for the session.
 * @throws IllegalStateException if the receiver is a non-session receiver.
 */
public Mono<byte[]> getSessionState() {
    return getSessionState(receiverOptions.getSessionId());
}

/**
 * Reads the next active message without changing the state of the receiver or the message source. The first call to
 * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
 * message in the entity.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Mono<ServiceBusReceivedMessage> peekMessage() {
    return peekMessage(receiverOptions.getSessionId());
}

/**
 * Reads the next active message without changing the state of the receiver or the message source. The first call to
 * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
 * message in the entity.
 *
 * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
* @throws IllegalStateException if the receiver is disposed.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek")));
    }

    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(channel -> {
            // Peek the message immediately after the last one this client has seen.
            final long sequence = lastPeekedSequenceNumber.get() + 1;

            logger.verbose("Peek message from sequence number: {}", sequence);

            return channel.peek(sequence, sessionId, getLinkName(sessionId));
        })
        .handle((message, sink) -> {
            // Advance the cursor monotonically so subsequent peeks don't re-read this message.
            final long current = lastPeekedSequenceNumber
                .updateAndGet(value -> Math.max(value, message.getSequenceNumber()));

            logger.verbose("Updating last peeked sequence number: {}", current);

            sink.next(message);
        });
}

/**
 * Starting from the given sequence number, reads next the active message without changing the state of the receiver
 * or the message source.
 *
 * @param sequenceNumber The sequence number from where to read the message.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Mono<ServiceBusReceivedMessage> peekMessageAt(long sequenceNumber) {
    return peekMessageAt(sequenceNumber, receiverOptions.getSessionId());
}

/**
 * Starting from the given sequence number, reads next the active message without changing the state of the receiver
 * or the message source.
 *
 * @param sequenceNumber The sequence number from where to read the message.
 * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
Mono<ServiceBusReceivedMessage> peekMessageAt(long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt")));
    }

    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId)));
}

/**
 * Reads the next batch of active messages without changing the state of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) {
    return peekMessages(maxMessages, receiverOptions.getSessionId());
}

/**
 * Reads the next batch of active messages without changing the state of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 *
 * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
    }

    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> {
            final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
            logger.verbose("Peek batch from sequence number: {}", nextSequenceNumber);

            final Flux<ServiceBusReceivedMessage> messages =
                node.peek(nextSequenceNumber, sessionId, getLinkName(sessionId), maxMessages);

            // Side channel that advances the peek cursor to the batch's last sequence number.
            // The switchIfEmpty placeholder keeps `.last()` from erroring on an empty batch;
            // the handle(...) never emits, so only `messages` items reach the subscriber.
            final Mono<ServiceBusReceivedMessage> handle = messages
                .switchIfEmpty(Mono.fromCallable(() -> {
                    ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(new byte[0]);
                    emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                    return emptyMessage;
                }))
                .last()
                .handle((last, sink) -> {
                    final long current = lastPeekedSequenceNumber
                        .updateAndGet(value -> Math.max(value, last.getSequenceNumber()));

                    logger.verbose("Last peeked sequence number in batch: {}", current);
                    sink.complete();
                });

            return Flux.merge(messages, handle);
        });
}

/**
 * Starting from the given sequence number, reads the next batch of active messages without changing the state of
 * the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Flux<ServiceBusReceivedMessage> peekMessagesAt(int maxMessages, long sequenceNumber) {
    return peekMessagesAt(maxMessages, sequenceNumber, receiverOptions.getSessionId());
}

/**
 * Starting from the given sequence number, reads the next batch of active messages without changing the state of
 * the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 *
 * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
Flux<ServiceBusReceivedMessage> peekMessagesAt(int maxMessages, long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt")));
    }

    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages));
}

/**
 * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
 * This Flux continuously receives messages from a Service Bus entity until either:
 *
 * <ul>
 * <li>The receiver is closed.</li>
 * <li>The subscription to the Flux is disposed.</li>
 * <li>A terminal signal from a downstream subscriber is propagated upstream.</li>
 * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
 * </ul>
 *
 * @return An <b>infinite</b> stream of messages from the Service Bus entity.
 */
/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The {@link ServiceBusReceivedMessage#getSequenceNumber() sequence number} of the
 *     message.
 *
 * @return A deferred message with the matching {@code sequenceNumber}.
 */
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
    return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId());
}

/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The {@link ServiceBusReceivedMessage#getSequenceNumber() sequence number} of the
 *     message.
 * @param sessionId Session id of the deferred message. {@code null} if there is no session.
 *
 * @return A deferred message with the matching {@code sequenceNumber}.
 */
Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) {
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId,
            getLinkName(sessionId), Collections.singleton(sequenceNumber)).last())
        .map(receivedMessage -> {
            if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                return receivedMessage;
            }
            // In PEEK_LOCK mode the lock came from the management node; track it so settlement
            // of this message is routed back through the management node.
            if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) {
                receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                    receivedMessage.getLockedUntil(),
                    receivedMessage.getLockedUntil()));
            }

            return receivedMessage;
        });
}

/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
 * by using sequence number.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 *
 * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
 */
public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) {
    return receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId());
}

/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}.
Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * @param sessionId Session id of the deferred messages. {@code null} if there is no session. * * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}. */ Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), sequenceNumbers)) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil())); } return receivedMessage; }); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. When a message is received in {@link ReceiveMode * this receiver instance for a duration as specified during the entity creation (LockDuration). If processing of * the message requires longer than this duration, the lock needs to be renewed. For each renewal, the lock is reset * to the entity's LockDuration value. * * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal. * * @return The new expiration time for the message. * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. 
* @throws IllegalStateException if the receiver is a session receiver. * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value. */ public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock"))); } else if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } else if (Objects.isNull(message.getLockToken())) { return monoError(logger, new NullPointerException("'message.getLockToken()' cannot be null.")); } else if (message.getLockToken().isEmpty()) { return monoError(logger, new IllegalArgumentException("'message.getLockToken()' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken()))); } return renewMessageLock(message.getLockToken()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. * * @param lockToken to be renewed. * * @return The new expiration time for the message. */ Mono<OffsetDateTime> renewMessageLock(String lockToken) { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(serviceBusManagementNode -> serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null))) .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime, offsetDateTime)); } /** * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param maxLockRenewalDuration Maximum duration to keep renewing the lock token. 
* * @return A lock renewal operation for the message. * @throws NullPointerException if {@code message}, {@code message.getLockToken()} or {@code * maxLockRenewalDuration} is null. * @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed. * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value. */ public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock"))); } else if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } else if (Objects.isNull(message.getLockToken())) { return monoError(logger, new NullPointerException("'message.getLockToken()' cannot be null.")); } else if (message.getLockToken().isEmpty()) { return monoError(logger, new IllegalArgumentException("'message.getLockToken()' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken()))); } else if (maxLockRenewalDuration == null) { return monoError(logger, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(logger, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative.")); } final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(), maxLockRenewalDuration, false, ignored -> renewMessageLock(message)); renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return operation.getCompletionOperation() .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Renews the session lock if this receiver is a 
session receiver. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ public Mono<OffsetDateTime> renewSessionLock() { return renewSessionLock(receiverOptions.getSessionId()); } /** * Starts the auto lock renewal for the session this receiver works for. * * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock. * * @return A lock renewal operation for the message. * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null. * @throws IllegalArgumentException if {@code sessionId} is an empty string. * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed. */ public Mono<Void> renewSessionLock(Duration maxLockRenewalDuration) { return this.renewSessionLock(receiverOptions.getSessionId(), maxLockRenewalDuration); } /** * Sets the state of the session this receiver works for. * * @param sessionState State to set on the session. * * @return A Mono that completes when the session is set * @throws IllegalStateException if the receiver is a non-session receiver. */ public Mono<Void> setSessionState(byte[] sessionState) { return this.setSessionState(receiverOptions.getSessionId(), sessionState); } /** * Starts a new service side transaction. The {@link ServiceBusTransactionContext} should be passed to all * operations that needs to be in this transaction. * * <p><strong>Create a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.createTransaction} * * @return The {@link Mono} that finishes this operation on service bus resource. 
*/ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())); } /** * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * <p><strong>Commit a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.commitTransaction} * * @param transactionContext to be committed. * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is * null. */ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. 
* <p><strong>Rollback a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.rollbackTransaction} * * @param transactionContext to be rollbacked. * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is * null. */ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Disposes of the consumer by closing the underlying connection to the service. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } try { completionLock.acquire(); } catch (InterruptedException e) { logger.info("Unable to obtain completion lock.", e); } logger.info("Removing receiver links."); final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null); if (disposed != null) { disposed.close(); } if (sessionManager != null) { sessionManager.close(); } onClientClose.run(); } /** * @return receiver options set by user; */ ReceiverOptions getReceiverOptions() { return receiverOptions; } /** * Gets whether or not the management node contains the message lock token and it has not expired. 
Lock tokens are * held by the management node when they are received from the management node or management operations are * performed using that {@code lockToken}. * * @param lockToken Lock token to check for. * * @return {@code true} if the management node contains the lock token and false otherwise. */ private boolean isManagementToken(String lockToken) { return managementNodeLocks.containsUnexpired(lockToken); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue()))); } else if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } final String lockToken = message.getLockToken(); final String sessionId = message.getSessionId(); if (receiverOptions.getReceiveMode() != ReceiveMode.PEEK_LOCK) { return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format( "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus)))); } final String sessionIdToUse; if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) { sessionIdToUse = receiverOptions.getSessionId(); } else { sessionIdToUse = sessionId; } logger.info("{}: Update started. Disposition: {}. Lock: {}. 
SessionId: {}.", entityPath, dispositionStatus, lockToken, sessionIdToUse); final Mono<Void> performOnManagement = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext)) .then(Mono.fromRunnable(() -> { logger.info("{}: Management node Update completed. Disposition: {}. Lock: {}.", entityPath, dispositionStatus, lockToken); managementNodeLocks.remove(lockToken); renewalContainer.remove(lockToken); })); Mono<Void> updateDispositionOperation; if (sessionManager != null) { updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus, propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext) .flatMap(isSuccess -> { if (isSuccess) { renewalContainer.remove(lockToken); return Mono.empty(); } logger.info("Could not perform on session manger. Performing on management node."); return performOnManagement; }); } else { final ServiceBusAsyncConsumer existingConsumer = consumer.get(); if (isManagementToken(lockToken) || existingConsumer == null) { updateDispositionOperation = performOnManagement; } else { updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext) .then(Mono.fromRunnable(() -> { logger.info("{}: Update completed. Disposition: {}. 
Lock: {}.", entityPath, dispositionStatus, lockToken); renewalContainer.remove(lockToken); })); } } return updateDispositionOperation .onErrorMap(throwable -> { if (receiverOptions.isEnableAutoComplete() && throwable instanceof AmqpException) { switch (dispositionStatus) { case COMPLETED: return new ServiceBusAmqpException((AmqpException) throwable, ServiceBusErrorSource.COMPLETE); case ABANDONED: return new ServiceBusAmqpException((AmqpException) throwable, ServiceBusErrorSource.ABANDONED); default: } } return throwable; }); } private ServiceBusAsyncConsumer getOrCreateConsumer() { final ServiceBusAsyncConsumer existing = consumer.get(); if (existing != null) { return existing; } final String linkName = StringUtil.getRandomString(entityPath); logger.info("{}: Creating consumer for link '{}'", entityPath, linkName); final Flux<ServiceBusReceiveLink> receiveLink = connectionProcessor.flatMap(connection -> { if (receiverOptions.isSessionReceiver()) { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, receiverOptions.getSessionId()); } else { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType); } }) .doOnNext(next -> { final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]" + " sessionEnabled? 
{} transferEntityPath: [{}], entityType: [{}]"; logger.verbose(format, next.getEntityPath(), receiverOptions.getReceiveMode(), CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()), "N/A", entityType); }) .repeat(); final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions()); final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith( new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy, receiverOptions.getReceiveMode())); final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor, messageSerializer, receiverOptions); if (consumer.compareAndSet(null, newConsumer)) { return newConsumer; } else { newConsumer.close(); return consumer.get(); } } /** * If the receiver has not connected via {@link * through the management node. * * @return The name of the receive link, or null of it has not connected via a receive link. */ private String getLinkName(String sessionId) { if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) { return sessionManager.getLinkName(sessionId); } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) { return null; } else { final ServiceBusAsyncConsumer existing = consumer.get(); return existing != null ? existing.getLinkName() : null; } } Mono<OffsetDateTime> renewSessionLock(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot renew session lock on a non-session receiver.")); } final String linkName = sessionManager != null ? 
sessionManager.getLinkName(sessionId) : null; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.renewSessionLock(sessionId, linkName)) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( "Cannot renew session lock on a non-session receiver.")); } else if (maxLockRenewalDuration == null) { return monoError(logger, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(logger, new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } else if (Objects.isNull(sessionId)) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty.")); } final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true, this::renewSessionLock); renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return operation.getCompletionOperation() .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> setSessionState(String sessionId, byte[] sessionState) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot set session state on a non-session receiver.")); } final String linkName 
= sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName)); } Mono<byte[]> getSessionState(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot get session state on a non-session receiver.")); } if (sessionManager != null) { return sessionManager.getSessionState(sessionId); } else { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId))); } } /** * Map the error to {@link ServiceBusAmqpException} */ private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) { if ((throwable instanceof ServiceBusAmqpException) || !(throwable instanceof AmqpException)) { return throwable; } else { return new ServiceBusAmqpException((AmqpException) throwable, errorSource); } } }
verifyErrorSatisfies is the correct one. Same with the other ones.
void errorSourceOnSessionLock() { when(managementNode.renewSessionLock(SESSION_ID, null)).thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(sessionReceiver.renewSessionLock(SESSION_ID)) .verifyErrorMatches(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RENEW_LOCK, actual); return true; }); }
.verifyErrorMatches(throwable -> {
void errorSourceOnSessionLock() { when(managementNode.renewSessionLock(SESSION_ID, null)).thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(sessionReceiver.renewSessionLock(SESSION_ID)) .verifyErrorSatisfies(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RENEW_LOCK, actual); }); }
class ServiceBusReceiverAsyncClientTest { private static final ClientOptions CLIENT_OPTIONS = new ClientOptions(); private static final String PAYLOAD = "hello"; private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8); private static final int PREFETCH = 5; private static final String NAMESPACE = "my-namespace-foo.net"; private static final String ENTITY_PATH = "queue-name"; private static final MessagingEntityType ENTITY_TYPE = MessagingEntityType.QUEUE; private static final String NAMESPACE_CONNECTION_STRING = String.format( "Endpoint=sb: NAMESPACE, "some-name", "something-else"); private static final Duration CLEANUP_INTERVAL = Duration.ofSeconds(10); private static final String SESSION_ID = "my-session-id"; private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientTest.class); private final String messageTrackingUUID = UUID.randomUUID().toString(); private final ReplayProcessor<AmqpEndpointState> endpointProcessor = ReplayProcessor.cacheLast(); private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER); private final DirectProcessor<Message> messageProcessor = DirectProcessor.create(); private final FluxSink<Message> messageSink = messageProcessor.sink(FluxSink.OverflowStrategy.BUFFER); private ServiceBusConnectionProcessor connectionProcessor; private ServiceBusReceiverAsyncClient receiver; private ServiceBusReceiverAsyncClient sessionReceiver; @Mock private ServiceBusReactorReceiver amqpReceiveLink; @Mock private ServiceBusReactorReceiver sessionReceiveLink; @Mock private ServiceBusAmqpConnection connection; @Mock private TokenCredential tokenCredential; @Mock private MessageSerializer messageSerializer; @Mock private TracerProvider tracerProvider; @Mock private ServiceBusManagementNode managementNode; @Mock private ServiceBusReceivedMessage receivedMessage; @Mock private ServiceBusReceivedMessage receivedMessage2; @Mock private Runnable onClientClose; @BeforeAll static 
void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(100)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } @BeforeEach void setup(TestInfo testInfo) { logger.info("[{}] Setting up.", testInfo.getDisplayName()); MockitoAnnotations.initMocks(this); when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single())); when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor); when(sessionReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single())); when(sessionReceiveLink.getEndpointStates()).thenReturn(endpointProcessor); ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential, CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(), ProxyOptions.SYSTEM_DEFAULTS, Schedulers.boundedElastic(), CLIENT_OPTIONS, SslDomain.VerifyMode.VERIFY_PEER_NAME); when(connection.getEndpointStates()).thenReturn(endpointProcessor); endpointSink.next(AmqpEndpointState.ACTIVE); when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)) .thenReturn(Mono.just(managementNode)); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class))).thenReturn(Mono.just(amqpReceiveLink)); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class), anyString())).thenReturn(Mono.just(sessionReceiveLink)); connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection)) .subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry())); receiver = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, false), connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); sessionReceiver = new 
ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, false, "Some-Session", false, null), connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); } @AfterEach void teardown(TestInfo testInfo) { logger.info("[{}] Tearing down.", testInfo.getDisplayName()); receiver.close(); Mockito.framework().clearInlineMocks(); } /** * Verifies that when user calls peek more than one time, It returns different object. */ @SuppressWarnings("unchecked") @Test void peekTwoMessages() { final long sequence1 = 10; final long sequence2 = 12; final ArgumentCaptor<Long> captor = ArgumentCaptor.forClass(Long.class); when(receivedMessage.getSequenceNumber()).thenReturn(sequence1); when(receivedMessage2.getSequenceNumber()).thenReturn(sequence2); when(managementNode.peek(anyLong(), isNull(), isNull())) .thenReturn(Mono.just(receivedMessage), Mono.just(receivedMessage2)); StepVerifier.create(receiver.peekMessage()) .expectNext(receivedMessage) .verifyComplete(); StepVerifier.create(receiver.peekMessage()) .expectNext(receivedMessage2) .verifyComplete(); verify(managementNode, times(2)).peek(captor.capture(), isNull(), isNull()); final List<Long> allValues = captor.getAllValues(); Assertions.assertEquals(2, allValues.size()); Assertions.assertTrue(allValues.contains(0L)); Assertions.assertTrue(allValues.contains(11L)); } /** * Verifies that when no messages are returned, that it does not error. */ @Test void peekEmptyEntity() { when(managementNode.peek(0, null, null)) .thenReturn(Mono.empty()); StepVerifier.create(receiver.peekMessage()) .verifyComplete(); } /** * Verifies that this peek one messages from a sequence Number. 
*/ @Test void peekWithSequenceOneMessage() { final int fromSequenceNumber = 10; final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(managementNode.peek(fromSequenceNumber, null, null)).thenReturn(Mono.just(receivedMessage)); StepVerifier.create(receiver.peekMessageAt(fromSequenceNumber)) .expectNext(receivedMessage) .verifyComplete(); } /** * Verifies that this receives a number of messages. Verifies that the initial credits we add are equal to the * prefetch value. */ @Test void receivesNumberOfEvents() { final int numberOfEvents = 1; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(receivedMessage.getLockedUntil()).thenReturn(OffsetDateTime.now()); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); verify(amqpReceiveLink).addCredits(PREFETCH); verify(amqpReceiveLink, never()).updateDisposition(eq(lockToken), any()); } /** * Verifies that we error if we try to settle a message with null transaction-id. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void settleWithNullTransactionId(DispositionStatus dispositionStatus) { ServiceBusTransactionContext nullTransactionId = new ServiceBusTransactionContext(null); when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)).thenReturn(Mono.just(managementNode)); when(managementNode.updateDisposition(any(), eq(dispositionStatus), isNull(), isNull(), isNull(), isNull(), isNull(), isNull())) .thenReturn(Mono.delay(Duration.ofMillis(250)).then()); when(receivedMessage.getLockToken()).thenReturn("mylockToken"); final Mono<Void> operation; switch (dispositionStatus) { case DEFERRED: operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(nullTransactionId)); break; case ABANDONED: operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(nullTransactionId)); break; case COMPLETED: operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(nullTransactionId)); break; case SUSPENDED: operation = receiver.deadLetter(receivedMessage, new DeadLetterOptions().setTransactionContext(nullTransactionId)); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } StepVerifier.create(operation) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).updateDisposition(any(), eq(dispositionStatus), isNull(), isNull(), isNull(), isNull(), isNull(), isNull()); } /** * Verifies that we error if we try to complete a null message. */ @Test void completeNullMessage() { StepVerifier.create(receiver.complete(null)).expectError(NullPointerException.class).verify(); } /** * Verifies that we error if we complete in RECEIVE_AND_DELETE mode. 
*/ @Test void completeInReceiveAndDeleteMode() { final ReceiverOptions options = new ReceiverOptions(ReceiveMode.RECEIVE_AND_DELETE, PREFETCH, null, false); ServiceBusReceiverAsyncClient client = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, options, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); final String lockToken1 = UUID.randomUUID().toString(); when(receivedMessage.getLockToken()).thenReturn(lockToken1); try { StepVerifier.create(client.complete(receivedMessage)) .expectError(UnsupportedOperationException.class) .verify(); } finally { client.close(); } } /** * Verifies that this peek batch of messages. */ @Test void peekMessages() { final int numberOfEvents = 2; when(managementNode.peek(0, null, null, numberOfEvents)) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.peekMessages(numberOfEvents)) .expectNextCount(numberOfEvents) .verifyComplete(); } /** * Verifies that this peek batch of messages. */ @Test void peekMessagesEmptyEntity() { final int numberOfEvents = 2; when(managementNode.peek(0, null, null, numberOfEvents)) .thenReturn(Flux.fromIterable(Collections.emptyList())); StepVerifier.create(receiver.peekMessages(numberOfEvents)) .verifyComplete(); } /** * Verifies that this peek batch of messages from a sequence Number. */ @Test void peekBatchWithSequenceNumberMessages() { final int numberOfEvents = 2; final int fromSequenceNumber = 10; when(managementNode.peek(fromSequenceNumber, null, null, numberOfEvents)) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.peekMessagesAt(numberOfEvents, fromSequenceNumber)) .expectNext(receivedMessage, receivedMessage2) .verifyComplete(); } /** * Verifies that we can deadletter a message with an error and description. 
*/ @Test void deadLetterWithDescription() { final String lockToken1 = UUID.randomUUID().toString(); final String description = "some-dead-letter-description"; final String reason = "dead-letter-reason"; final Map<String, Object> propertiesToModify = new HashMap<>(); propertiesToModify.put("something", true); final DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setDeadLetterReason(reason) .setDeadLetterErrorDescription(description) .setPropertiesToModify(propertiesToModify); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(amqpReceiveLink.updateDisposition(eq(lockToken1), argThat(e -> e.getType() == DeliveryStateType.Rejected))).thenReturn(Mono.empty()); StepVerifier.create(receiver.receiveMessages() .take(1) .flatMap(context -> receiver.deadLetter(context.getMessage(), deadLetterOptions))) .then(() -> messageSink.next(message)) .expectNext() .verifyComplete(); verify(amqpReceiveLink).updateDisposition(eq(lockToken1), isA(Rejected.class)); } /** * Verifies that error source is populated when any error happened while renewing lock. 
*/ @Test void errorSourceOnRenewMessageLock() { final Duration maxDuration = Duration.ofSeconds(8); final String lockToken = "some-token"; when(receivedMessage.getLockToken()).thenReturn(lockToken); when(managementNode.renewMessageLock(lockToken, null)) .thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .verifyErrorMatches(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RENEW_LOCK, actual); return true; }); verify(managementNode, times(1)).renewMessageLock(lockToken, null); } /** * Verifies that error source is populated when any error happened while renewing lock. */ @Test /** * Verifies that error source is populated when there is any error during message settlement. */ @ParameterizedTest @MethodSource void errorSourceOnSettlement(DispositionStatus dispositionStatus, ServiceBusErrorSource expectedErrorSource, DeliveryStateType expectedDeliveryState) { final String lockToken1 = UUID.randomUUID().toString(); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(amqpReceiveLink.updateDisposition(eq(lockToken1), argThat(e -> e.getType() == expectedDeliveryState))) .thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(receiver.receiveMessages().take(1) .flatMap(context -> { final Mono<Void> operation; switch (dispositionStatus) { case DEFERRED: operation = receiver.defer(receivedMessage); break; case ABANDONED: 
operation = receiver.abandon(receivedMessage); break; case COMPLETED: operation = receiver.complete(receivedMessage); break; case SUSPENDED: operation = receiver.deadLetter(receivedMessage); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } return operation; })) .then(() -> messageSink.next(message)) .expectNext() .verifyErrorMatches(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(expectedErrorSource, actual); return true; }); verify(amqpReceiveLink).updateDisposition(eq(lockToken1), any(DeliveryState.class)); } /** * Verifies that error source is populated when there is any error during receiving of message. */ @Test void errorSourceOnReceiveMessage() { final String lockToken = UUID.randomUUID().toString(); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class))).thenReturn(Mono.error(new AmqpException(false, "some receive link Error.", null))); StepVerifier.create(receiver.receiveMessages().take(1)) .verifyErrorMatches(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RECEIVE, actual); return true; }); verify(amqpReceiveLink, never()).updateDisposition(eq(lockToken), any(DeliveryState.class)); } /** * Verifies that the user can complete settlement 
methods on received message. */ @ParameterizedTest @EnumSource(DispositionStatus.class) void settleMessageOnManagement(DispositionStatus dispositionStatus) { final String lockToken1 = UUID.randomUUID().toString(); final String lockToken2 = UUID.randomUUID().toString(); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final long sequenceNumber = 10L; final long sequenceNumber2 = 15L; final MessageWithLockToken message = mock(MessageWithLockToken.class); final MessageWithLockToken message2 = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(messageSerializer.deserialize(message2, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage2); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(receivedMessage2.getLockToken()).thenReturn(lockToken2); when(receivedMessage2.getLockedUntil()).thenReturn(expiration); when(connection.getManagementNode(eq(ENTITY_PATH), eq(ENTITY_TYPE))) .thenReturn(Mono.just(managementNode)); when(managementNode.receiveDeferredMessages(eq(ReceiveMode.PEEK_LOCK), isNull(), isNull(), argThat(arg -> { boolean foundFirst = false; boolean foundSecond = false; for (Long seq : arg) { if (!foundFirst && sequenceNumber == seq) { foundFirst = true; } else if (!foundSecond && sequenceNumber2 == seq) { foundSecond = true; } } return foundFirst && foundSecond; }))) .thenReturn(Flux.just(receivedMessage, receivedMessage2)); when(managementNode.updateDisposition(lockToken1, dispositionStatus, null, null, null, null, null, null)) .thenReturn(Mono.empty()); when(managementNode.updateDisposition(lockToken2, dispositionStatus, null, null, null, null, null, null)) .thenReturn(Mono.empty()); StepVerifier.create(receiver.receiveDeferredMessages(Arrays.asList(sequenceNumber, sequenceNumber2))) .expectNext(receivedMessage, receivedMessage2) .verifyComplete(); 
final Mono<Void> operation; switch (dispositionStatus) { case DEFERRED: operation = receiver.defer(receivedMessage); break; case ABANDONED: operation = receiver.abandon(receivedMessage); break; case COMPLETED: operation = receiver.complete(receivedMessage); break; case SUSPENDED: operation = receiver.deadLetter(receivedMessage); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } StepVerifier.create(operation) .verifyComplete(); verify(managementNode).updateDisposition(lockToken1, dispositionStatus, null, null, null, null, null, null); verify(managementNode, never()).updateDisposition(lockToken2, dispositionStatus, null, null, null, null, null, null); } /** * Verifies that this receive deferred one messages from a sequence Number. */ @Test void receiveDeferredWithSequenceOneMessage() { final int fromSequenceNumber = 10; final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(managementNode.receiveDeferredMessages(any(), any(), any(), any())).thenReturn(Flux.just(receivedMessage)); StepVerifier.create(receiver.receiveDeferredMessage(fromSequenceNumber)) .expectNext(receivedMessage) .verifyComplete(); } /** * Verifies that this receive deferred messages from a sequence Number. */ @Test void receiveDeferredBatchFromSequenceNumber() { final long fromSequenceNumber1 = 10; final long fromSequenceNumber2 = 11; when(managementNode.receiveDeferredMessages(any(), any(), any(), any())) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.receiveDeferredMessages(Arrays.asList(fromSequenceNumber1, fromSequenceNumber2))) .expectNext(receivedMessage) .expectNext(receivedMessage2) .verifyComplete(); } /** * Verifies that the onClientClose is called. */ @Test void callsClientClose() { receiver.close(); verify(onClientClose).run(); } /** * Verifies that the onClientClose is only called once. 
*/ @Test void callsClientCloseOnce() { receiver.close(); receiver.close(); verify(onClientClose).run(); } /** * Tests that invalid options throws and null options. */ @Test void receiveIllegalOptions() { ServiceBusReceiverClientBuilder builder = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver() .topicName("baz").subscriptionName("bar") .receiveMode(ReceiveMode.PEEK_LOCK); Assertions.assertThrows(IllegalArgumentException.class, () -> builder.prefetchCount(-1)); } @Test void topicCorrectEntityPath() { final String topicName = "foo"; final String subscriptionName = "bar"; final String entityPath = String.join("/", topicName, "subscriptions", subscriptionName); final ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver() .topicName(topicName) .subscriptionName(subscriptionName) .receiveMode(ReceiveMode.PEEK_LOCK) .buildAsyncClient(); final String actual = receiver.getEntityPath(); final String actualNamespace = receiver.getFullyQualifiedNamespace(); Assertions.assertEquals(entityPath, actual); Assertions.assertEquals(NAMESPACE, actualNamespace); } /** * Verifies that client can call multiple receiveMessages on same receiver instance. 
*/ @Test void canPerformMultipleReceive() { final int numberOfEvents = 1; final List<Message> messages = getMessages(); ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(receivedMessage.getLockedUntil()).thenReturn(OffsetDateTime.now()); when(receivedMessage.getLockToken()).thenReturn(UUID.randomUUID().toString()); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); verify(amqpReceiveLink).addCredits(PREFETCH); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformGetSessionState() { final String sessionId = "a-session-id"; StepVerifier.create(receiver.getSessionState(sessionId)) .expectError(IllegalStateException.class) .verify(); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformSetSessionState() { final String sessionId = "a-session-id"; final byte[] sessionState = new byte[]{10, 11, 8}; StepVerifier.create(receiver.setSessionState(sessionId, sessionState)) .expectError(IllegalStateException.class) .verify(); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformRenewSessionLock() { final String sessionId = "a-session-id"; StepVerifier.create(receiver.renewSessionLock(sessionId)) .expectError(IllegalStateException.class) .verify(); } /** * Verifies that we can get a session state. 
*/ @SuppressWarnings("unchecked") @Test void getSessionState() { final byte[] bytes = new byte[]{95, 11, 54, 10}; when(managementNode.getSessionState(SESSION_ID, null)) .thenReturn(Mono.just(bytes), Mono.empty()); StepVerifier.create(sessionReceiver.getSessionState(SESSION_ID)) .expectNext(bytes) .expectComplete() .verify(); } /** * Verifies that we can set a session state. */ @Test void setSessionState() { final byte[] bytes = new byte[]{95, 11, 54, 10}; when(managementNode.setSessionState(SESSION_ID, bytes, null)).thenReturn(Mono.empty()); StepVerifier.create(sessionReceiver.setSessionState(SESSION_ID, bytes)) .expectComplete() .verify(); } /** * Verifies that we can renew a session state. */ @Test void renewSessionLock() { final OffsetDateTime expiry = Instant.ofEpochSecond(1588011761L).atOffset(ZoneOffset.UTC); when(managementNode.renewSessionLock(SESSION_ID, null)).thenReturn(Mono.just(expiry)); StepVerifier.create(sessionReceiver.renewSessionLock(SESSION_ID)) .expectNext(expiry) .expectComplete() .verify(); } /** * Verifies that we cannot renew a message lock when using a session receiver. */ @Test void cannotRenewMessageLockInSession() { when(receivedMessage.getLockToken()).thenReturn("lock-token"); when(receivedMessage.getSessionId()).thenReturn("fo"); StepVerifier.create(sessionReceiver.renewMessageLock(receivedMessage)) .expectError(IllegalStateException.class) .verify(); } /** * Verifies that we can auto-renew a message lock. 
*/ @Test void autoRenewMessageLock() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String lockToken = "some-token"; final int atMost = 5; final Duration totalSleepPeriod = maxDuration.plusMillis(500); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(managementNode.renewMessageLock(lockToken, null)) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .thenAwait(totalSleepPeriod) .then(() -> logger.info("Finished renewals for first sleep.")) .expectComplete() .verify(Duration.ofSeconds(5)); verify(managementNode, Mockito.atMost(atMost)).renewMessageLock(lockToken, null); } /** * Verifies that it errors when we try a null lock token. */ @Test void autoRenewMessageLockErrorNull() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); when(receivedMessage.getLockToken()).thenReturn(null); when(managementNode.renewMessageLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).renewMessageLock(anyString(), isNull()); } /** * Verifies that it errors when we try an empty string lock token. 
*/ @Test void autoRenewMessageLockErrorEmptyString() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String lockToken = ""; when(receivedMessage.getLockToken()).thenReturn(""); when(managementNode.renewMessageLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .expectError(IllegalArgumentException.class) .verify(); verify(managementNode, never()).renewMessageLock(anyString(), isNull()); } /** * Verifies that we can auto-renew a session lock. */ @Test void autoRenewSessionLock() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String sessionId = "some-token"; final int atMost = 5; final Duration totalSleepPeriod = maxDuration.plusMillis(500); when(managementNode.renewSessionLock(sessionId, null)) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(sessionId, maxDuration)) .thenAwait(totalSleepPeriod) .then(() -> logger.info("Finished renewals for first sleep.")) .expectComplete() .verify(Duration.ofSeconds(5)); verify(managementNode, Mockito.atMost(atMost)).renewSessionLock(sessionId, null); } /** * Verifies that it errors when we try a null lock token. 
*/ @Test void autoRenewSessionLockErrorNull() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); when(managementNode.renewSessionLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(null, maxDuration)) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).renewSessionLock(anyString(), isNull()); } /** * Verifies that it errors when we try an empty string session id */ @Test void autoRenewSessionLockErrorEmptyString() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String sessionId = ""; when(managementNode.renewSessionLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(sessionId, maxDuration)) .expectError(IllegalArgumentException.class) .verify(); verify(managementNode, never()).renewSessionLock(anyString(), isNull()); } @Test void autoCompleteMessage() { final int numberOfEvents = 3; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); final ReceiverOptions receiverOptions = new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, true); final ServiceBusReceiverAsyncClient receiver2 = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, receiverOptions, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); when(amqpReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.empty()); try { StepVerifier.create(receiver2.receiveMessages().take(numberOfEvents)) .then(() -> 
messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); } finally { receiver2.close(); } verify(amqpReceiveLink, times(numberOfEvents)).updateDisposition(lockToken, Accepted.getInstance()); } @Test void autoCompleteMessageSessionReceiver() { final int numberOfEvents = 3; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); final ReceiverOptions receiverOptions = new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, true, "Some-Session", false, null); final ServiceBusReceiverAsyncClient sessionReceiver2 = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, receiverOptions, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); when(sessionReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.empty()); try { StepVerifier.create(sessionReceiver2.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); } finally { sessionReceiver2.close(); } verify(sessionReceiveLink, times(numberOfEvents)).updateDisposition(lockToken, Accepted.getInstance()); } private List<Message> getMessages() { final Map<String, String> map = Collections.singletonMap("SAMPLE_HEADER", "foo"); return IntStream.range(0, 10) .mapToObj(index -> getMessage(PAYLOAD_BYTES, messageTrackingUUID, map)) .collect(Collectors.toList()); } private static Stream<Arguments> errorSourceOnSettlement() { return Stream.of( Arguments.of(DispositionStatus.DEFERRED, ServiceBusErrorSource.DEFER, DeliveryStateType.Modified), Arguments.of(DispositionStatus.COMPLETED, ServiceBusErrorSource.COMPLETE, DeliveryStateType.Accepted), Arguments.of(DispositionStatus.SUSPENDED, 
ServiceBusErrorSource.DEAD_LETTER, DeliveryStateType.Rejected), Arguments.of(DispositionStatus.ABANDONED, ServiceBusErrorSource.ABANDONED, DeliveryStateType.Modified)); } }
class ServiceBusReceiverAsyncClientTest { private static final ClientOptions CLIENT_OPTIONS = new ClientOptions(); private static final String PAYLOAD = "hello"; private static final byte[] PAYLOAD_BYTES = PAYLOAD.getBytes(UTF_8); private static final int PREFETCH = 5; private static final String NAMESPACE = "my-namespace-foo.net"; private static final String ENTITY_PATH = "queue-name"; private static final MessagingEntityType ENTITY_TYPE = MessagingEntityType.QUEUE; private static final String NAMESPACE_CONNECTION_STRING = String.format( "Endpoint=sb: NAMESPACE, "some-name", "something-else"); private static final Duration CLEANUP_INTERVAL = Duration.ofSeconds(10); private static final String SESSION_ID = "my-session-id"; private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientTest.class); private final String messageTrackingUUID = UUID.randomUUID().toString(); private final ReplayProcessor<AmqpEndpointState> endpointProcessor = ReplayProcessor.cacheLast(); private final FluxSink<AmqpEndpointState> endpointSink = endpointProcessor.sink(FluxSink.OverflowStrategy.BUFFER); private final DirectProcessor<Message> messageProcessor = DirectProcessor.create(); private final FluxSink<Message> messageSink = messageProcessor.sink(FluxSink.OverflowStrategy.BUFFER); private ServiceBusConnectionProcessor connectionProcessor; private ServiceBusReceiverAsyncClient receiver; private ServiceBusReceiverAsyncClient sessionReceiver; @Mock private ServiceBusReactorReceiver amqpReceiveLink; @Mock private ServiceBusReactorReceiver sessionReceiveLink; @Mock private ServiceBusAmqpConnection connection; @Mock private TokenCredential tokenCredential; @Mock private MessageSerializer messageSerializer; @Mock private TracerProvider tracerProvider; @Mock private ServiceBusManagementNode managementNode; @Mock private ServiceBusReceivedMessage receivedMessage; @Mock private ServiceBusReceivedMessage receivedMessage2; @Mock private Runnable onClientClose; @BeforeAll static 
void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(100)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } @BeforeEach void setup(TestInfo testInfo) { logger.info("[{}] Setting up.", testInfo.getDisplayName()); MockitoAnnotations.initMocks(this); when(amqpReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single())); when(amqpReceiveLink.getEndpointStates()).thenReturn(endpointProcessor); when(sessionReceiveLink.receive()).thenReturn(messageProcessor.publishOn(Schedulers.single())); when(sessionReceiveLink.getEndpointStates()).thenReturn(endpointProcessor); ConnectionOptions connectionOptions = new ConnectionOptions(NAMESPACE, tokenCredential, CbsAuthorizationType.SHARED_ACCESS_SIGNATURE, AmqpTransportType.AMQP, new AmqpRetryOptions(), ProxyOptions.SYSTEM_DEFAULTS, Schedulers.boundedElastic(), CLIENT_OPTIONS, SslDomain.VerifyMode.VERIFY_PEER_NAME); when(connection.getEndpointStates()).thenReturn(endpointProcessor); endpointSink.next(AmqpEndpointState.ACTIVE); when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)) .thenReturn(Mono.just(managementNode)); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class))).thenReturn(Mono.just(amqpReceiveLink)); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class), anyString())).thenReturn(Mono.just(sessionReceiveLink)); connectionProcessor = Flux.<ServiceBusAmqpConnection>create(sink -> sink.next(connection)) .subscribeWith(new ServiceBusConnectionProcessor(connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getRetry())); receiver = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, false), connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); sessionReceiver = new 
ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, false, "Some-Session", null), connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); } @AfterEach void teardown(TestInfo testInfo) { logger.info("[{}] Tearing down.", testInfo.getDisplayName()); receiver.close(); Mockito.framework().clearInlineMocks(); } /** * Verifies that when user calls peek more than one time, It returns different object. */ @SuppressWarnings("unchecked") @Test void peekTwoMessages() { final long sequence1 = 10; final long sequence2 = 12; final ArgumentCaptor<Long> captor = ArgumentCaptor.forClass(Long.class); when(receivedMessage.getSequenceNumber()).thenReturn(sequence1); when(receivedMessage2.getSequenceNumber()).thenReturn(sequence2); when(managementNode.peek(anyLong(), isNull(), isNull())) .thenReturn(Mono.just(receivedMessage), Mono.just(receivedMessage2)); StepVerifier.create(receiver.peekMessage()) .expectNext(receivedMessage) .verifyComplete(); StepVerifier.create(receiver.peekMessage()) .expectNext(receivedMessage2) .verifyComplete(); verify(managementNode, times(2)).peek(captor.capture(), isNull(), isNull()); final List<Long> allValues = captor.getAllValues(); Assertions.assertEquals(2, allValues.size()); Assertions.assertTrue(allValues.contains(0L)); Assertions.assertTrue(allValues.contains(11L)); } /** * Verifies that when no messages are returned, that it does not error. */ @Test void peekEmptyEntity() { when(managementNode.peek(0, null, null)) .thenReturn(Mono.empty()); StepVerifier.create(receiver.peekMessage()) .verifyComplete(); } /** * Verifies that this peek one messages from a sequence Number. 
*/ @Test void peekWithSequenceOneMessage() { final int fromSequenceNumber = 10; final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(managementNode.peek(fromSequenceNumber, null, null)).thenReturn(Mono.just(receivedMessage)); StepVerifier.create(receiver.peekMessageAt(fromSequenceNumber)) .expectNext(receivedMessage) .verifyComplete(); } /** * Verifies that this receives a number of messages. Verifies that the initial credits we add are equal to the * prefetch value. */ @Test void receivesNumberOfEvents() { final int numberOfEvents = 1; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(receivedMessage.getLockedUntil()).thenReturn(OffsetDateTime.now()); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); verify(amqpReceiveLink).addCredits(PREFETCH); verify(amqpReceiveLink, never()).updateDisposition(eq(lockToken), any()); } /** * Verifies that we error if we try to settle a message with null transaction-id. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void settleWithNullTransactionId(DispositionStatus dispositionStatus) { ServiceBusTransactionContext nullTransactionId = new ServiceBusTransactionContext(null); when(connection.getManagementNode(ENTITY_PATH, ENTITY_TYPE)).thenReturn(Mono.just(managementNode)); when(managementNode.updateDisposition(any(), eq(dispositionStatus), isNull(), isNull(), isNull(), isNull(), isNull(), isNull())) .thenReturn(Mono.delay(Duration.ofMillis(250)).then()); when(receivedMessage.getLockToken()).thenReturn("mylockToken"); final Mono<Void> operation; switch (dispositionStatus) { case DEFERRED: operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(nullTransactionId)); break; case ABANDONED: operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(nullTransactionId)); break; case COMPLETED: operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(nullTransactionId)); break; case SUSPENDED: operation = receiver.deadLetter(receivedMessage, new DeadLetterOptions().setTransactionContext(nullTransactionId)); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } StepVerifier.create(operation) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).updateDisposition(any(), eq(dispositionStatus), isNull(), isNull(), isNull(), isNull(), isNull(), isNull()); } /** * Verifies that we error if we try to complete a null message. */ @Test void completeNullMessage() { StepVerifier.create(receiver.complete(null)).expectError(NullPointerException.class).verify(); } /** * Verifies that we error if we complete in RECEIVE_AND_DELETE mode. 
*/ @Test void completeInReceiveAndDeleteMode() { final ReceiverOptions options = new ReceiverOptions(ReceiveMode.RECEIVE_AND_DELETE, PREFETCH, null, false); ServiceBusReceiverAsyncClient client = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, options, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); final String lockToken1 = UUID.randomUUID().toString(); when(receivedMessage.getLockToken()).thenReturn(lockToken1); try { StepVerifier.create(client.complete(receivedMessage)) .expectError(UnsupportedOperationException.class) .verify(); } finally { client.close(); } } /** * Verifies that this peek batch of messages. */ @Test void peekMessages() { final int numberOfEvents = 2; when(managementNode.peek(0, null, null, numberOfEvents)) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.peekMessages(numberOfEvents)) .expectNextCount(numberOfEvents) .verifyComplete(); } /** * Verifies that this peek batch of messages. */ @Test void peekMessagesEmptyEntity() { final int numberOfEvents = 2; when(managementNode.peek(0, null, null, numberOfEvents)) .thenReturn(Flux.fromIterable(Collections.emptyList())); StepVerifier.create(receiver.peekMessages(numberOfEvents)) .verifyComplete(); } /** * Verifies that this peek batch of messages from a sequence Number. */ @Test void peekBatchWithSequenceNumberMessages() { final int numberOfEvents = 2; final int fromSequenceNumber = 10; when(managementNode.peek(fromSequenceNumber, null, null, numberOfEvents)) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.peekMessagesAt(numberOfEvents, fromSequenceNumber)) .expectNext(receivedMessage, receivedMessage2) .verifyComplete(); } /** * Verifies that we can deadletter a message with an error and description. 
*/ @Test void deadLetterWithDescription() { final String lockToken1 = UUID.randomUUID().toString(); final String description = "some-dead-letter-description"; final String reason = "dead-letter-reason"; final Map<String, Object> propertiesToModify = new HashMap<>(); propertiesToModify.put("something", true); final DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setDeadLetterReason(reason) .setDeadLetterErrorDescription(description) .setPropertiesToModify(propertiesToModify); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(amqpReceiveLink.updateDisposition(eq(lockToken1), argThat(e -> e.getType() == DeliveryStateType.Rejected))).thenReturn(Mono.empty()); StepVerifier.create(receiver.receiveMessages() .take(1) .flatMap(context -> receiver.deadLetter(context.getMessage(), deadLetterOptions))) .then(() -> messageSink.next(message)) .expectNext() .verifyComplete(); verify(amqpReceiveLink).updateDisposition(eq(lockToken1), isA(Rejected.class)); } /** * Verifies that error source is populated when any error happened while renewing lock. 
*/ @Test void errorSourceOnRenewMessageLock() { final Duration maxDuration = Duration.ofSeconds(8); final String lockToken = "some-token"; when(receivedMessage.getLockToken()).thenReturn(lockToken); when(managementNode.renewMessageLock(lockToken, null)) .thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .verifyErrorSatisfies(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RENEW_LOCK, actual); }); verify(managementNode, times(1)).renewMessageLock(lockToken, null); } /** * Verifies that error source is populated when any error happened while renewing lock. */ @Test /** * Verifies that error source is not populated when there is no autoComplete. Because user wanted to settle on their * own, we do not need to populate ErrorSource. 
*/ @ParameterizedTest @MethodSource void errorSourceNoneOnSettlement(DispositionStatus dispositionStatus, DeliveryStateType expectedDeliveryState) { final UUID lockTokenUuid = UUID.randomUUID(); final String lockToken1 = lockTokenUuid.toString(); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(amqpReceiveLink.updateDisposition(eq(lockToken1), argThat(e -> e.getType() == expectedDeliveryState))) .thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); StepVerifier.create(receiver.receiveMessages().take(1) .flatMap(context -> { final Mono<Void> operation; switch (dispositionStatus) { case ABANDONED: operation = receiver.abandon(context.getMessage()); break; case COMPLETED: operation = receiver.complete(context.getMessage()); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } return operation; }) ) .then(() -> messageSink.next(message)) .expectNext() .verifyErrorSatisfies(throwable -> { Assertions.assertFalse(throwable instanceof ServiceBusAmqpException); Assertions.assertTrue(throwable instanceof AmqpException); }); verify(amqpReceiveLink).updateDisposition(eq(lockToken1), any(DeliveryState.class)); } /** * Ensure that we throw right error source when there is any issue during autocomplete. 
Error source should be * {@link ServiceBusErrorSource */ @Test void errorSourceAutoCompleteMessage() { final int numberOfEvents = 2; final int messagesToReceive = 1; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); final ReceiverOptions receiverOptions = new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, true); final ServiceBusReceiverAsyncClient receiver2 = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, receiverOptions, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); when(amqpReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.error(new AmqpException(false, "some error occurred.", null))); try { StepVerifier.create(receiver2.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(messagesToReceive) .verifyErrorSatisfies(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.COMPLETE, actual); }); } finally { receiver2.close(); } verify(amqpReceiveLink, atLeast(messagesToReceive)).updateDisposition(lockToken, Accepted.getInstance()); } /** * Verifies that error source is populated when there is any error during receiving of message. 
*/ @Test void errorSourceOnReceiveMessage() { final String lockToken = UUID.randomUUID().toString(); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final MessageWithLockToken message = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(connection.createReceiveLink(anyString(), anyString(), any(ReceiveMode.class), any(), any(MessagingEntityType.class))).thenReturn(Mono.error(new AmqpException(false, "some receive link Error.", null))); StepVerifier.create(receiver.receiveMessages().take(1)) .verifyErrorSatisfies(throwable -> { Assertions.assertTrue(throwable instanceof ServiceBusAmqpException); final ServiceBusErrorSource actual = ((ServiceBusAmqpException) throwable).getErrorSource(); Assertions.assertEquals(ServiceBusErrorSource.RECEIVE, actual); }); verify(amqpReceiveLink, never()).updateDisposition(eq(lockToken), any(DeliveryState.class)); } /** * Verifies that the user can complete settlement methods on received message. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void settleMessageOnManagement(DispositionStatus dispositionStatus) { final String lockToken1 = UUID.randomUUID().toString(); final String lockToken2 = UUID.randomUUID().toString(); final OffsetDateTime expiration = OffsetDateTime.now().plus(Duration.ofMinutes(5)); final long sequenceNumber = 10L; final long sequenceNumber2 = 15L; final MessageWithLockToken message = mock(MessageWithLockToken.class); final MessageWithLockToken message2 = mock(MessageWithLockToken.class); when(messageSerializer.deserialize(message, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage); when(messageSerializer.deserialize(message2, ServiceBusReceivedMessage.class)).thenReturn(receivedMessage2); when(receivedMessage.getLockToken()).thenReturn(lockToken1); when(receivedMessage.getLockedUntil()).thenReturn(expiration); when(receivedMessage2.getLockToken()).thenReturn(lockToken2); when(receivedMessage2.getLockedUntil()).thenReturn(expiration); when(connection.getManagementNode(eq(ENTITY_PATH), eq(ENTITY_TYPE))) .thenReturn(Mono.just(managementNode)); when(managementNode.receiveDeferredMessages(eq(ReceiveMode.PEEK_LOCK), isNull(), isNull(), argThat(arg -> { boolean foundFirst = false; boolean foundSecond = false; for (Long seq : arg) { if (!foundFirst && sequenceNumber == seq) { foundFirst = true; } else if (!foundSecond && sequenceNumber2 == seq) { foundSecond = true; } } return foundFirst && foundSecond; }))) .thenReturn(Flux.just(receivedMessage, receivedMessage2)); when(managementNode.updateDisposition(lockToken1, dispositionStatus, null, null, null, null, null, null)) .thenReturn(Mono.empty()); when(managementNode.updateDisposition(lockToken2, dispositionStatus, null, null, null, null, null, null)) .thenReturn(Mono.empty()); StepVerifier.create(receiver.receiveDeferredMessages(Arrays.asList(sequenceNumber, sequenceNumber2))) .expectNext(receivedMessage, receivedMessage2) .verifyComplete(); final Mono<Void> operation; 
switch (dispositionStatus) { case DEFERRED: operation = receiver.defer(receivedMessage); break; case ABANDONED: operation = receiver.abandon(receivedMessage); break; case COMPLETED: operation = receiver.complete(receivedMessage); break; case SUSPENDED: operation = receiver.deadLetter(receivedMessage); break; default: throw new IllegalArgumentException("Unrecognized operation: " + dispositionStatus); } StepVerifier.create(operation) .verifyComplete(); verify(managementNode).updateDisposition(lockToken1, dispositionStatus, null, null, null, null, null, null); verify(managementNode, never()).updateDisposition(lockToken2, dispositionStatus, null, null, null, null, null, null); } /** * Verifies that this receive deferred one messages from a sequence Number. */ @Test void receiveDeferredWithSequenceOneMessage() { final int fromSequenceNumber = 10; final ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(managementNode.receiveDeferredMessages(any(), any(), any(), any())).thenReturn(Flux.just(receivedMessage)); StepVerifier.create(receiver.receiveDeferredMessage(fromSequenceNumber)) .expectNext(receivedMessage) .verifyComplete(); } /** * Verifies that this receive deferred messages from a sequence Number. */ @Test void receiveDeferredBatchFromSequenceNumber() { final long fromSequenceNumber1 = 10; final long fromSequenceNumber2 = 11; when(managementNode.receiveDeferredMessages(any(), any(), any(), any())) .thenReturn(Flux.fromArray(new ServiceBusReceivedMessage[]{receivedMessage, receivedMessage2})); StepVerifier.create(receiver.receiveDeferredMessages(Arrays.asList(fromSequenceNumber1, fromSequenceNumber2))) .expectNext(receivedMessage) .expectNext(receivedMessage2) .verifyComplete(); } /** * Verifies that the onClientClose is called. */ @Test void callsClientClose() { receiver.close(); verify(onClientClose).run(); } /** * Verifies that the onClientClose is only called once. 
*/ @Test void callsClientCloseOnce() { receiver.close(); receiver.close(); verify(onClientClose).run(); } /** * Tests that invalid options throws and null options. */ @Test void receiveIllegalOptions() { ServiceBusReceiverClientBuilder builder = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver() .topicName("baz").subscriptionName("bar") .receiveMode(ReceiveMode.PEEK_LOCK); Assertions.assertThrows(IllegalArgumentException.class, () -> builder.prefetchCount(-1)); } @Test void topicCorrectEntityPath() { final String topicName = "foo"; final String subscriptionName = "bar"; final String entityPath = String.join("/", topicName, "subscriptions", subscriptionName); final ServiceBusReceiverAsyncClient receiver = new ServiceBusClientBuilder() .connectionString(NAMESPACE_CONNECTION_STRING) .receiver() .topicName(topicName) .subscriptionName(subscriptionName) .receiveMode(ReceiveMode.PEEK_LOCK) .buildAsyncClient(); final String actual = receiver.getEntityPath(); final String actualNamespace = receiver.getFullyQualifiedNamespace(); Assertions.assertEquals(entityPath, actual); Assertions.assertEquals(NAMESPACE, actualNamespace); } /** * Verifies that client can call multiple receiveMessages on same receiver instance. 
*/ @Test void canPerformMultipleReceive() { final int numberOfEvents = 1; final List<Message> messages = getMessages(); ServiceBusReceivedMessage receivedMessage = mock(ServiceBusReceivedMessage.class); when(receivedMessage.getLockedUntil()).thenReturn(OffsetDateTime.now()); when(receivedMessage.getLockToken()).thenReturn(UUID.randomUUID().toString()); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); StepVerifier.create(receiver.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); verify(amqpReceiveLink).addCredits(PREFETCH); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformGetSessionState() { final String sessionId = "a-session-id"; StepVerifier.create(receiver.getSessionState(sessionId)) .expectError(IllegalStateException.class) .verify(); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformSetSessionState() { final String sessionId = "a-session-id"; final byte[] sessionState = new byte[]{10, 11, 8}; StepVerifier.create(receiver.setSessionState(sessionId, sessionState)) .expectError(IllegalStateException.class) .verify(); } /** * Cannot get session state for non-session receiver. */ @Test void cannotPerformRenewSessionLock() { final String sessionId = "a-session-id"; StepVerifier.create(receiver.renewSessionLock(sessionId)) .expectError(IllegalStateException.class) .verify(); } /** * Verifies that we can get a session state. 
*/ @SuppressWarnings("unchecked") @Test void getSessionState() { final byte[] bytes = new byte[]{95, 11, 54, 10}; when(managementNode.getSessionState(SESSION_ID, null)) .thenReturn(Mono.just(bytes), Mono.empty()); StepVerifier.create(sessionReceiver.getSessionState(SESSION_ID)) .expectNext(bytes) .expectComplete() .verify(); } /** * Verifies that we can set a session state. */ @Test void setSessionState() { final byte[] bytes = new byte[]{95, 11, 54, 10}; when(managementNode.setSessionState(SESSION_ID, bytes, null)).thenReturn(Mono.empty()); StepVerifier.create(sessionReceiver.setSessionState(SESSION_ID, bytes)) .expectComplete() .verify(); } /** * Verifies that we can renew a session state. */ @Test void renewSessionLock() { final OffsetDateTime expiry = Instant.ofEpochSecond(1588011761L).atOffset(ZoneOffset.UTC); when(managementNode.renewSessionLock(SESSION_ID, null)).thenReturn(Mono.just(expiry)); StepVerifier.create(sessionReceiver.renewSessionLock(SESSION_ID)) .expectNext(expiry) .expectComplete() .verify(); } /** * Verifies that we cannot renew a message lock when using a session receiver. */ @Test void cannotRenewMessageLockInSession() { when(receivedMessage.getLockToken()).thenReturn("lock-token"); when(receivedMessage.getSessionId()).thenReturn("fo"); StepVerifier.create(sessionReceiver.renewMessageLock(receivedMessage)) .expectError(IllegalStateException.class) .verify(); } /** * Verifies that we can auto-renew a message lock. 
*/ @Test void autoRenewMessageLock() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String lockToken = "some-token"; final int atMost = 5; final Duration totalSleepPeriod = maxDuration.plusMillis(500); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(managementNode.renewMessageLock(lockToken, null)) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .thenAwait(totalSleepPeriod) .then(() -> logger.info("Finished renewals for first sleep.")) .expectComplete() .verify(Duration.ofSeconds(5)); verify(managementNode, Mockito.atMost(atMost)).renewMessageLock(lockToken, null); } /** * Verifies that it errors when we try a null lock token. */ @Test void autoRenewMessageLockErrorNull() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); when(receivedMessage.getLockToken()).thenReturn(null); when(managementNode.renewMessageLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).renewMessageLock(anyString(), isNull()); } /** * Verifies that it errors when we try an empty string lock token. 
*/ @Test void autoRenewMessageLockErrorEmptyString() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String lockToken = ""; when(receivedMessage.getLockToken()).thenReturn(""); when(managementNode.renewMessageLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maxDuration)) .expectError(IllegalArgumentException.class) .verify(); verify(managementNode, never()).renewMessageLock(anyString(), isNull()); } /** * Verifies that we can auto-renew a session lock. */ @Test void autoRenewSessionLock() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String sessionId = "some-token"; final int atMost = 5; final Duration totalSleepPeriod = maxDuration.plusMillis(500); when(managementNode.renewSessionLock(sessionId, null)) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(sessionId, maxDuration)) .thenAwait(totalSleepPeriod) .then(() -> logger.info("Finished renewals for first sleep.")) .expectComplete() .verify(Duration.ofSeconds(5)); verify(managementNode, Mockito.atMost(atMost)).renewSessionLock(sessionId, null); } /** * Verifies that it errors when we try a null lock token. 
*/ @Test void autoRenewSessionLockErrorNull() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); when(managementNode.renewSessionLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(null, maxDuration)) .expectError(NullPointerException.class) .verify(); verify(managementNode, never()).renewSessionLock(anyString(), isNull()); } /** * Verifies that it errors when we try an empty string session id */ @Test void autoRenewSessionLockErrorEmptyString() { final Duration maxDuration = Duration.ofSeconds(8); final Duration renewalPeriod = Duration.ofSeconds(3); final String sessionId = ""; when(managementNode.renewSessionLock(anyString(), isNull())) .thenReturn(Mono.fromCallable(() -> OffsetDateTime.now().plus(renewalPeriod))); StepVerifier.create(sessionReceiver.renewSessionLock(sessionId, maxDuration)) .expectError(IllegalArgumentException.class) .verify(); verify(managementNode, never()).renewSessionLock(anyString(), isNull()); } @Test void autoCompleteMessage() { final int numberOfEvents = 3; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); final ReceiverOptions receiverOptions = new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, true); final ServiceBusReceiverAsyncClient receiver2 = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, receiverOptions, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); when(amqpReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.empty()); try { StepVerifier.create(receiver2.receiveMessages().take(numberOfEvents)) .then(() -> 
messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); } finally { receiver2.close(); } verify(amqpReceiveLink, times(numberOfEvents)).updateDisposition(lockToken, Accepted.getInstance()); } @Test void autoCompleteMessageSessionReceiver() { final int numberOfEvents = 3; final List<Message> messages = getMessages(); final String lockToken = UUID.randomUUID().toString(); final ReceiverOptions receiverOptions = new ReceiverOptions(ReceiveMode.PEEK_LOCK, PREFETCH, null, true, "Some-Session", null); final ServiceBusReceiverAsyncClient sessionReceiver2 = new ServiceBusReceiverAsyncClient(NAMESPACE, ENTITY_PATH, MessagingEntityType.QUEUE, receiverOptions, connectionProcessor, CLEANUP_INTERVAL, tracerProvider, messageSerializer, onClientClose); when(receivedMessage.getLockToken()).thenReturn(lockToken); when(messageSerializer.deserialize(any(Message.class), eq(ServiceBusReceivedMessage.class))) .thenReturn(receivedMessage); when(sessionReceiveLink.updateDisposition(lockToken, Accepted.getInstance())).thenReturn(Mono.empty()); try { StepVerifier.create(sessionReceiver2.receiveMessages().take(numberOfEvents)) .then(() -> messages.forEach(m -> messageSink.next(m))) .expectNextCount(numberOfEvents) .verifyComplete(); } finally { sessionReceiver2.close(); } verify(sessionReceiveLink, times(numberOfEvents)).updateDisposition(lockToken, Accepted.getInstance()); } private List<Message> getMessages() { final Map<String, String> map = Collections.singletonMap("SAMPLE_HEADER", "foo"); return IntStream.range(0, 10) .mapToObj(index -> getMessage(PAYLOAD_BYTES, messageTrackingUUID, map)) .collect(Collectors.toList()); } private static Stream<Arguments> errorSourceNoneOnSettlement() { return Stream.of( Arguments.of(DispositionStatus.COMPLETED, DeliveryStateType.Accepted), Arguments.of(DispositionStatus.ABANDONED, DeliveryStateType.Modified)); } }
This can be condensed into a single pass-through check followed by the wrapping case:
```java
if ((throwable instanceof ServiceBusAmqpException) || !(throwable instanceof AmqpException)) {
    return throwable;
}
return new ServiceBusAmqpException((AmqpException) throwable, errorSource);
```
/**
 * Maps an error to its Service Bus-specific exception type, tagging it with the
 * {@link ServiceBusErrorSource operation} that was in progress when the error occurred.
 *
 * @param throwable Error that occurred.
 * @param errorSource Operation that was being performed when the error occurred.
 *
 * @return The original {@code throwable} if it is already a {@link ServiceBusAmqpException} or is not
 *     an {@link AmqpException}; otherwise, a new {@link ServiceBusAmqpException} wrapping the AMQP
 *     error together with the given error source.
 */
private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) {
    // Already mapped, or not an AMQP error we know how to enrich: pass through unchanged.
    if ((throwable instanceof ServiceBusAmqpException) || !(throwable instanceof AmqpException)) {
        return throwable;
    }
    return new ServiceBusAmqpException((AmqpException) throwable, errorSource);
}
if (throwable instanceof ServiceBusAmqpException) {
/**
 * Translates an error into its Service Bus-specific exception, recording which
 * {@link ServiceBusErrorSource operation} produced it.
 *
 * @param throwable Error that occurred.
 * @param errorSource Operation that was being performed when the error occurred.
 *
 * @return A {@link ServiceBusAmqpException} wrapping the AMQP error with the given error source, or
 *     the unmodified {@code throwable} when it is already wrapped or is not an {@link AmqpException}.
 */
private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) {
    final boolean alreadyMapped = throwable instanceof ServiceBusAmqpException;
    final boolean isAmqpError = throwable instanceof AmqpException;

    // Only raw AMQP errors get enriched; everything else is passed through as-is.
    if (alreadyMapped || !isAmqpError) {
        return throwable;
    }

    return new ServiceBusAmqpException((AmqpException) throwable, errorSource);
}
class ServiceBusReceiverAsyncClient implements AutoCloseable { private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions(); private static final String TRANSACTION_LINK_NAME = "coordinator"; private final LockContainer<LockRenewalOperation> renewalContainer; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final LockContainer<OffsetDateTime> managementNodeLocks; private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class); private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager sessionManager; private final Semaphore completionLock = new Semaphore(1); private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1); private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>(); /** * Creates a receiver that listens to a Service Bus resource. * * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource. * @param entityPath The name of the topic or queue. * @param entityType The type of the Service Bus resource. * @param receiverOptions Options when receiving messages. * @param connectionProcessor The AMQP connection to the Service Bus resource. * @param tracerProvider Tracer for telemetry. * @param messageSerializer Serializes and deserializes Service Bus messages. * @param onClientClose Operation to run when the client completes. 
*/ ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.managementNodeLocks = new LockContainer<>(cleanupInterval); this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> { logger.info("Closing expired renewal operation. lockToken[{}]. status[{}]. 
throwable[{}].", renewal.getLockToken(), renewal.getStatus(), renewal.getThrowable()); renewal.close(); }); this.sessionManager = null; } ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose, ServiceBusSessionManager sessionManager) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null."); this.managementNodeLocks = new LockContainer<>(cleanupInterval); this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> { logger.info("Closing expired renewal operation. sessionId[{}]. status[{}]. throwable[{}]", renewal.getSessionId(), renewal.getStatus(), renewal.getThrowable()); renewal.close(); }); } /** * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Service Bus namespace that the connection is associated with. 
*/ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Service Bus resource this client interacts with. * * @return The Service Bus resource this client interacts with. */ public String getEntityPath() { return entityPath; } /** * Abandon a {@link ServiceBusReceivedMessage message}. This will make the message available * again for processing. Abandoning a message will increase the delivery count on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the Service Bus abandon operation completes. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. */ public Mono<Void> abandon(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null); } /** * Abandon a {@link ServiceBusReceivedMessage message} updates the message's properties. * This will make the message available again for processing. Abandoning a message will increase the delivery count * on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to abandon the message. You can specify * {@link AbandonOptions * {@code transactionContext} can be set using * {@link AbandonOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the Service Bus operation finishes. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. 
*/ public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'settlementOptions' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.ABANDONED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. */ public Mono<Void> complete(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null); } /** * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the * service. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to complete the message. The {@code transactionContext} can be set using * {@link CompleteOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. 
*/ public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, options.getTransactionContext()); } /** * Defers a {@link ServiceBusReceivedMessage message}. This will move message into the deferred subqueue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the Service Bus defer operation finishes. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * @see <a href="https: */ public Mono<Void> defer(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null); } /** * Defers a {@link ServiceBusReceivedMessage message} with modified message property. This will move message into * the deferred subqueue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to defer the message. You can specify {@link DeferOptions * to modify on the Message. The {@code transactionContext} can be set using * {@link DeferOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the defer operation finishes. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. 
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @see <a href="https: */ public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.DEFERRED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(ServiceBusReceivedMessage message) { return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to deadLetter the message. You can specify * {@link DeadLetterOptions * {@code transactionContext} can be set using * {@link DeadLetterOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. 
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(), options.getDeadLetterErrorDescription(), options.getPropertiesToModify(), options.getTransactionContext()); } /** * Gets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ public Mono<byte[]> getSessionState(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot get session state on a non-session receiver.")); } if (sessionManager != null) { return sessionManager.getSessionState(sessionId); } else { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId))); } } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. 
* * @return A peeked {@link ServiceBusReceivedMessage}. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessage() { return peekMessage(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. * @throws IllegalStateException if the receiver is disposed. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> { final long sequence = lastPeekedSequenceNumber.get() + 1; logger.verbose("Peek message from sequence number: {}", sequence); return channel.peek(sequence, sessionId, getLinkName(sessionId)); }) .handle((message, sink) -> { final long current = lastPeekedSequenceNumber .updateAndGet(value -> Math.max(value, message.getSequenceNumber())); logger.verbose("Updating last peeked sequence number: {}", current); sink.next(message); }); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. * * @return A peeked {@link ServiceBusReceivedMessage}. 
* @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessageAt(long sequenceNumber) { return peekMessageAt(sequenceNumber, receiverOptions.getSessionId()); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessageAt(long sequenceNumber, String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId))); } /** * Reads the next batch of active messages without changing the state of the receiver or the message source. * * @param maxMessages The number of messages. * * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) { return peekMessages(maxMessages, receiverOptions.getSessionId()); } /** * Reads the next batch of active messages without changing the state of the receiver or the message source. * * @param maxMessages The number of messages. * @param sessionId Session id of the messages to peek from. {@code null} if there is no session. * * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. 
* @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> { final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1; logger.verbose("Peek batch from sequence number: {}", nextSequenceNumber); final Flux<ServiceBusReceivedMessage> messages = node.peek(nextSequenceNumber, sessionId, getLinkName(sessionId), maxMessages); final Mono<ServiceBusReceivedMessage> handle = messages .switchIfEmpty(Mono.fromCallable(() -> { ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(new byte[0]); emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get()); return emptyMessage; })) .last() .handle((last, sink) -> { final long current = lastPeekedSequenceNumber .updateAndGet(value -> Math.max(value, last.getSequenceNumber())); logger.verbose("Last peeked sequence number in batch: {}", current); sink.complete(); }); return Flux.merge(messages, handle); }); } /** * Starting from the given sequence number, reads the next batch of active messages without changing the state of * the receiver or the message source. * * @param maxMessages The number of messages. * @param sequenceNumber The sequence number from where to start reading messages. * * @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. 
* @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekMessagesAt(int maxMessages, long sequenceNumber) { return peekMessagesAt(maxMessages, sequenceNumber, receiverOptions.getSessionId()); } /** * Starting from the given sequence number, reads the next batch of active messages without changing the state of * the receiver or the message source. * * @param maxMessages The number of messages. * @param sequenceNumber The sequence number from where to start reading messages. * @param sessionId Session id of the messages to peek from. {@code null} if there is no session. * * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekMessagesAt(int maxMessages, long sequenceNumber, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages)); } /** * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity. * This Flux continuously receives messages from a Service Bus entity until either: * * <ul> * <li>The receiver is closed.</li> * <li>The subscription to the Flux is disposed.</li> * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux * {@link Flux * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li> * </ul> * * @return An <b>infinite</b> stream of messages from the Service Bus entity. */ public Flux<ServiceBusReceivedMessageContext> receiveMessages() { final Flux<ServiceBusReceivedMessageContext> messageFlux = sessionManager != null ? 
sessionManager.receive() : getOrCreateConsumer().receive().map(ServiceBusReceivedMessageContext::new); final Flux<ServiceBusReceivedMessageContext> withAutoLockRenewal; if (receiverOptions.isAutoLockRenewEnabled()) { withAutoLockRenewal = new FluxAutoLockRenew(messageFlux, receiverOptions.getMaxLockRenewDuration(), renewalContainer, this::renewMessageLock); } else { withAutoLockRenewal = messageFlux; } final Flux<ServiceBusReceivedMessageContext> withAutoComplete; if (receiverOptions.isEnableAutoComplete()) { withAutoComplete = new FluxAutoComplete(withAutoLockRenewal, completionLock, context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(), context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty()); } else { withAutoComplete = withAutoLockRenewal; } return withAutoComplete .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using * sequence number. * * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * * @return A deferred message with the matching {@code sequenceNumber}. */ public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) { return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId()); } /** * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using * sequence number. * * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * @param sessionId Session id of the deferred message. {@code null} if there is no session. * * @return A deferred message with the matching {@code sequenceNumber}. 
*/ public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last()) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil())); } return receivedMessage; }); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}. */ public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) { return receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId()); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * @param sessionId Session id of the deferred messages. {@code null} if there is no session. * * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}. 
*/ public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), sequenceNumbers)) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil())); } return receivedMessage; }); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. When a message is received in {@link ReceiveMode * this receiver instance for a duration as specified during the entity creation (LockDuration). If processing of * the message requires longer than this duration, the lock needs to be renewed. For each renewal, the lock is reset * to the entity's LockDuration value. * * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal. * * @return The new expiration time for the message. * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalStateException if the receiver is a session receiver. * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value. 
*/ public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock"))); } else if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } else if (Objects.isNull(message.getLockToken())) { return monoError(logger, new NullPointerException("'message.getLockToken()' cannot be null.")); } else if (message.getLockToken().isEmpty()) { return monoError(logger, new IllegalArgumentException("'message.getLockToken()' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken()))); } return renewMessageLock(message.getLockToken()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. * * @param lockToken to be renewed. * * @return The new expiration time for the message. */ Mono<OffsetDateTime> renewMessageLock(String lockToken) { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(serviceBusManagementNode -> serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null))) .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime, offsetDateTime)) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param maxLockRenewalDuration Maximum duration to keep renewing the lock token. * * @return A lock renewal operation for the message. 
* @throws NullPointerException if {@code message}, {@code message.getLockToken()} or {@code * maxLockRenewalDuration} is null. * @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed. * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value. */ public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock"))); } else if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } else if (Objects.isNull(message.getLockToken())) { return monoError(logger, new NullPointerException("'message.getLockToken()' cannot be null.")); } else if (message.getLockToken().isEmpty()) { return monoError(logger, new IllegalArgumentException("'message.getLockToken()' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken()))); } else if (maxLockRenewalDuration == null) { return monoError(logger, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(logger, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative.")); } final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(), maxLockRenewalDuration, false, ignored -> renewMessageLock(message)); renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return operation.getCompletionOperation() .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Renews the session lock. * * @param sessionId Identifier of session to get. 
* * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ public Mono<OffsetDateTime> renewSessionLock(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot renew session lock on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.renewSessionLock(sessionId, linkName)) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Starts the auto lock renewal for a session id. * * @param sessionId Id for the session to renew. * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock. * * @return A lock renewal operation for the message. * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null. * @throws IllegalArgumentException if {@code sessionId} is an empty string. * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed. 
*/ public Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( "Cannot renew session lock on a non-session receiver.")); } else if (maxLockRenewalDuration == null) { return monoError(logger, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(logger, new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } else if (Objects.isNull(sessionId)) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty.")); } final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true, this::renewSessionLock); renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return operation .getCompletionOperation() .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Sets the state of a session given its identifier. * * @param sessionId Identifier of session to get. * @param sessionState State to set on the session. * * @return A Mono that completes when the session is set * @throws IllegalStateException if the receiver is a non-session receiver. 
*/ public Mono<Void> setSessionState(String sessionId, byte[] sessionState) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot set session state on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName)); } /** * Starts a new service side transaction. The {@link ServiceBusTransactionContext} should be passed to all * operations that needs to be in this transaction. * * <p><strong>Create a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.createTransaction} * * @return The {@link Mono} that finishes this operation on service bus resource. */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())); } /** * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * <p><strong>Commit a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.commitTransaction} * * @param transactionContext to be committed. * * @return The {@link Mono} that finishes this operation on service bus resource. 
* @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is * null. */ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * <p><strong>Rollback a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.rollbackTransaction} * * @param transactionContext to be rollbacked. * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is * null. 
*/ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Disposes of the consumer by closing the underlying connection to the service. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } try { completionLock.acquire(); } catch (InterruptedException e) { logger.info("Unable to obtain completion lock.", e); } logger.info("Removing receiver links."); final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null); if (disposed != null) { disposed.close(); } if (sessionManager != null) { sessionManager.close(); } onClientClose.run(); } /** * @return receiver options set by user; */ ReceiverOptions getReceiverOptions() { return receiverOptions; } /** * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are * held by the management node when they are received from the management node or management operations are * performed using that {@code lockToken}. * * @param lockToken Lock token to check for. * * @return {@code true} if the management node contains the lock token and false otherwise. 
*/ private boolean isManagementToken(String lockToken) { return managementNodeLocks.containsUnexpired(lockToken); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue()))); } else if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } final String lockToken = message.getLockToken(); final String sessionId = message.getSessionId(); if (receiverOptions.getReceiveMode() != ReceiveMode.PEEK_LOCK) { return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format( "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus)))); } final String sessionIdToUse; if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) { sessionIdToUse = receiverOptions.getSessionId(); } else { sessionIdToUse = sessionId; } logger.info("{}: Update started. Disposition: {}. Lock: {}. SessionId: {}.", entityPath, dispositionStatus, lockToken, sessionIdToUse); final Mono<Void> performOnManagement = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext)) .then(Mono.fromRunnable(() -> { logger.info("{}: Management node Update completed. Disposition: {}. 
Lock: {}.", entityPath, dispositionStatus, lockToken); managementNodeLocks.remove(lockToken); renewalContainer.remove(lockToken); })); Mono<Void> updateDispositionOperation; if (sessionManager != null) { updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus, propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext) .flatMap(isSuccess -> { if (isSuccess) { renewalContainer.remove(lockToken); return Mono.empty(); } logger.info("Could not perform on session manger. Performing on management node."); return performOnManagement; }); } else { final ServiceBusAsyncConsumer existingConsumer = consumer.get(); if (isManagementToken(lockToken) || existingConsumer == null) { updateDispositionOperation = performOnManagement; } else { updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext) .then(Mono.fromRunnable(() -> { logger.info("{}: Update completed. Disposition: {}. 
Lock: {}.", entityPath, dispositionStatus, lockToken); renewalContainer.remove(lockToken); })); } } return updateDispositionOperation .onErrorMap(throwable -> { ServiceBusErrorSource errorSource; if (throwable instanceof AmqpException) { switch (dispositionStatus) { case COMPLETED: errorSource = ServiceBusErrorSource.COMPLETE; break; case DEFERRED: errorSource = ServiceBusErrorSource.DEFER; break; case SUSPENDED: errorSource = ServiceBusErrorSource.DEAD_LETTER; break; case ABANDONED: errorSource = ServiceBusErrorSource.ABANDONED; break; default: errorSource = ServiceBusErrorSource.UNKNOWN; } return new ServiceBusAmqpException((AmqpException) throwable, errorSource); } else { return throwable; } }); } private ServiceBusAsyncConsumer getOrCreateConsumer() { final ServiceBusAsyncConsumer existing = consumer.get(); if (existing != null) { return existing; } final String linkName = StringUtil.getRandomString(entityPath); logger.info("{}: Creating consumer for link '{}'", entityPath, linkName); final Flux<ServiceBusReceiveLink> receiveLink = connectionProcessor.flatMap(connection -> { if (receiverOptions.isSessionReceiver()) { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, receiverOptions.getSessionId()); } else { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType); } }) .doOnNext(next -> { final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]" + " sessionEnabled? 
{} transferEntityPath: [{}], entityType: [{}]"; logger.verbose(format, next.getEntityPath(), receiverOptions.getReceiveMode(), CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()), "N/A", entityType); }) .repeat(); final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions()); final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith( new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy, receiverOptions.getReceiveMode())); final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor, messageSerializer, receiverOptions); if (consumer.compareAndSet(null, newConsumer)) { return newConsumer; } else { newConsumer.close(); return consumer.get(); } } /** * If the receiver has not connected via {@link * through the management node. * * @return The name of the receive link, or null of it has not connected via a receive link. */ private String getLinkName(String sessionId) { if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) { return sessionManager.getLinkName(sessionId); } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) { return null; } else { final ServiceBusAsyncConsumer existing = consumer.get(); return existing != null ? existing.getLinkName() : null; } } /** * Map the error to {@link ServiceBusAmqpException} */ }
class ServiceBusReceiverAsyncClient implements AutoCloseable {
    private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
    private static final String TRANSACTION_LINK_NAME = "coordinator";

    // In-flight lock-renewal operations, keyed by lock token (or session id for session renewals).
    private final LockContainer<LockRenewalOperation> renewalContainer;
    // Flipped exactly once in close(); guards every public operation against use-after-close.
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Lock tokens whose locks are held by the management node (see isManagementToken).
    private final LockContainer<OffsetDateTime> managementNodeLocks;
    private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class);
    private final String fullyQualifiedNamespace;
    private final String entityPath;
    private final MessagingEntityType entityType;
    private final ReceiverOptions receiverOptions;
    private final ServiceBusConnectionProcessor connectionProcessor;
    private final TracerProvider tracerProvider;
    private final MessageSerializer messageSerializer;
    // Run once from close() so the owner can release shared resources.
    private final Runnable onClientClose;
    // Non-null only for session-aware receivers (second constructor).
    private final ServiceBusSessionManager sessionManager;
    // Serializes close() against in-flight auto-complete settlement (acquired in close()).
    private final Semaphore completionLock = new Semaphore(1);

    // Highest sequence number seen by peek operations; the next peek starts after it. -1 = nothing peeked yet.
    private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
    // The single lazily-created consumer for non-session receive (see getOrCreateConsumer).
    private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();

    /**
     * Creates a receiver that listens to a Service Bus resource.
     *
     * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
     * @param entityPath The name of the topic or queue.
     * @param entityType The type of the Service Bus resource.
     * @param receiverOptions Options when receiving messages.
     * @param connectionProcessor The AMQP connection to the Service Bus resource.
     * @param tracerProvider Tracer for telemetry.
     * @param messageSerializer Serializes and deserializes Service Bus messages.
     * @param onClientClose Operation to run when the client completes.
*/ ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.managementNodeLocks = new LockContainer<>(cleanupInterval); this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> { logger.info("Closing expired renewal operation. lockToken[{}]. status[{}]. 
throwable[{}].", renewal.getLockToken(), renewal.getStatus(), renewal.getThrowable()); renewal.close(); }); this.sessionManager = null; } ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose, ServiceBusSessionManager sessionManager) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null."); this.managementNodeLocks = new LockContainer<>(cleanupInterval); this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> { logger.info("Closing expired renewal operation. sessionId[{}]. status[{}]. throwable[{}]", renewal.getSessionId(), renewal.getStatus(), renewal.getThrowable()); renewal.close(); }); } /** * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Service Bus namespace that the connection is associated with. 
*/ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Service Bus resource this client interacts with. * * @return The Service Bus resource this client interacts with. */ public String getEntityPath() { return entityPath; } /** * Abandon a {@link ServiceBusReceivedMessage message}. This will make the message available * again for processing. Abandoning a message will increase the delivery count on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the Service Bus abandon operation completes. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. */ public Mono<Void> abandon(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null); } /** * Abandon a {@link ServiceBusReceivedMessage message} updates the message's properties. * This will make the message available again for processing. Abandoning a message will increase the delivery count * on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to abandon the message. You can specify * {@link AbandonOptions * {@code transactionContext} can be set using * {@link AbandonOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the Service Bus operation finishes. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. 
*/ public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'settlementOptions' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.ABANDONED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. */ public Mono<Void> complete(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null); } /** * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the * service. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to complete the message. The {@code transactionContext} can be set using * {@link CompleteOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. 
*/ public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, options.getTransactionContext()); } /** * Defers a {@link ServiceBusReceivedMessage message}. This will move message into the deferred subqueue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the Service Bus defer operation finishes. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * @see <a href="https: */ public Mono<Void> defer(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null); } /** * Defers a {@link ServiceBusReceivedMessage message} with modified message property. This will move message into * the deferred subqueue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to defer the message. You can specify {@link DeferOptions * to modify on the Message. The {@code transactionContext} can be set using * {@link DeferOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the defer operation finishes. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. 
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @see <a href="https: */ public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.DEFERRED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(ServiceBusReceivedMessage message) { return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to deadLetter the message. You can specify * {@link DeadLetterOptions * {@code transactionContext} can be set using * {@link DeadLetterOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. 
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(), options.getDeadLetterErrorDescription(), options.getPropertiesToModify(), options.getTransactionContext()); } /** * Gets the state of the session if this receiver is a session receiver. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ public Mono<byte[]> getSessionState() { return getSessionState(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @return A peeked {@link ServiceBusReceivedMessage}. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessage() { return peekMessage(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. 
* @throws IllegalStateException if the receiver is disposed. * @see <a href="https: */ Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> { final long sequence = lastPeekedSequenceNumber.get() + 1; logger.verbose("Peek message from sequence number: {}", sequence); return channel.peek(sequence, sessionId, getLinkName(sessionId)); }) .handle((message, sink) -> { final long current = lastPeekedSequenceNumber .updateAndGet(value -> Math.max(value, message.getSequenceNumber())); logger.verbose("Updating last peeked sequence number: {}", current); sink.next(message); }); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. * * @return A peeked {@link ServiceBusReceivedMessage}. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessageAt(long sequenceNumber) { return peekMessageAt(sequenceNumber, receiverOptions.getSessionId()); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. 
* @see <a href="https: */ Mono<ServiceBusReceivedMessage> peekMessageAt(long sequenceNumber, String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId))); } /** * Reads the next batch of active messages without changing the state of the receiver or the message source. * * @param maxMessages The number of messages. * * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) { return peekMessages(maxMessages, receiverOptions.getSessionId()); } /** * Reads the next batch of active messages without changing the state of the receiver or the message source. * * @param maxMessages The number of messages. * @param sessionId Session id of the messages to peek from. {@code null} if there is no session. * * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. 
     * @see <a href="https:
     */
    Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
        }

        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> {
                // Batch peek continues from just after the last sequence number already seen.
                final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
                logger.verbose("Peek batch from sequence number: {}", nextSequenceNumber);

                final Flux<ServiceBusReceivedMessage> messages = node.peek(nextSequenceNumber, sessionId,
                    getLinkName(sessionId), maxMessages);

                // Side channel that advances lastPeekedSequenceNumber from the batch's last message.
                // switchIfEmpty supplies a placeholder carrying the current high-water mark so that
                // last() does not fail on an empty batch; handle() only completes, emitting nothing.
                final Mono<ServiceBusReceivedMessage> handle = messages
                    .switchIfEmpty(Mono.fromCallable(() -> {
                        ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(new byte[0]);
                        emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                        return emptyMessage;
                    }))
                    .last()
                    .handle((last, sink) -> {
                        final long current = lastPeekedSequenceNumber
                            .updateAndGet(value -> Math.max(value, last.getSequenceNumber()));
                        logger.verbose("Last peeked sequence number in batch: {}", current);
                        sink.complete();
                    });

                // merge() runs both: subscribers see only the peeked messages, while `handle`
                // updates the sequence-number bookkeeping.
                return Flux.merge(messages, handle);
            });
    }

    /**
     * Starting from the given sequence number, reads the next batch of active messages without changing the state of
     * the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sequenceNumber The sequence number from where to start reading messages.
     *
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
* @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekMessagesAt(int maxMessages, long sequenceNumber) { return peekMessagesAt(maxMessages, sequenceNumber, receiverOptions.getSessionId()); } /** * Starting from the given sequence number, reads the next batch of active messages without changing the state of * the receiver or the message source. * * @param maxMessages The number of messages. * @param sequenceNumber The sequence number from where to start reading messages. * @param sessionId Session id of the messages to peek from. {@code null} if there is no session. * * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @see <a href="https: */ Flux<ServiceBusReceivedMessage> peekMessagesAt(int maxMessages, long sequenceNumber, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages)); } /** * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity. * This Flux continuously receives messages from a Service Bus entity until either: * * <ul> * <li>The receiver is closed.</li> * <li>The subscription to the Flux is disposed.</li> * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux * {@link Flux * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li> * </ul> * * @return An <b>infinite</b> stream of messages from the Service Bus entity. */ public Flux<ServiceBusReceivedMessageContext> receiveMessages() { final Flux<ServiceBusReceivedMessageContext> messageFlux = sessionManager != null ? 
sessionManager.receive() : getOrCreateConsumer().receive().map(ServiceBusReceivedMessageContext::new); final Flux<ServiceBusReceivedMessageContext> withAutoLockRenewal; if (receiverOptions.isAutoLockRenewEnabled()) { withAutoLockRenewal = new FluxAutoLockRenew(messageFlux, receiverOptions.getMaxLockRenewDuration(), renewalContainer, this::renewMessageLock); } else { withAutoLockRenewal = messageFlux; } final Flux<ServiceBusReceivedMessageContext> withAutoComplete; if (receiverOptions.isEnableAutoComplete()) { withAutoComplete = new FluxAutoComplete(withAutoLockRenewal, completionLock, context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(), context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty()); } else { withAutoComplete = withAutoLockRenewal; } return withAutoComplete .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using * sequence number. * * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * * @return A deferred message with the matching {@code sequenceNumber}. */ public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) { return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId()); } /** * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using * sequence number. * * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * @param sessionId Session id of the deferred message. {@code null} if there is no session. * * @return A deferred message with the matching {@code sequenceNumber}. 
*/ Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last()) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil())); } return receivedMessage; }); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}. */ public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) { return receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId()); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * @param sessionId Session id of the deferred messages. {@code null} if there is no session. * * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}. 
*/ Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), sequenceNumbers)) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil())); } return receivedMessage; }); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. When a message is received in {@link ReceiveMode * this receiver instance for a duration as specified during the entity creation (LockDuration). If processing of * the message requires longer than this duration, the lock needs to be renewed. For each renewal, the lock is reset * to the entity's LockDuration value. * * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal. * * @return The new expiration time for the message. * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalStateException if the receiver is a session receiver. * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value. 
*/ public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock"))); } else if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } else if (Objects.isNull(message.getLockToken())) { return monoError(logger, new NullPointerException("'message.getLockToken()' cannot be null.")); } else if (message.getLockToken().isEmpty()) { return monoError(logger, new IllegalArgumentException("'message.getLockToken()' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken()))); } return renewMessageLock(message.getLockToken()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. * * @param lockToken to be renewed. * * @return The new expiration time for the message. */ Mono<OffsetDateTime> renewMessageLock(String lockToken) { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(serviceBusManagementNode -> serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null))) .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime, offsetDateTime)); } /** * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param maxLockRenewalDuration Maximum duration to keep renewing the lock token. * * @return A lock renewal operation for the message. 
 * @throws NullPointerException if {@code message}, {@code message.getLockToken()} or {@code
 * maxLockRenewalDuration} is null.
 * @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed.
 * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
 */
public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock")));
    } else if (Objects.isNull(message)) {
        return monoError(logger, new NullPointerException("'message' cannot be null."));
    } else if (Objects.isNull(message.getLockToken())) {
        return monoError(logger, new NullPointerException("'message.getLockToken()' cannot be null."));
    } else if (message.getLockToken().isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
    } else if (receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException(
            String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken())));
    } else if (maxLockRenewalDuration == null) {
        return monoError(logger, new NullPointerException("'maxLockRenewalDuration' cannot be null."));
    } else if (maxLockRenewalDuration.isNegative()) {
        return monoError(logger, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative."));
    }

    // Repeatedly renews the lock (via renewMessageLock(message)) until maxLockRenewalDuration elapses.
    final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(),
        maxLockRenewalDuration, false, ignored -> renewMessageLock(message));

    // Registered so the renewal is cancelled when the message is settled or the client closes.
    renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration),
        operation);

    return operation.getCompletionOperation()
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
}

/**
 * Renews the session lock if this receiver is a session receiver.
 *
 * @return The next expiration time for the session lock.
 * @throws IllegalStateException if the receiver is a non-session receiver.
 */
public Mono<OffsetDateTime> renewSessionLock() {
    return renewSessionLock(receiverOptions.getSessionId());
}

/**
 * Starts the auto lock renewal for the session this receiver works for.
 *
 * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock.
 *
 * @return A lock renewal operation for the message.
 * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null.
 * @throws IllegalArgumentException if {@code sessionId} is an empty string.
 * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed.
 */
public Mono<Void> renewSessionLock(Duration maxLockRenewalDuration) {
    return this.renewSessionLock(receiverOptions.getSessionId(), maxLockRenewalDuration);
}

/**
 * Sets the state of the session this receiver works for.
 *
 * @param sessionState State to set on the session.
 *
 * @return A Mono that completes when the session is set
 * @throws IllegalStateException if the receiver is a non-session receiver.
 */
public Mono<Void> setSessionState(byte[] sessionState) {
    return this.setSessionState(receiverOptions.getSessionId(), sessionState);
}

/**
 * Starts a new service side transaction. The {@link ServiceBusTransactionContext} should be passed to all
 * operations that needs to be in this transaction.
 *
 * <p><strong>Create a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.createTransaction}
 *
 * @return The {@link Mono} that finishes this operation on service bus resource.
 */
public Mono<ServiceBusTransactionContext> createTransaction() {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction")));
    }

    // Transactions use a dedicated AMQP session/link identified by TRANSACTION_LINK_NAME.
    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.createTransaction())
        .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId()));
}

/**
 * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
 * <p><strong>Commit a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.commitTransaction}
 *
 * @param transactionContext to be committed.
 *
 * @return The {@link Mono} that finishes this operation on service bus resource.
 * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is
 * null.
 */
public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }

    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction(
            transactionContext.getTransactionId())));
}

/**
 * Rolls back the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
 * <p><strong>Rollback a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.rollbackTransaction}
 *
 * @param transactionContext to be rollbacked.
 *
 * @return The {@link Mono} that finishes this operation on service bus resource.
 * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is
 * null.
 */
public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }

    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction(
            transactionContext.getTransactionId())));
}

/**
 * Disposes of the consumer by closing the underlying connection to the service.
 */
@Override
public void close() {
    // First caller wins; subsequent close() calls are no-ops.
    if (isDisposed.getAndSet(true)) {
        return;
    }

    // Wait for any in-flight auto-complete settlement before tearing down links.
    try {
        completionLock.acquire();
    } catch (InterruptedException e) {
        // NOTE(review): interrupt status is swallowed here — consider restoring it with
        // Thread.currentThread().interrupt(); confirm against the project's conventions.
        logger.info("Unable to obtain completion lock.", e);
    }

    logger.info("Removing receiver links.");
    final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null);
    if (disposed != null) {
        disposed.close();
    }

    if (sessionManager != null) {
        sessionManager.close();
    }

    // Notifies the owning builder/client so shared resources can be released.
    onClientClose.run();
}

/**
 * @return receiver options set by user;
 */
ReceiverOptions getReceiverOptions() {
    return receiverOptions;
}

/**
 * Gets whether or not the management node contains the message lock token and it has not expired.
 * Lock tokens are
 * held by the management node when they are received from the management node or management operations are
 * performed using that {@code lockToken}.
 *
 * @param lockToken Lock token to check for.
 *
 * @return {@code true} if the management node contains the lock token and false otherwise.
 */
private boolean isManagementToken(String lockToken) {
    return managementNodeLocks.containsUnexpired(lockToken);
}

/**
 * Settles a message (complete/abandon/defer/dead-letter) by routing the disposition either through the
 * session manager, the active receive link, or the management node, depending on where the lock is held.
 *
 * @param message Message to settle; its lock token and session id select the settlement route.
 * @param dispositionStatus Terminal state to apply to the message.
 * @param deadLetterReason Optional dead-letter reason.
 * @param deadLetterErrorDescription Optional dead-letter error description.
 * @param propertiesToModify Optional properties to modify on the message.
 * @param transactionContext Optional transaction this settlement participates in.
 *
 * @return A Mono that completes when the disposition has been applied.
 */
private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus,
    String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify,
    ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue())));
    } else if (Objects.isNull(message)) {
        return monoError(logger, new NullPointerException("'message' cannot be null."));
    }

    final String lockToken = message.getLockToken();
    final String sessionId = message.getSessionId();

    // Settlement requires a lock; RECEIVE_AND_DELETE has already removed the message.
    if (receiverOptions.getReceiveMode() != ReceiveMode.PEEK_LOCK) {
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus))));
    }

    // Fall back to the receiver's configured session id when the message carries none.
    final String sessionIdToUse;
    if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) {
        sessionIdToUse = receiverOptions.getSessionId();
    } else {
        sessionIdToUse = sessionId;
    }

    logger.info("{}: Update started. Disposition: {}. Lock: {}. SessionId: {}.", entityPath, dispositionStatus,
        lockToken, sessionIdToUse);

    // Management-node settlement path; on success, stop tracking the lock locally.
    final Mono<Void> performOnManagement = connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
            deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId),
            transactionContext))
        .then(Mono.fromRunnable(() -> {
            logger.info("{}: Management node Update completed. Disposition: {}. Lock: {}.",
                entityPath, dispositionStatus, lockToken);
            managementNodeLocks.remove(lockToken);
            renewalContainer.remove(lockToken);
        }));

    Mono<Void> updateDispositionOperation;
    if (sessionManager != null) {
        // Session receivers try the session link first, then fall back to the management node.
        updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus,
            propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext)
            .flatMap(isSuccess -> {
                if (isSuccess) {
                    renewalContainer.remove(lockToken);
                    return Mono.empty();
                }

                logger.info("Could not perform on session manger. Performing on management node.");
                return performOnManagement;
            });
    } else {
        // Non-session: settle over the receive link when the lock was obtained on it; otherwise
        // (management-tracked lock or no link yet) go through the management node.
        final ServiceBusAsyncConsumer existingConsumer = consumer.get();
        if (isManagementToken(lockToken) || existingConsumer == null) {
            updateDispositionOperation = performOnManagement;
        } else {
            updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus,
                deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext)
                .then(Mono.fromRunnable(() -> {
                    logger.info("{}: Update completed. Disposition: {}. Lock: {}.",
                        entityPath, dispositionStatus, lockToken);
                    renewalContainer.remove(lockToken);
                }));
        }
    }

    return updateDispositionOperation
        .onErrorMap(throwable -> {
            // Under auto-complete, surface which settlement stage failed via the error source.
            if (receiverOptions.isEnableAutoComplete() && throwable instanceof AmqpException) {
                switch (dispositionStatus) {
                    case COMPLETED:
                        return new ServiceBusAmqpException((AmqpException) throwable,
                            ServiceBusErrorSource.COMPLETE);
                    case ABANDONED:
                        return new ServiceBusAmqpException((AmqpException) throwable,
                            ServiceBusErrorSource.ABANDONED);
                    default:
                }
            }
            return throwable;
        });
}

/**
 * Returns the existing consumer, or atomically creates the single receive link consumer for this
 * receiver if none exists yet.
 */
private ServiceBusAsyncConsumer getOrCreateConsumer() {
    final ServiceBusAsyncConsumer existing = consumer.get();
    if (existing != null) {
        return existing;
    }

    final String linkName = StringUtil.getRandomString(entityPath);
    logger.info("{}: Creating consumer for link '{}'", entityPath, linkName);

    // repeat() re-creates the link whenever the previous one completes (e.g. reconnects).
    final Flux<ServiceBusReceiveLink> receiveLink = connectionProcessor.flatMap(connection -> {
        if (receiverOptions.isSessionReceiver()) {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType, receiverOptions.getSessionId());
        } else {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType);
        }
    })
        .doOnNext(next -> {
            // NOTE(review): the value logged under "sessionEnabled?" is
            // isNullOrEmpty(sessionId), which reads inverted — confirm intent.
            final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]"
                + " sessionEnabled? {} transferEntityPath: [{}], entityType: [{}]";
            logger.verbose(format, next.getEntityPath(), receiverOptions.getReceiveMode(),
                CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()), "N/A", entityType);
        })
        .repeat();

    final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
    final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith(
        new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy,
            receiverOptions.getReceiveMode()));
    final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor,
        messageSerializer, receiverOptions);

    // Lost the race to another thread: dispose ours and use the winner's consumer.
    if (consumer.compareAndSet(null, newConsumer)) {
        return newConsumer;
    } else {
        newConsumer.close();
        return consumer.get();
    }
}

/**
 * If the receiver has not connected via a receive link, the operations are performed
 * through the management node.
 *
 * @return The name of the receive link, or null of it has not connected via a receive link.
 */
private String getLinkName(String sessionId) {
    if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) {
        return sessionManager.getLinkName(sessionId);
    } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) {
        return null;
    } else {
        final ServiceBusAsyncConsumer existing = consumer.get();
        return existing != null ? existing.getLinkName() : null;
    }
}

// Renews the lock for the given session via the management node.
Mono<OffsetDateTime> renewSessionLock(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock")));
    } else if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot renew session lock on a non-session receiver."));
    }

    final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null;

    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(channel -> channel.renewSessionLock(sessionId, linkName))
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
}

// Repeatedly renews the session lock until maxLockRenewalDuration elapses.
Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewSessionLock")));
    } else if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException(
            "Cannot renew session lock on a non-session receiver."));
    } else if (maxLockRenewalDuration == null) {
        return monoError(logger, new NullPointerException("'maxLockRenewalDuration' cannot be null."));
    } else if (maxLockRenewalDuration.isNegative()) {
        return monoError(logger, new IllegalArgumentException(
            "'maxLockRenewalDuration' cannot be negative."));
    } else if (Objects.isNull(sessionId)) {
        return monoError(logger, new NullPointerException("'sessionId' cannot be null."));
    } else if (sessionId.isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty."));
    }

    final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true,
        this::renewSessionLock);

    // Registered so the renewal is cancelled when the client closes.
    renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation);
    return operation.getCompletionOperation()
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
}

// Sets the session state via the management node.
Mono<Void> setSessionState(String sessionId, byte[] sessionState) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState")));
    } else if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot set session state on a non-session receiver."));
    }

    final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null;

    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName));
}

// Gets the session state, preferring the session manager when one exists.
Mono<byte[]> getSessionState(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState")));
    } else if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot get session state on a non-session receiver."));
    }

    if (sessionManager != null) {
        return sessionManager.getSessionState(sessionId);
    } else {
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId)));
    }
}

/**
 * Map the error to {@link ServiceBusAmqpException}
 */
// NOTE(review): the javadoc above appears orphaned — its method (presumably mapError) is not
// present in this chunk of the file; confirm against the full source.
}
Could you please also add tests for the case when the input flux `cosmosItemOperationFlux` errors?
public void createItem_withBulk() {
    // Submits 2 * totalRequest create operations in one bulk call: one flux of TestDoc creates
    // merged with one flux of EventDoc creates, each item keyed by a fresh random partition key.
    int totalRequest = getTotalRequest();
    Flux<CosmosItemOperation> cosmosItemOperationFlux = Flux.merge(
        Flux.range(0, totalRequest).map(i -> {
            String partitionKey = UUID.randomUUID().toString();
            TestDoc testDoc = this.populateTestDoc(partitionKey);
            return BulkProcessingUtil.getBulkCreateItemOperation(testDoc, new PartitionKey(partitionKey));
        }),
        Flux.range(0, totalRequest).map(i -> {
            String partitionKey = UUID.randomUUID().toString();
            EventDoc eventDoc = new EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey);
            return BulkProcessingUtil.getBulkCreateItemOperation(eventDoc, new PartitionKey(partitionKey));
        }));

    BulkProcessingOptions<CosmosBulkTest> bulkProcessingOptions = new BulkProcessingOptions<>();
    bulkProcessingOptions.setMaxMicroBatchSize(100);
    bulkProcessingOptions.setMaxMicroBatchConcurrency(5);

    Flux<CosmosBulkOperationRequestAndResponse<CosmosBulkTest>> responseFlux = bulkAsyncContainer
        .processBulkOperations(cosmosItemOperationFlux, bulkProcessingOptions);

    // Verify each response individually and count them; blockLast() drives the whole pipeline.
    AtomicInteger processedDoc = new AtomicInteger(0);
    responseFlux
        .flatMap((CosmosBulkOperationRequestAndResponse<CosmosBulkTest> cosmosBulkOperationRequestAndResponse) -> {
            processedDoc.incrementAndGet();
            CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationRequestAndResponse.getResponse();
            Assertions.assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
            Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
            Assertions.assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
            Assertions.assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
            Assertions.assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
            Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
            return Mono.just(cosmosBulkItemResponse);
        }).blockLast();

    // Every operation from both merged sources must have produced a response.
    Assertions.assertThat(processedDoc.get()).isEqualTo(totalRequest * 2);
    // NOTE(review): no coverage for the case where cosmosItemOperationFlux itself errors —
    // consider adding a test for that path.
}
.processBulkOperations(cosmosItemOperationFlux, bulkProcessingOptions);
public void createItem_withBulk() {
    // Build an interleaved list of create operations: one TestDoc and one EventDoc per iteration,
    // each keyed by a fresh random partition key.
    final int totalRequest = getTotalRequest();
    final List<CosmosItemOperation> operations = new ArrayList<>();
    for (int i = 0; i < totalRequest; i++) {
        final String testDocPartitionKey = UUID.randomUUID().toString();
        final TestDoc testDoc = this.populateTestDoc(testDocPartitionKey);
        operations.add(BulkOperations.getCreateItemOperation(testDoc, new PartitionKey(testDocPartitionKey)));

        final String eventDocPartitionKey = UUID.randomUUID().toString();
        final EventDoc eventDoc = new EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", eventDocPartitionKey);
        operations.add(BulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(eventDocPartitionKey)));
    }

    final BulkProcessingOptions<CosmosBulkAsyncTest> processingOptions = new BulkProcessingOptions<>();
    processingOptions.setMaxMicroBatchSize(100);
    processingOptions.setMaxMicroBatchConcurrency(5);

    final List<CosmosBulkOperationResponse<CosmosBulkAsyncTest>> responses =
        bulkContainer.processBulkOperations(operations, processingOptions);

    // Every submitted operation must come back as a CREATED response with diagnostics populated.
    assertThat(responses.size()).isEqualTo(totalRequest * 2);
    for (final CosmosBulkOperationResponse<CosmosBulkAsyncTest> operationResponse : responses) {
        final CosmosBulkItemResponse itemResponse = operationResponse.getResponse();
        assertThat(itemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
        assertThat(itemResponse.getRequestCharge()).isGreaterThan(0);
        assertThat(itemResponse.getCosmosDiagnostics().toString()).isNotNull();
        assertThat(itemResponse.getSessionToken()).isNotNull();
        assertThat(itemResponse.getActivityId()).isNotNull();
        assertThat(itemResponse.getRequestCharge()).isNotNull();
    }
}
// Async bulk-operation tests for CosmosAsyncContainer.processBulkOperations.
// Each test builds CosmosItemOperation instances via BulkProcessingUtil, drives
// the reactive bulk pipeline with blockLast(), and asserts per-item status,
// RU charge, diagnostics, session token and activity id.
// NOTE(review): this chunk is whitespace-mangled; only comments were added here.
class CosmosBulkTest extends BatchTestBase {
// Shared client/container, created once per class in before_CosmosBulkTest().
private CosmosAsyncClient bulkClient; private CosmosAsyncContainer bulkAsyncContainer;
// TestNG factory: one test-class instance per configured client builder.
@Factory(dataProvider = "clientBuildersWithDirectSession") public CosmosBulkTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); }
// Builds the async client and resolves the shared multi-partition container.
@BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void before_CosmosBulkTest() { assertThat(this.bulkClient).isNull(); this.bulkClient = getClientBuilder().buildAsyncClient(); bulkAsyncContainer = getSharedMultiPartitionCosmosContainer(this.bulkClient); }
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeCloseAsync(this.bulkClient); }
@Test(groups = {"simple"}, timeOut = TIMEOUT)
// NOTE(review): duplicate @Test annotation below — @Test is not a repeatable
// annotation, so this will not compile as-is; a test method between the two
// annotations appears to have been lost. Confirm against source control.
@Test(groups = {"simple"}, timeOut = TIMEOUT)
// Upserts `totalRequest` TestDocs (cost == loop index) and verifies each
// response is 201 CREATED and maps back to the operation at index `cost`.
public void upsertItem_withbulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkProcessingUtil.getBulkUpsertItemOperation(testDoc, new PartitionKey(partitionKey))); } BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(); bulkProcessingOptions.setMaxMicroBatchSize(100); bulkProcessingOptions.setMaxMicroBatchConcurrency(1); Flux<CosmosBulkOperationRequestAndResponse<Object>> responseFlux = bulkAsyncContainer .processBulkOperations(Flux.fromIterable(cosmosItemOperations)); AtomicInteger processedDoc = new AtomicInteger(0); responseFlux .flatMap((CosmosBulkOperationRequestAndResponse<Object> cosmosBulkOperationRequestAndResponse) -> { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationRequestAndResponse.getResponse(); Assertions.assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); Assertions.assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); Assertions.assertThat(cosmosBulkOperationRequestAndResponse.getOperation()).isEqualTo(cosmosItemOperations.get(testDoc.getCost())); Assertions.assertThat(testDoc).isEqualTo(cosmosBulkOperationRequestAndResponse.getOperation().getItem()); Assertions.assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); return Mono.just(cosmosBulkItemResponse); }).blockLast(); Assertions.assertThat(processedDoc.get()).isEqualTo(totalRequest); }
// Creates `totalRequest` docs (via createItemsAndVerify), then bulk-deletes
// them all and expects 204 NO_CONTENT for every delete response.
@Test(groups = {"simple"}, timeOut = TIMEOUT) public void deleteItem_withBulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkProcessingUtil.getBulkCreateItemOperation(testDoc, new PartitionKey(partitionKey))); } createItemsAndVerify(cosmosItemOperations); Flux<CosmosItemOperation> deleteCosmosItemOperationFlux = Flux.fromIterable(cosmosItemOperations).map((CosmosItemOperation cosmosItemOperation) -> { TestDoc testDoc = cosmosItemOperation.getItem(); return BulkProcessingUtil.getBulkDeleteItemOperation(testDoc.getId(), cosmosItemOperation.getPartitionKeyValue()); }); BulkProcessingOptions<TestDoc> bulkProcessingOptions = new BulkProcessingOptions<>(); bulkProcessingOptions.setMaxMicroBatchSize(30); bulkProcessingOptions.setMaxMicroBatchConcurrency(1); AtomicInteger processedDoc = new AtomicInteger(0); bulkAsyncContainer .processBulkOperations(deleteCosmosItemOperationFlux, bulkProcessingOptions) .flatMap((CosmosBulkOperationRequestAndResponse<TestDoc> cosmosBulkOperationRequestAndResponse) -> { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationRequestAndResponse.getResponse(); Assertions.assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); Assertions.assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); return Mono.just(cosmosBulkItemResponse); }).blockLast(); Assertions.assertThat(processedDoc.get()).isEqualTo(totalRequest); }
// Upserts `totalRequest` docs, bulk-reads them back, and checks each read
// returns 200 OK and the original document (matched by cost == index).
@Test(groups = {"simple"}, timeOut = TIMEOUT) public void readItem_withBulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkProcessingUtil.getBulkUpsertItemOperation(testDoc, new PartitionKey(partitionKey))); } createItemsAndVerify(cosmosItemOperations); Flux<CosmosItemOperation> readCosmosItemOperationFlux = Flux.fromIterable(cosmosItemOperations).map((CosmosItemOperation cosmosItemOperation) -> { TestDoc testDoc = cosmosItemOperation.getItem(); return BulkProcessingUtil.getBulkReadItemOperation(testDoc.getId(), cosmosItemOperation.getPartitionKeyValue()); }); BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class); bulkProcessingOptions.setMaxMicroBatchSize(30); bulkProcessingOptions.setMaxMicroBatchConcurrency(5); AtomicInteger processedDoc = new AtomicInteger(0); bulkAsyncContainer .processBulkOperations(readCosmosItemOperationFlux, bulkProcessingOptions) .flatMap((CosmosBulkOperationRequestAndResponse<Object> cosmosBulkOperationRequestAndResponse) -> { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationRequestAndResponse.getResponse(); Assertions.assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); Assertions.assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); Assertions.assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); return Mono.just(cosmosBulkItemResponse); }).blockLast(); Assertions.assertThat(processedDoc.get()).isEqualTo(totalRequest); }
// Subscribes to the same read pipeline twice: the second blockLast() re-runs
// the cold flux, so processedDoc doubles while distinctDocs stays constant.
@Test(groups = {"simple"}, timeOut = TIMEOUT) public void readItemMultipleTimes_withBulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkProcessingUtil.getBulkUpsertItemOperation(testDoc, new PartitionKey(partitionKey))); } createItemsAndVerify(cosmosItemOperations); Flux<CosmosItemOperation> readCosmosItemOperationFlux = Flux.fromIterable(cosmosItemOperations).map((CosmosItemOperation cosmosItemOperation) -> { TestDoc testDoc = cosmosItemOperation.getItem(); return BulkProcessingUtil.getBulkReadItemOperation(testDoc.getId(), cosmosItemOperation.getPartitionKeyValue()); }); BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class); bulkProcessingOptions.setMaxMicroBatchSize(30); bulkProcessingOptions.setMaxMicroBatchConcurrency(5); HashSet<TestDoc> distinctDocs = new HashSet<>(); AtomicInteger processedDoc = new AtomicInteger(0); Flux<CosmosBulkOperationRequestAndResponse<Object>> readResponseFlux = bulkAsyncContainer .processBulkOperations(readCosmosItemOperationFlux, bulkProcessingOptions) .flatMap((CosmosBulkOperationRequestAndResponse<Object> cosmosBulkOperationRequestAndResponse) -> { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationRequestAndResponse.getResponse(); Assertions.assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); Assertions.assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); distinctDocs.add(testDoc); Assertions.assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); return Mono.just(cosmosBulkOperationRequestAndResponse); }); readResponseFlux .blockLast(); Assertions.assertThat(processedDoc.get()).isEqualTo(totalRequest); Assertions.assertThat(distinctDocs.size()).isEqualTo(totalRequest); readResponseFlux .blockLast(); Assertions.assertThat(processedDoc.get()).isEqualTo(2 * totalRequest); Assertions.assertThat(distinctDocs.size()).isEqualTo(totalRequest); }
// Creates docs, bulk-replaces each with itself, and expects 200 OK plus the
// same document echoed back in every replace response.
@Test(groups = {"simple"}, timeOut = TIMEOUT) public void replaceItem_withBulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkProcessingUtil.getBulkCreateItemOperation(testDoc, new PartitionKey(partitionKey))); } createItemsAndVerify(cosmosItemOperations); Flux<CosmosItemOperation> replaceCosmosItemOperationFlux = Flux.fromIterable(cosmosItemOperations).map((CosmosItemOperation cosmosItemOperation) -> { TestDoc testDoc = cosmosItemOperation.getItem(); return BulkProcessingUtil.getBulkReplaceItemOperation( testDoc.getId(), cosmosItemOperation.getItem(), cosmosItemOperation.getPartitionKeyValue()); }); AtomicInteger processedDoc = new AtomicInteger(0); bulkAsyncContainer .processBulkOperations(replaceCosmosItemOperationFlux) .flatMap((CosmosBulkOperationRequestAndResponse<?> cosmosBulkOperationRequestAndResponse) -> { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationRequestAndResponse.getResponse(); Assertions.assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); Assertions.assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); Assertions.assertThat(testDoc).isEqualTo(cosmosBulkOperationRequestAndResponse.getOperation().getItem()); Assertions.assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); return Mono.just(cosmosBulkItemResponse); }).blockLast(); Assertions.assertThat(processedDoc.get()).isEqualTo(totalRequest); }
// Shared fixture helper: bulk-creates the given operations and verifies every
// response is 201 CREATED and maps 1:1 back to its source operation.
// NOTE(review): local "createResonseFlux" is misspelled ("Response") — worth a
// rename in a follow-up change.
private void createItemsAndVerify(List<CosmosItemOperation> cosmosItemOperations) { BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class); bulkProcessingOptions.setMaxMicroBatchSize(100); bulkProcessingOptions.setMaxMicroBatchConcurrency(5); Flux<CosmosBulkOperationRequestAndResponse<Object>> createResonseFlux = bulkAsyncContainer .processBulkOperations(Flux.fromIterable(cosmosItemOperations), bulkProcessingOptions); HashSet<Integer> distinctIndex = new HashSet<>(); AtomicInteger processedDoc = new AtomicInteger(0); createResonseFlux .flatMap((CosmosBulkOperationRequestAndResponse<Object> cosmosBulkOperationRequestAndResponse) -> { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationRequestAndResponse.getResponse(); Assertions.assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); Assertions.assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); distinctIndex.add(testDoc.getCost()); Assertions.assertThat(cosmosBulkOperationRequestAndResponse.getOperation()).isEqualTo(cosmosItemOperations.get(testDoc.getCost())); Assertions.assertThat(testDoc).isEqualTo(cosmosBulkOperationRequestAndResponse.getOperation().getItem()); Assertions.assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); return Mono.just(cosmosBulkItemResponse); }).blockLast(); Assertions.assertThat(processedDoc.get()).isEqualTo(cosmosItemOperations.size()); Assertions.assertThat(distinctIndex.size()).isEqualTo(cosmosItemOperations.size()); }
// Random request count in [120, 219] so batch boundaries vary run-to-run.
// NOTE(review): uses System.out.println; the sync sibling class uses an SLF4J
// logger — consider unifying on the logger.
private int getTotalRequest() { int countRequest = new Random().nextInt(100) + 120; System.out.println("Total count of request for this test case: " + countRequest); return countRequest; }
}
class CosmosBulkTest extends BatchTestBase { private final static Logger logger = LoggerFactory.getLogger(CosmosBulkAsyncTest.class); private CosmosClient bulkClient; private CosmosContainer bulkContainer; @Factory(dataProvider = "clientBuildersWithDirectSession") public CosmosBulkTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void before_CosmosBulkTest() { assertThat(this.bulkClient).isNull(); this.bulkClient = getClientBuilder().buildClient(); CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.bulkClient.asyncClient()); bulkContainer = bulkClient.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { assertThat(this.bulkClient).isNotNull(); this.bulkClient.close(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) @Test(groups = {"simple"}, timeOut = TIMEOUT) public void upsertItem_withbulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkOperations.getUpsertItemOperation(testDoc, new PartitionKey(partitionKey))); } BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(); bulkProcessingOptions.setMaxMicroBatchSize(100); bulkProcessingOptions.setMaxMicroBatchConcurrency(1); List<CosmosBulkOperationResponse<Object>> bulkResponse = bulkContainer .processBulkOperations(cosmosItemOperations); assertThat(bulkResponse.size()).isEqualTo(totalRequest); for (CosmosBulkOperationResponse<Object> cosmosBulkOperationResponse : bulkResponse) { CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); 
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); assertThat(cosmosBulkOperationResponse.getOperation()).isEqualTo(cosmosItemOperations.get(testDoc.getCost())); assertThat(testDoc).isEqualTo(cosmosBulkOperationResponse.getOperation().getItem()); assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void deleteItem_withBulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); } createItemsAndVerify(cosmosItemOperations); List<CosmosItemOperation> deleteCosmosItemOperation = new ArrayList<>(); for(CosmosItemOperation cosmosItemOperation : cosmosItemOperations) { TestDoc testDoc = cosmosItemOperation.getItem(); deleteCosmosItemOperation.add( BulkOperations.getDeleteItemOperation(testDoc.getId(), cosmosItemOperation.getPartitionKeyValue())); } BulkProcessingOptions<TestDoc> bulkProcessingOptions = new BulkProcessingOptions<>(); bulkProcessingOptions.setMaxMicroBatchSize(30); bulkProcessingOptions.setMaxMicroBatchConcurrency(1); List<CosmosBulkOperationResponse<TestDoc>> bulkResponse = bulkContainer .processBulkOperations(deleteCosmosItemOperation, bulkProcessingOptions); 
assertThat(bulkResponse.size()).isEqualTo(totalRequest); for (CosmosBulkOperationResponse<TestDoc> cosmosBulkOperationResponse : bulkResponse) { CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void readItem_withBulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkOperations.getUpsertItemOperation(testDoc, new PartitionKey(partitionKey))); } createItemsAndVerify(cosmosItemOperations); List<CosmosItemOperation> readCosmosItemOperations = new ArrayList<>(); for(CosmosItemOperation cosmosItemOperation : cosmosItemOperations) { TestDoc testDoc = cosmosItemOperation.getItem(); readCosmosItemOperations.add( BulkOperations.getReadItemOperation(testDoc.getId(), cosmosItemOperation.getPartitionKeyValue())); } BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class); bulkProcessingOptions.setMaxMicroBatchSize(30); bulkProcessingOptions.setMaxMicroBatchConcurrency(5); List<CosmosBulkOperationResponse<Object>> bulkResponse = bulkContainer .processBulkOperations(readCosmosItemOperations, bulkProcessingOptions); assertThat(bulkResponse.size()).isEqualTo(totalRequest); for (CosmosBulkOperationResponse<Object> cosmosBulkOperationResponse : bulkResponse) { 
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void replaceItem_withBulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); } createItemsAndVerify(cosmosItemOperations); List<CosmosItemOperation> replaceCosmosItemOperations = new ArrayList<>(); for(CosmosItemOperation cosmosItemOperation : cosmosItemOperations) { TestDoc testDoc = cosmosItemOperation.getItem(); replaceCosmosItemOperations.add(BulkOperations.getReplaceItemOperation( testDoc.getId(), cosmosItemOperation.getItem(), cosmosItemOperation.getPartitionKeyValue())); } List<CosmosBulkOperationResponse<Object>> bulkResponse = bulkContainer .processBulkOperations(replaceCosmosItemOperations); assertThat(bulkResponse.size()).isEqualTo(totalRequest); for (CosmosBulkOperationResponse<Object> cosmosBulkOperationResponse : bulkResponse) { CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); 
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); assertThat(testDoc).isEqualTo(cosmosBulkOperationResponse.getOperation().getItem()); assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); } } private void createItemsAndVerify(List<CosmosItemOperation> cosmosItemOperations) { BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class); bulkProcessingOptions.setMaxMicroBatchSize(100); bulkProcessingOptions.setMaxMicroBatchConcurrency(5); List<CosmosBulkOperationResponse<Object>> bulkResponse = bulkContainer .processBulkOperations(cosmosItemOperations, bulkProcessingOptions); assertThat(bulkResponse.size()).isEqualTo(cosmosItemOperations.size()); HashSet<Integer> distinctIndex = new HashSet<>(); for (CosmosBulkOperationResponse<Object> cosmosBulkOperationResponse : bulkResponse) { CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); distinctIndex.add(testDoc.getCost()); assertThat(cosmosBulkOperationResponse.getOperation()).isEqualTo(cosmosItemOperations.get(testDoc.getCost())); 
assertThat(testDoc).isEqualTo(cosmosBulkOperationResponse.getOperation().getItem()); assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); } assertThat(distinctIndex.size()).isEqualTo(cosmosItemOperations.size()); } private int getTotalRequest() { int countRequest = new Random().nextInt(100) + 120; logger.info("Total count of request for this test case: " + countRequest); return countRequest; } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void bulkETagTest() { this.createJsonTestDocs(bulkContainer); { BatchTestBase.TestDoc testDocToCreate = this.populateTestDoc(this.partitionKey1); BatchTestBase.TestDoc testDocToReplace = this.getTestDocCopy(this.TestDocPk1ExistingA); testDocToReplace.setCost(testDocToReplace.getCost() + 1); CosmosItemResponse<TestDoc> response = bulkContainer.readItem( this.TestDocPk1ExistingA.getId(), this.getPartitionKey(this.partitionKey1), TestDoc.class); assertThat(response.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); BulkItemRequestOptions firstReplaceOptions = new BulkItemRequestOptions(); firstReplaceOptions.setIfMatchETag(response.getETag()); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); cosmosItemOperations.add(BulkOperations.getCreateItemOperation(testDocToCreate, new PartitionKey(this.partitionKey1))); cosmosItemOperations.add(BulkOperations.getReplaceItemOperation( testDocToReplace.getId(), testDocToReplace, new PartitionKey(this.partitionKey1), firstReplaceOptions)); List<CosmosBulkOperationResponse<Object>> bulkResponses = bulkContainer .processBulkOperations(cosmosItemOperations); assertThat(bulkResponses.size()).isEqualTo(cosmosItemOperations.size()); assertThat(bulkResponses.get(0).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(bulkResponses.get(1).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); this.verifyByRead(bulkContainer, testDocToCreate, bulkResponses.get(0).getResponse().getETag()); 
this.verifyByRead(bulkContainer, testDocToReplace, bulkResponses.get(1).getResponse().getETag()); } { TestDoc testDocToReplace = this.getTestDocCopy(this.TestDocPk1ExistingB); testDocToReplace.setCost(testDocToReplace.getCost() + 1); BulkItemRequestOptions replaceOptions = new BulkItemRequestOptions(); replaceOptions.setIfMatchETag(String.valueOf(this.getRandom().nextInt())); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); cosmosItemOperations.add(BulkOperations.getReplaceItemOperation( testDocToReplace.getId(), testDocToReplace, new PartitionKey(this.partitionKey1), replaceOptions)); List<CosmosBulkOperationResponse<Object>> bulkResponses = bulkContainer .processBulkOperations(cosmosItemOperations); assertThat(bulkResponses.size()).isEqualTo(cosmosItemOperations.size()); assertThat(bulkResponses.get(0).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.PRECONDITION_FAILED.code()); this.verifyByRead(bulkContainer, this.TestDocPk1ExistingB); } { CosmosItemResponse<TestDoc> response = bulkContainer.readItem( this.TestDocPk1ExistingA.getId(), this.getPartitionKey(this.partitionKey1), TestDoc.class); assertThat(response.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); BulkItemRequestOptions readOptions = new BulkItemRequestOptions(); readOptions.setIfMatchETag(response.getETag()); BatchTestBase.TestDoc testDocToCreate = this.populateTestDoc(this.partitionKey1); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); cosmosItemOperations.add(BulkOperations.getReadItemOperation( this.TestDocPk1ExistingA.getId(), this.getPartitionKey(this.partitionKey1), readOptions)); cosmosItemOperations.add(BulkOperations.getCreateItemOperation(testDocToCreate, new PartitionKey(this.partitionKey1))); List<CosmosBulkOperationResponse<Object>> bulkResponses = bulkContainer .processBulkOperations(cosmosItemOperations); assertThat(bulkResponses.size()).isEqualTo(cosmosItemOperations.size()); 
assertThat(bulkResponses.get(0).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.NOT_MODIFIED.code()); assertThat(bulkResponses.get(1).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(bulkResponses.get(1).getResponse().getItem(TestDoc.class)).isEqualTo(testDocToCreate); this.verifyByRead(bulkContainer, testDocToCreate); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void bulkWithInvalidCreateTest() { CosmosItemOperation operation = BulkOperations.getCreateItemOperation( this.populateTestDoc(UUID.randomUUID().toString()), new PartitionKey(this.partitionKey1)); this.runWithError( bulkContainer, operations -> operations.add(operation), HttpResponseStatus.BAD_REQUEST); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void bulkWithReadOfNonExistentEntityTest() { CosmosItemOperation operation = BulkOperations.getReadItemOperation( UUID.randomUUID().toString(), new PartitionKey(this.partitionKey1)); this.runWithError( bulkContainer, operations -> operations.add(operation), HttpResponseStatus.NOT_FOUND); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void bulkWithReplaceOfStaleEntity() { this.createJsonTestDocs(bulkContainer); TestDoc staleTestDocToReplace = this.getTestDocCopy(this.TestDocPk1ExistingA); staleTestDocToReplace.setCost(staleTestDocToReplace.getCost() + 1); BulkItemRequestOptions staleReplaceOptions = new BulkItemRequestOptions(); staleReplaceOptions.setIfMatchETag(UUID.randomUUID().toString()); CosmosItemOperation operation = BulkOperations.getReplaceItemOperation( staleTestDocToReplace.getId(), staleTestDocToReplace, new PartitionKey(this.partitionKey1), staleReplaceOptions); this.runWithError( bulkContainer, operations -> operations.add(operation), HttpResponseStatus.PRECONDITION_FAILED); this.verifyByRead(bulkContainer, this.TestDocPk1ExistingA); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void bulkWithDeleteOfNonExistentEntity() { CosmosItemOperation operation = 
BulkOperations.getDeleteItemOperation( UUID.randomUUID().toString(), new PartitionKey(this.partitionKey1)); this.runWithError( bulkContainer, operations -> operations.add(operation), HttpResponseStatus.NOT_FOUND); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void bulkWithCreateConflict() { this.createJsonTestDocs(bulkContainer); TestDoc conflictingTestDocToCreate = this.getTestDocCopy(this.TestDocPk1ExistingA); conflictingTestDocToCreate.setCost(conflictingTestDocToCreate.getCost()); CosmosItemOperation operation = BulkOperations.getCreateItemOperation( conflictingTestDocToCreate, new PartitionKey(this.partitionKey1)); this.runWithError( bulkContainer, operations -> operations.add(operation), HttpResponseStatus.CONFLICT); this.verifyByRead(bulkContainer, this.TestDocPk1ExistingA); } private void runWithError( CosmosContainer container, Function<List<CosmosItemOperation>, Boolean> appendOperation, HttpResponseStatus expectedFailedOperationStatusCode) { TestDoc testDocToCreate = this.populateTestDoc(this.partitionKey1); TestDoc anotherTestDocToCreate = this.populateTestDoc(this.partitionKey1); List<CosmosItemOperation> operations = new ArrayList<>(); operations.add(BulkOperations.getCreateItemOperation(testDocToCreate, new PartitionKey(this.partitionKey1))); appendOperation.apply(operations); operations.add(BulkOperations.getCreateItemOperation(anotherTestDocToCreate, new PartitionKey(this.partitionKey1))); List<CosmosBulkOperationResponse<Object>> bulkResponses = bulkContainer.processBulkOperations(operations); assertThat(bulkResponses.size()).isEqualTo(operations.size()); assertThat(bulkResponses.get(0).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(bulkResponses.get(1).getResponse().getStatusCode()).isEqualTo(expectedFailedOperationStatusCode.code()); assertThat(bulkResponses.get(2).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); this.verifyByRead(container, testDocToCreate); 
this.verifyByRead(container, anotherTestDocToCreate); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void bulkSessionTokenTest() { this.createJsonTestDocs(bulkContainer); CosmosItemResponse<TestDoc> readResponse = bulkContainer.readItem( this.TestDocPk1ExistingC.getId(), this.getPartitionKey(this.partitionKey1), TestDoc.class); assertThat(readResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); ISessionToken sessionToken = this.getSessionToken(readResponse.getSessionToken()); TestDoc testDocToCreate = this.populateTestDoc(this.partitionKey1); TestDoc testDocToReplace = this.getTestDocCopy(this.TestDocPk1ExistingA); testDocToReplace.setCost(testDocToReplace.getCost() + 1); TestDoc testDocToUpsert = this.populateTestDoc(this.partitionKey1); List<CosmosItemOperation> operations = new ArrayList<>(); operations.add( BulkOperations.getCreateItemOperation(testDocToCreate, new PartitionKey(this.partitionKey1))); operations.add( BulkOperations.getReplaceItemOperation(testDocToReplace.getId(), testDocToReplace, new PartitionKey(this.partitionKey1))); operations.add( BulkOperations.getUpsertItemOperation(testDocToUpsert, new PartitionKey(this.partitionKey1))); operations.add( BulkOperations.getDeleteItemOperation(this.TestDocPk1ExistingC.getId(), new PartitionKey(this.partitionKey1))); List<CosmosBulkOperationResponse<Object>> bulkResponses = bulkContainer.processBulkOperations(operations); assertThat(bulkResponses.size()).isEqualTo(operations.size()); assertThat(bulkResponses.get(0).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(this.getSessionToken((bulkResponses.get(0).getResponse().getSessionToken())).getLSN()) .isGreaterThan(sessionToken.getLSN()); assertThat(bulkResponses.get(1).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(this.getSessionToken((bulkResponses.get(1).getResponse().getSessionToken())).getLSN()) .isGreaterThan(sessionToken.getLSN()); 
assertThat(bulkResponses.get(2).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(this.getSessionToken((bulkResponses.get(2).getResponse().getSessionToken())).getLSN()) .isGreaterThan(sessionToken.getLSN()); assertThat(bulkResponses.get(3).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); assertThat(this.getSessionToken((bulkResponses.get(2).getResponse().getSessionToken())).getLSN()) .isGreaterThan(sessionToken.getLSN()); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void bulkContentResponseOnWriteTest() { this.createJsonTestDocs(bulkContainer); TestDoc testDocToCreate = this.populateTestDoc(this.partitionKey1); TestDoc testDocToReplace = this.getTestDocCopy(this.TestDocPk1ExistingA); testDocToReplace.setCost(testDocToReplace.getCost() + 1); TestDoc testDocToUpsert = this.populateTestDoc(this.partitionKey1); BulkItemRequestOptions contentResponseDisableRequestOption = new BulkItemRequestOptions() .setContentResponseOnWriteEnabled(false); List<CosmosItemOperation> operations = new ArrayList<>(); operations.add( BulkOperations.getCreateItemOperation(testDocToCreate, new PartitionKey(this.partitionKey1))); operations.add( BulkOperations.getReplaceItemOperation( testDocToReplace.getId(), testDocToReplace, new PartitionKey(this.partitionKey1), contentResponseDisableRequestOption)); operations.add( BulkOperations.getUpsertItemOperation( testDocToUpsert, new PartitionKey(this.partitionKey1), contentResponseDisableRequestOption)); operations.add( BulkOperations.getDeleteItemOperation(this.TestDocPk1ExistingC.getId(), new PartitionKey(this.partitionKey1))); operations.add(BulkOperations.getReadItemOperation( this.TestDocPk1ExistingD.getId(), new PartitionKey(this.partitionKey1), contentResponseDisableRequestOption)); operations.add(BulkOperations.getReadItemOperation(this.TestDocPk1ExistingB.getId(), new PartitionKey(this.partitionKey1))); List<CosmosBulkOperationResponse<Object>> bulkResponses = 
bulkContainer.processBulkOperations(operations); assertThat(bulkResponses.size()).isEqualTo(operations.size()); assertThat(bulkResponses.get(0).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(bulkResponses.get(0).getResponse().getItem(TestDoc.class)).isEqualTo(testDocToCreate); assertThat(bulkResponses.get(1).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(bulkResponses.get(1).getResponse().getItem(TestDoc.class)).isNull(); assertThat(bulkResponses.get(2).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(bulkResponses.get(2).getResponse().getItem(TestDoc.class)).isNull(); assertThat(bulkResponses.get(3).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); assertThat(bulkResponses.get(3).getResponse().getItem(TestDoc.class)).isNull(); assertThat(bulkResponses.get(4).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(bulkResponses.get(4).getResponse().getItem(TestDoc.class)).isEqualTo(this.TestDocPk1ExistingD); assertThat(bulkResponses.get(5).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(bulkResponses.get(5).getResponse().getItem(TestDoc.class)).isEqualTo(this.TestDocPk1ExistingB); } }
Nit: flip the comparison for readability — write `this.statusCode >= 200` rather than `200 <= this.statusCode`; behavior is unchanged.
/**
 * Returns whether the operation completed with a success (2xx) HTTP status code.
 *
 * @return {@code true} if {@code statusCode} is in the inclusive range [200, 299].
 */
public boolean isSuccessStatusCode() {
    // Reviewer nit applied: read left-to-right as "statusCode >= 200" rather than
    // "200 <= statusCode" for consistency; the result is identical.
    return this.statusCode >= 200 && this.statusCode <= 299;
}
return 200 <= this.statusCode && this.statusCode <= 299;
/**
 * Indicates whether the recorded HTTP status code falls in the success (2xx) family.
 *
 * @return {@code true} for any status code from 200 through 299 inclusive.
 */
public boolean isSuccessStatusCode() {
    final int code = this.statusCode;
    // Equivalent to 200 <= code <= 299, expressed as "not outside the band".
    return !(code < 200 || code > 299);
}
class type for which deserialization is needed. * * @return item associated with the current result. */ public <T> T getItem(final Class<T> type) { T item = null; if (this.getResourceObject() != null) { item = new JsonSerializable(this.getResourceObject()).toObject(type); } return item; }
class type for which deserialization is needed. * * @return item associated with the current result. */ @Beta(Beta.SinceVersion.V4_9_0) public <T> T getItem(final Class<T> type) { T item = null; if (this.getResourceObject() != null) { item = new JsonSerializable(this.getResourceObject()).toObject(type); } return item; }
Have added one Mono and will improve it further. Right now we are swallowing the errors passed in, and I cannot find another way. I wanted to surface a response for the failure too, with the operation/response set to null and the user-passed exception carried in CosmosBulkOperationResponse. Is ignoring the error signal a valid approach?
// Verifies a reactive bulk stream of create operations for two document types
// (TestDoc and EventDoc): every response must be HTTP 201 (Created) with populated
// diagnostics, and the count of emitted responses must equal 2 * totalRequest.
public void createItem_withBulk() {
    int totalRequest = getTotalRequest();

    // Interleave the two flavors of create operations; each item gets its own
    // random partition key.
    Flux<CosmosItemOperation> cosmosItemOperationFlux = Flux.merge(
        Flux.range(0, totalRequest).map(i -> {
            String partitionKey = UUID.randomUUID().toString();
            TestDoc testDoc = this.populateTestDoc(partitionKey);
            return BulkProcessingUtil.getBulkCreateItemOperation(testDoc, new PartitionKey(partitionKey));
        }),
        Flux.range(0, totalRequest).map(i -> {
            String partitionKey = UUID.randomUUID().toString();
            EventDoc eventDoc = new EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey);
            return BulkProcessingUtil.getBulkCreateItemOperation(eventDoc, new PartitionKey(partitionKey));
        }));

    // Up to 100 operations per micro batch, at most 5 batches in flight.
    BulkProcessingOptions<CosmosBulkTest> bulkProcessingOptions = new BulkProcessingOptions<>();
    bulkProcessingOptions.setMaxMicroBatchSize(100);
    bulkProcessingOptions.setMaxMicroBatchConcurrency(5);

    Flux<CosmosBulkOperationRequestAndResponse<CosmosBulkTest>> responseFlux = bulkAsyncContainer
        .processBulkOperations(cosmosItemOperationFlux, bulkProcessingOptions);

    // Count every emitted response so we can assert nothing was dropped.
    AtomicInteger processedDoc = new AtomicInteger(0);
    responseFlux
        .flatMap((CosmosBulkOperationRequestAndResponse<CosmosBulkTest> cosmosBulkOperationRequestAndResponse) -> {
            processedDoc.incrementAndGet();
            CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationRequestAndResponse.getResponse();
            // Each create must succeed and carry full per-operation metadata.
            Assertions.assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
            Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
            Assertions.assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
            Assertions.assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
            Assertions.assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
            Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
            return Mono.just(cosmosBulkItemResponse);
        }).blockLast();

    // Two operations were submitted per iteration (one TestDoc + one EventDoc).
    Assertions.assertThat(processedDoc.get()).isEqualTo(totalRequest * 2);
}
.processBulkOperations(cosmosItemOperationFlux, bulkProcessingOptions);
// Sync-client variant: creates totalRequest TestDoc and totalRequest EventDoc items
// in a single bulk call and asserts every per-item response is HTTP 201 (Created)
// with full metadata.
public void createItem_withBulk() {
    int totalRequest = getTotalRequest();

    List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>();
    for (int i = 0; i < totalRequest; i++) {
        // One TestDoc and one EventDoc per iteration, each under its own random
        // partition key.
        String partitionKey = UUID.randomUUID().toString();
        TestDoc testDoc = this.populateTestDoc(partitionKey);
        cosmosItemOperations.add(BulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey)));

        partitionKey = UUID.randomUUID().toString();
        EventDoc eventDoc = new EventDoc(UUID.randomUUID().toString(), 2, 4, "type1", partitionKey);
        cosmosItemOperations.add(BulkOperations.getCreateItemOperation(eventDoc, new PartitionKey(partitionKey)));
    }

    // NOTE(review): the type argument CosmosBulkAsyncTest looks copy-pasted from the
    // async test class — confirm whether it should be the enclosing test class instead.
    BulkProcessingOptions<CosmosBulkAsyncTest> bulkProcessingOptions = new BulkProcessingOptions<>();
    bulkProcessingOptions.setMaxMicroBatchSize(100);
    bulkProcessingOptions.setMaxMicroBatchConcurrency(5);

    List<CosmosBulkOperationResponse<CosmosBulkAsyncTest>> bulkResponse = bulkContainer
        .processBulkOperations(cosmosItemOperations, bulkProcessingOptions);

    // Two operations were queued per loop iteration.
    assertThat(bulkResponse.size()).isEqualTo(totalRequest * 2);

    for (CosmosBulkOperationResponse<CosmosBulkAsyncTest> cosmosBulkOperationResponse : bulkResponse) {
        CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse();

        // Each create must succeed and carry full per-operation metadata.
        assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code());
        assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0);
        assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull();
        assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull();
        assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull();
        assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull();
    }
}
class CosmosBulkTest extends BatchTestBase { private CosmosAsyncClient bulkClient; private CosmosAsyncContainer bulkAsyncContainer; @Factory(dataProvider = "clientBuildersWithDirectSession") public CosmosBulkTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void before_CosmosBulkTest() { assertThat(this.bulkClient).isNull(); this.bulkClient = getClientBuilder().buildAsyncClient(); bulkAsyncContainer = getSharedMultiPartitionCosmosContainer(this.bulkClient); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeCloseAsync(this.bulkClient); } @Test(groups = {"simple"}, timeOut = TIMEOUT) @Test(groups = {"simple"}, timeOut = TIMEOUT) public void upsertItem_withbulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkProcessingUtil.getBulkUpsertItemOperation(testDoc, new PartitionKey(partitionKey))); } BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(); bulkProcessingOptions.setMaxMicroBatchSize(100); bulkProcessingOptions.setMaxMicroBatchConcurrency(1); Flux<CosmosBulkOperationRequestAndResponse<Object>> responseFlux = bulkAsyncContainer .processBulkOperations(Flux.fromIterable(cosmosItemOperations)); AtomicInteger processedDoc = new AtomicInteger(0); responseFlux .flatMap((CosmosBulkOperationRequestAndResponse<Object> cosmosBulkOperationRequestAndResponse) -> { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationRequestAndResponse.getResponse(); Assertions.assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); 
Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); Assertions.assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); Assertions.assertThat(cosmosBulkOperationRequestAndResponse.getOperation()).isEqualTo(cosmosItemOperations.get(testDoc.getCost())); Assertions.assertThat(testDoc).isEqualTo(cosmosBulkOperationRequestAndResponse.getOperation().getItem()); Assertions.assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); return Mono.just(cosmosBulkItemResponse); }).blockLast(); Assertions.assertThat(processedDoc.get()).isEqualTo(totalRequest); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void deleteItem_withBulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkProcessingUtil.getBulkCreateItemOperation(testDoc, new PartitionKey(partitionKey))); } createItemsAndVerify(cosmosItemOperations); Flux<CosmosItemOperation> deleteCosmosItemOperationFlux = Flux.fromIterable(cosmosItemOperations).map((CosmosItemOperation cosmosItemOperation) -> { TestDoc testDoc = cosmosItemOperation.getItem(); return BulkProcessingUtil.getBulkDeleteItemOperation(testDoc.getId(), cosmosItemOperation.getPartitionKeyValue()); }); BulkProcessingOptions<TestDoc> bulkProcessingOptions = new BulkProcessingOptions<>(); bulkProcessingOptions.setMaxMicroBatchSize(30); bulkProcessingOptions.setMaxMicroBatchConcurrency(1); AtomicInteger processedDoc = new AtomicInteger(0); 
bulkAsyncContainer .processBulkOperations(deleteCosmosItemOperationFlux, bulkProcessingOptions) .flatMap((CosmosBulkOperationRequestAndResponse<TestDoc> cosmosBulkOperationRequestAndResponse) -> { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationRequestAndResponse.getResponse(); Assertions.assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); Assertions.assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); return Mono.just(cosmosBulkItemResponse); }).blockLast(); Assertions.assertThat(processedDoc.get()).isEqualTo(totalRequest); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void readItem_withBulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkProcessingUtil.getBulkUpsertItemOperation(testDoc, new PartitionKey(partitionKey))); } createItemsAndVerify(cosmosItemOperations); Flux<CosmosItemOperation> readCosmosItemOperationFlux = Flux.fromIterable(cosmosItemOperations).map((CosmosItemOperation cosmosItemOperation) -> { TestDoc testDoc = cosmosItemOperation.getItem(); return BulkProcessingUtil.getBulkReadItemOperation(testDoc.getId(), cosmosItemOperation.getPartitionKeyValue()); }); BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class); bulkProcessingOptions.setMaxMicroBatchSize(30); bulkProcessingOptions.setMaxMicroBatchConcurrency(5); AtomicInteger 
processedDoc = new AtomicInteger(0); bulkAsyncContainer .processBulkOperations(readCosmosItemOperationFlux, bulkProcessingOptions) .flatMap((CosmosBulkOperationRequestAndResponse<Object> cosmosBulkOperationRequestAndResponse) -> { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationRequestAndResponse.getResponse(); Assertions.assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); Assertions.assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); Assertions.assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); return Mono.just(cosmosBulkItemResponse); }).blockLast(); Assertions.assertThat(processedDoc.get()).isEqualTo(totalRequest); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void readItemMultipleTimes_withBulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkProcessingUtil.getBulkUpsertItemOperation(testDoc, new PartitionKey(partitionKey))); } createItemsAndVerify(cosmosItemOperations); Flux<CosmosItemOperation> readCosmosItemOperationFlux = Flux.fromIterable(cosmosItemOperations).map((CosmosItemOperation cosmosItemOperation) -> { TestDoc testDoc = cosmosItemOperation.getItem(); return BulkProcessingUtil.getBulkReadItemOperation(testDoc.getId(), cosmosItemOperation.getPartitionKeyValue()); }); 
BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class); bulkProcessingOptions.setMaxMicroBatchSize(30); bulkProcessingOptions.setMaxMicroBatchConcurrency(5); HashSet<TestDoc> distinctDocs = new HashSet<>(); AtomicInteger processedDoc = new AtomicInteger(0); Flux<CosmosBulkOperationRequestAndResponse<Object>> readResponseFlux = bulkAsyncContainer .processBulkOperations(readCosmosItemOperationFlux, bulkProcessingOptions) .flatMap((CosmosBulkOperationRequestAndResponse<Object> cosmosBulkOperationRequestAndResponse) -> { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationRequestAndResponse.getResponse(); Assertions.assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); Assertions.assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); distinctDocs.add(testDoc); Assertions.assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); return Mono.just(cosmosBulkOperationRequestAndResponse); }); readResponseFlux .blockLast(); Assertions.assertThat(processedDoc.get()).isEqualTo(totalRequest); Assertions.assertThat(distinctDocs.size()).isEqualTo(totalRequest); readResponseFlux .blockLast(); Assertions.assertThat(processedDoc.get()).isEqualTo(2 * totalRequest); Assertions.assertThat(distinctDocs.size()).isEqualTo(totalRequest); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void replaceItem_withBulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; 
i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkProcessingUtil.getBulkCreateItemOperation(testDoc, new PartitionKey(partitionKey))); } createItemsAndVerify(cosmosItemOperations); Flux<CosmosItemOperation> replaceCosmosItemOperationFlux = Flux.fromIterable(cosmosItemOperations).map((CosmosItemOperation cosmosItemOperation) -> { TestDoc testDoc = cosmosItemOperation.getItem(); return BulkProcessingUtil.getBulkReplaceItemOperation( testDoc.getId(), cosmosItemOperation.getItem(), cosmosItemOperation.getPartitionKeyValue()); }); AtomicInteger processedDoc = new AtomicInteger(0); bulkAsyncContainer .processBulkOperations(replaceCosmosItemOperationFlux) .flatMap((CosmosBulkOperationRequestAndResponse<?> cosmosBulkOperationRequestAndResponse) -> { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationRequestAndResponse.getResponse(); Assertions.assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); Assertions.assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); Assertions.assertThat(testDoc).isEqualTo(cosmosBulkOperationRequestAndResponse.getOperation().getItem()); Assertions.assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); return Mono.just(cosmosBulkItemResponse); }).blockLast(); Assertions.assertThat(processedDoc.get()).isEqualTo(totalRequest); } private void createItemsAndVerify(List<CosmosItemOperation> cosmosItemOperations) { 
BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class); bulkProcessingOptions.setMaxMicroBatchSize(100); bulkProcessingOptions.setMaxMicroBatchConcurrency(5); Flux<CosmosBulkOperationRequestAndResponse<Object>> createResonseFlux = bulkAsyncContainer .processBulkOperations(Flux.fromIterable(cosmosItemOperations), bulkProcessingOptions); HashSet<Integer> distinctIndex = new HashSet<>(); AtomicInteger processedDoc = new AtomicInteger(0); createResonseFlux .flatMap((CosmosBulkOperationRequestAndResponse<Object> cosmosBulkOperationRequestAndResponse) -> { processedDoc.incrementAndGet(); CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationRequestAndResponse.getResponse(); Assertions.assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); Assertions.assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); Assertions.assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); distinctIndex.add(testDoc.getCost()); Assertions.assertThat(cosmosBulkOperationRequestAndResponse.getOperation()).isEqualTo(cosmosItemOperations.get(testDoc.getCost())); Assertions.assertThat(testDoc).isEqualTo(cosmosBulkOperationRequestAndResponse.getOperation().getItem()); Assertions.assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); return Mono.just(cosmosBulkItemResponse); }).blockLast(); Assertions.assertThat(processedDoc.get()).isEqualTo(cosmosItemOperations.size()); Assertions.assertThat(distinctIndex.size()).isEqualTo(cosmosItemOperations.size()); } private int getTotalRequest() { int countRequest = new Random().nextInt(100) + 120; 
System.out.println("Total count of request for this test case: " + countRequest); return countRequest; } }
class CosmosBulkTest extends BatchTestBase { private final static Logger logger = LoggerFactory.getLogger(CosmosBulkAsyncTest.class); private CosmosClient bulkClient; private CosmosContainer bulkContainer; @Factory(dataProvider = "clientBuildersWithDirectSession") public CosmosBulkTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void before_CosmosBulkTest() { assertThat(this.bulkClient).isNull(); this.bulkClient = getClientBuilder().buildClient(); CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.bulkClient.asyncClient()); bulkContainer = bulkClient.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { assertThat(this.bulkClient).isNotNull(); this.bulkClient.close(); } @Test(groups = {"simple"}, timeOut = TIMEOUT) @Test(groups = {"simple"}, timeOut = TIMEOUT) public void upsertItem_withbulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkOperations.getUpsertItemOperation(testDoc, new PartitionKey(partitionKey))); } BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(); bulkProcessingOptions.setMaxMicroBatchSize(100); bulkProcessingOptions.setMaxMicroBatchConcurrency(1); List<CosmosBulkOperationResponse<Object>> bulkResponse = bulkContainer .processBulkOperations(cosmosItemOperations); assertThat(bulkResponse.size()).isEqualTo(totalRequest); for (CosmosBulkOperationResponse<Object> cosmosBulkOperationResponse : bulkResponse) { CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); 
assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); assertThat(cosmosBulkOperationResponse.getOperation()).isEqualTo(cosmosItemOperations.get(testDoc.getCost())); assertThat(testDoc).isEqualTo(cosmosBulkOperationResponse.getOperation().getItem()); assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void deleteItem_withBulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); } createItemsAndVerify(cosmosItemOperations); List<CosmosItemOperation> deleteCosmosItemOperation = new ArrayList<>(); for(CosmosItemOperation cosmosItemOperation : cosmosItemOperations) { TestDoc testDoc = cosmosItemOperation.getItem(); deleteCosmosItemOperation.add( BulkOperations.getDeleteItemOperation(testDoc.getId(), cosmosItemOperation.getPartitionKeyValue())); } BulkProcessingOptions<TestDoc> bulkProcessingOptions = new BulkProcessingOptions<>(); bulkProcessingOptions.setMaxMicroBatchSize(30); bulkProcessingOptions.setMaxMicroBatchConcurrency(1); List<CosmosBulkOperationResponse<TestDoc>> bulkResponse = bulkContainer .processBulkOperations(deleteCosmosItemOperation, bulkProcessingOptions); 
assertThat(bulkResponse.size()).isEqualTo(totalRequest); for (CosmosBulkOperationResponse<TestDoc> cosmosBulkOperationResponse : bulkResponse) { CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void readItem_withBulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkOperations.getUpsertItemOperation(testDoc, new PartitionKey(partitionKey))); } createItemsAndVerify(cosmosItemOperations); List<CosmosItemOperation> readCosmosItemOperations = new ArrayList<>(); for(CosmosItemOperation cosmosItemOperation : cosmosItemOperations) { TestDoc testDoc = cosmosItemOperation.getItem(); readCosmosItemOperations.add( BulkOperations.getReadItemOperation(testDoc.getId(), cosmosItemOperation.getPartitionKeyValue())); } BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class); bulkProcessingOptions.setMaxMicroBatchSize(30); bulkProcessingOptions.setMaxMicroBatchConcurrency(5); List<CosmosBulkOperationResponse<Object>> bulkResponse = bulkContainer .processBulkOperations(readCosmosItemOperations, bulkProcessingOptions); assertThat(bulkResponse.size()).isEqualTo(totalRequest); for (CosmosBulkOperationResponse<Object> cosmosBulkOperationResponse : bulkResponse) { 
CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void replaceItem_withBulk() { int totalRequest = getTotalRequest(); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); for (int i = 0; i < totalRequest; i++) { String partitionKey = UUID.randomUUID().toString(); TestDoc testDoc = this.populateTestDoc(partitionKey, i, 20); cosmosItemOperations.add(BulkOperations.getCreateItemOperation(testDoc, new PartitionKey(partitionKey))); } createItemsAndVerify(cosmosItemOperations); List<CosmosItemOperation> replaceCosmosItemOperations = new ArrayList<>(); for(CosmosItemOperation cosmosItemOperation : cosmosItemOperations) { TestDoc testDoc = cosmosItemOperation.getItem(); replaceCosmosItemOperations.add(BulkOperations.getReplaceItemOperation( testDoc.getId(), cosmosItemOperation.getItem(), cosmosItemOperation.getPartitionKeyValue())); } List<CosmosBulkOperationResponse<Object>> bulkResponse = bulkContainer .processBulkOperations(replaceCosmosItemOperations); assertThat(bulkResponse.size()).isEqualTo(totalRequest); for (CosmosBulkOperationResponse<Object> cosmosBulkOperationResponse : bulkResponse) { CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); 
assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); assertThat(testDoc).isEqualTo(cosmosBulkOperationResponse.getOperation().getItem()); assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); } } private void createItemsAndVerify(List<CosmosItemOperation> cosmosItemOperations) { BulkProcessingOptions<Object> bulkProcessingOptions = new BulkProcessingOptions<>(Object.class); bulkProcessingOptions.setMaxMicroBatchSize(100); bulkProcessingOptions.setMaxMicroBatchConcurrency(5); List<CosmosBulkOperationResponse<Object>> bulkResponse = bulkContainer .processBulkOperations(cosmosItemOperations, bulkProcessingOptions); assertThat(bulkResponse.size()).isEqualTo(cosmosItemOperations.size()); HashSet<Integer> distinctIndex = new HashSet<>(); for (CosmosBulkOperationResponse<Object> cosmosBulkOperationResponse : bulkResponse) { CosmosBulkItemResponse cosmosBulkItemResponse = cosmosBulkOperationResponse.getResponse(); assertThat(cosmosBulkItemResponse.getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(cosmosBulkItemResponse.getRequestCharge()).isGreaterThan(0); assertThat(cosmosBulkItemResponse.getCosmosDiagnostics().toString()).isNotNull(); assertThat(cosmosBulkItemResponse.getSessionToken()).isNotNull(); assertThat(cosmosBulkItemResponse.getActivityId()).isNotNull(); assertThat(cosmosBulkItemResponse.getRequestCharge()).isNotNull(); TestDoc testDoc = cosmosBulkItemResponse.getItem(TestDoc.class); distinctIndex.add(testDoc.getCost()); assertThat(cosmosBulkOperationResponse.getOperation()).isEqualTo(cosmosItemOperations.get(testDoc.getCost())); 
assertThat(testDoc).isEqualTo(cosmosBulkOperationResponse.getOperation().getItem()); assertThat(testDoc).isEqualTo(cosmosItemOperations.get(testDoc.getCost()).getItem()); } assertThat(distinctIndex.size()).isEqualTo(cosmosItemOperations.size()); } private int getTotalRequest() { int countRequest = new Random().nextInt(100) + 120; logger.info("Total count of request for this test case: " + countRequest); return countRequest; } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void bulkETagTest() { this.createJsonTestDocs(bulkContainer); { BatchTestBase.TestDoc testDocToCreate = this.populateTestDoc(this.partitionKey1); BatchTestBase.TestDoc testDocToReplace = this.getTestDocCopy(this.TestDocPk1ExistingA); testDocToReplace.setCost(testDocToReplace.getCost() + 1); CosmosItemResponse<TestDoc> response = bulkContainer.readItem( this.TestDocPk1ExistingA.getId(), this.getPartitionKey(this.partitionKey1), TestDoc.class); assertThat(response.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); BulkItemRequestOptions firstReplaceOptions = new BulkItemRequestOptions(); firstReplaceOptions.setIfMatchETag(response.getETag()); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); cosmosItemOperations.add(BulkOperations.getCreateItemOperation(testDocToCreate, new PartitionKey(this.partitionKey1))); cosmosItemOperations.add(BulkOperations.getReplaceItemOperation( testDocToReplace.getId(), testDocToReplace, new PartitionKey(this.partitionKey1), firstReplaceOptions)); List<CosmosBulkOperationResponse<Object>> bulkResponses = bulkContainer .processBulkOperations(cosmosItemOperations); assertThat(bulkResponses.size()).isEqualTo(cosmosItemOperations.size()); assertThat(bulkResponses.get(0).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(bulkResponses.get(1).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); this.verifyByRead(bulkContainer, testDocToCreate, bulkResponses.get(0).getResponse().getETag()); 
this.verifyByRead(bulkContainer, testDocToReplace, bulkResponses.get(1).getResponse().getETag()); } { TestDoc testDocToReplace = this.getTestDocCopy(this.TestDocPk1ExistingB); testDocToReplace.setCost(testDocToReplace.getCost() + 1); BulkItemRequestOptions replaceOptions = new BulkItemRequestOptions(); replaceOptions.setIfMatchETag(String.valueOf(this.getRandom().nextInt())); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); cosmosItemOperations.add(BulkOperations.getReplaceItemOperation( testDocToReplace.getId(), testDocToReplace, new PartitionKey(this.partitionKey1), replaceOptions)); List<CosmosBulkOperationResponse<Object>> bulkResponses = bulkContainer .processBulkOperations(cosmosItemOperations); assertThat(bulkResponses.size()).isEqualTo(cosmosItemOperations.size()); assertThat(bulkResponses.get(0).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.PRECONDITION_FAILED.code()); this.verifyByRead(bulkContainer, this.TestDocPk1ExistingB); } { CosmosItemResponse<TestDoc> response = bulkContainer.readItem( this.TestDocPk1ExistingA.getId(), this.getPartitionKey(this.partitionKey1), TestDoc.class); assertThat(response.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); BulkItemRequestOptions readOptions = new BulkItemRequestOptions(); readOptions.setIfMatchETag(response.getETag()); BatchTestBase.TestDoc testDocToCreate = this.populateTestDoc(this.partitionKey1); List<CosmosItemOperation> cosmosItemOperations = new ArrayList<>(); cosmosItemOperations.add(BulkOperations.getReadItemOperation( this.TestDocPk1ExistingA.getId(), this.getPartitionKey(this.partitionKey1), readOptions)); cosmosItemOperations.add(BulkOperations.getCreateItemOperation(testDocToCreate, new PartitionKey(this.partitionKey1))); List<CosmosBulkOperationResponse<Object>> bulkResponses = bulkContainer .processBulkOperations(cosmosItemOperations); assertThat(bulkResponses.size()).isEqualTo(cosmosItemOperations.size()); 
assertThat(bulkResponses.get(0).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.NOT_MODIFIED.code()); assertThat(bulkResponses.get(1).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(bulkResponses.get(1).getResponse().getItem(TestDoc.class)).isEqualTo(testDocToCreate); this.verifyByRead(bulkContainer, testDocToCreate); } } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void bulkWithInvalidCreateTest() { CosmosItemOperation operation = BulkOperations.getCreateItemOperation( this.populateTestDoc(UUID.randomUUID().toString()), new PartitionKey(this.partitionKey1)); this.runWithError( bulkContainer, operations -> operations.add(operation), HttpResponseStatus.BAD_REQUEST); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void bulkWithReadOfNonExistentEntityTest() { CosmosItemOperation operation = BulkOperations.getReadItemOperation( UUID.randomUUID().toString(), new PartitionKey(this.partitionKey1)); this.runWithError( bulkContainer, operations -> operations.add(operation), HttpResponseStatus.NOT_FOUND); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void bulkWithReplaceOfStaleEntity() { this.createJsonTestDocs(bulkContainer); TestDoc staleTestDocToReplace = this.getTestDocCopy(this.TestDocPk1ExistingA); staleTestDocToReplace.setCost(staleTestDocToReplace.getCost() + 1); BulkItemRequestOptions staleReplaceOptions = new BulkItemRequestOptions(); staleReplaceOptions.setIfMatchETag(UUID.randomUUID().toString()); CosmosItemOperation operation = BulkOperations.getReplaceItemOperation( staleTestDocToReplace.getId(), staleTestDocToReplace, new PartitionKey(this.partitionKey1), staleReplaceOptions); this.runWithError( bulkContainer, operations -> operations.add(operation), HttpResponseStatus.PRECONDITION_FAILED); this.verifyByRead(bulkContainer, this.TestDocPk1ExistingA); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void bulkWithDeleteOfNonExistentEntity() { CosmosItemOperation operation = 
BulkOperations.getDeleteItemOperation( UUID.randomUUID().toString(), new PartitionKey(this.partitionKey1)); this.runWithError( bulkContainer, operations -> operations.add(operation), HttpResponseStatus.NOT_FOUND); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void bulkWithCreateConflict() { this.createJsonTestDocs(bulkContainer); TestDoc conflictingTestDocToCreate = this.getTestDocCopy(this.TestDocPk1ExistingA); conflictingTestDocToCreate.setCost(conflictingTestDocToCreate.getCost()); CosmosItemOperation operation = BulkOperations.getCreateItemOperation( conflictingTestDocToCreate, new PartitionKey(this.partitionKey1)); this.runWithError( bulkContainer, operations -> operations.add(operation), HttpResponseStatus.CONFLICT); this.verifyByRead(bulkContainer, this.TestDocPk1ExistingA); } private void runWithError( CosmosContainer container, Function<List<CosmosItemOperation>, Boolean> appendOperation, HttpResponseStatus expectedFailedOperationStatusCode) { TestDoc testDocToCreate = this.populateTestDoc(this.partitionKey1); TestDoc anotherTestDocToCreate = this.populateTestDoc(this.partitionKey1); List<CosmosItemOperation> operations = new ArrayList<>(); operations.add(BulkOperations.getCreateItemOperation(testDocToCreate, new PartitionKey(this.partitionKey1))); appendOperation.apply(operations); operations.add(BulkOperations.getCreateItemOperation(anotherTestDocToCreate, new PartitionKey(this.partitionKey1))); List<CosmosBulkOperationResponse<Object>> bulkResponses = bulkContainer.processBulkOperations(operations); assertThat(bulkResponses.size()).isEqualTo(operations.size()); assertThat(bulkResponses.get(0).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(bulkResponses.get(1).getResponse().getStatusCode()).isEqualTo(expectedFailedOperationStatusCode.code()); assertThat(bulkResponses.get(2).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); this.verifyByRead(container, testDocToCreate); 
this.verifyByRead(container, anotherTestDocToCreate); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void bulkSessionTokenTest() { this.createJsonTestDocs(bulkContainer); CosmosItemResponse<TestDoc> readResponse = bulkContainer.readItem( this.TestDocPk1ExistingC.getId(), this.getPartitionKey(this.partitionKey1), TestDoc.class); assertThat(readResponse.getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); ISessionToken sessionToken = this.getSessionToken(readResponse.getSessionToken()); TestDoc testDocToCreate = this.populateTestDoc(this.partitionKey1); TestDoc testDocToReplace = this.getTestDocCopy(this.TestDocPk1ExistingA); testDocToReplace.setCost(testDocToReplace.getCost() + 1); TestDoc testDocToUpsert = this.populateTestDoc(this.partitionKey1); List<CosmosItemOperation> operations = new ArrayList<>(); operations.add( BulkOperations.getCreateItemOperation(testDocToCreate, new PartitionKey(this.partitionKey1))); operations.add( BulkOperations.getReplaceItemOperation(testDocToReplace.getId(), testDocToReplace, new PartitionKey(this.partitionKey1))); operations.add( BulkOperations.getUpsertItemOperation(testDocToUpsert, new PartitionKey(this.partitionKey1))); operations.add( BulkOperations.getDeleteItemOperation(this.TestDocPk1ExistingC.getId(), new PartitionKey(this.partitionKey1))); List<CosmosBulkOperationResponse<Object>> bulkResponses = bulkContainer.processBulkOperations(operations); assertThat(bulkResponses.size()).isEqualTo(operations.size()); assertThat(bulkResponses.get(0).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(this.getSessionToken((bulkResponses.get(0).getResponse().getSessionToken())).getLSN()) .isGreaterThan(sessionToken.getLSN()); assertThat(bulkResponses.get(1).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(this.getSessionToken((bulkResponses.get(1).getResponse().getSessionToken())).getLSN()) .isGreaterThan(sessionToken.getLSN()); 
assertThat(bulkResponses.get(2).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(this.getSessionToken((bulkResponses.get(2).getResponse().getSessionToken())).getLSN()) .isGreaterThan(sessionToken.getLSN()); assertThat(bulkResponses.get(3).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); assertThat(this.getSessionToken((bulkResponses.get(2).getResponse().getSessionToken())).getLSN()) .isGreaterThan(sessionToken.getLSN()); } @Test(groups = {"simple"}, timeOut = TIMEOUT) public void bulkContentResponseOnWriteTest() { this.createJsonTestDocs(bulkContainer); TestDoc testDocToCreate = this.populateTestDoc(this.partitionKey1); TestDoc testDocToReplace = this.getTestDocCopy(this.TestDocPk1ExistingA); testDocToReplace.setCost(testDocToReplace.getCost() + 1); TestDoc testDocToUpsert = this.populateTestDoc(this.partitionKey1); BulkItemRequestOptions contentResponseDisableRequestOption = new BulkItemRequestOptions() .setContentResponseOnWriteEnabled(false); List<CosmosItemOperation> operations = new ArrayList<>(); operations.add( BulkOperations.getCreateItemOperation(testDocToCreate, new PartitionKey(this.partitionKey1))); operations.add( BulkOperations.getReplaceItemOperation( testDocToReplace.getId(), testDocToReplace, new PartitionKey(this.partitionKey1), contentResponseDisableRequestOption)); operations.add( BulkOperations.getUpsertItemOperation( testDocToUpsert, new PartitionKey(this.partitionKey1), contentResponseDisableRequestOption)); operations.add( BulkOperations.getDeleteItemOperation(this.TestDocPk1ExistingC.getId(), new PartitionKey(this.partitionKey1))); operations.add(BulkOperations.getReadItemOperation( this.TestDocPk1ExistingD.getId(), new PartitionKey(this.partitionKey1), contentResponseDisableRequestOption)); operations.add(BulkOperations.getReadItemOperation(this.TestDocPk1ExistingB.getId(), new PartitionKey(this.partitionKey1))); List<CosmosBulkOperationResponse<Object>> bulkResponses = 
bulkContainer.processBulkOperations(operations); assertThat(bulkResponses.size()).isEqualTo(operations.size()); assertThat(bulkResponses.get(0).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(bulkResponses.get(0).getResponse().getItem(TestDoc.class)).isEqualTo(testDocToCreate); assertThat(bulkResponses.get(1).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(bulkResponses.get(1).getResponse().getItem(TestDoc.class)).isNull(); assertThat(bulkResponses.get(2).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.CREATED.code()); assertThat(bulkResponses.get(2).getResponse().getItem(TestDoc.class)).isNull(); assertThat(bulkResponses.get(3).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.NO_CONTENT.code()); assertThat(bulkResponses.get(3).getResponse().getItem(TestDoc.class)).isNull(); assertThat(bulkResponses.get(4).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(bulkResponses.get(4).getResponse().getItem(TestDoc.class)).isEqualTo(this.TestDocPk1ExistingD); assertThat(bulkResponses.get(5).getResponse().getStatusCode()).isEqualTo(HttpResponseStatus.OK.code()); assertThat(bulkResponses.get(5).getResponse().getItem(TestDoc.class)).isEqualTo(this.TestDocPk1ExistingB); } }
Done. Thanks.
public boolean isSuccessStatusCode() { return 200 <= this.statusCode && this.statusCode <= 299; }
return 200 <= this.statusCode && this.statusCode <= 299;
public boolean isSuccessStatusCode() { return this.statusCode >= 200 && this.statusCode <= 299; }
class type for which deserialization is needed. * * @return item associated with the current result. */ public <T> T getItem(final Class<T> type) { T item = null; if (this.getResourceObject() != null) { item = new JsonSerializable(this.getResourceObject()).toObject(type); } return item; }
class type for which deserialization is needed. * * @return item associated with the current result. */ @Beta(Beta.SinceVersion.V4_9_0) public <T> T getItem(final Class<T> type) { T item = null; if (this.getResourceObject() != null) { item = new JsonSerializable(this.getResourceObject()).toObject(type); } return item; }
I have two questions here: 1. When these two exceptions happens, it will go back to the main sink and re-calculate the partitionKeyRangeId, but without refreshing the cache, we may get the same/old partitionKeyRangeId back. Where the cache is refreshed? 2. Currently we check for PARTITION_KEY_RANGE_GONE and NAME_CACHE_IS_STALE exception, what about COMPLETING_SPLIT etc?
boolean shouldRetryForGone(int statusCode, int subStatusCode) { if (statusCode == StatusCodes.GONE && (subStatusCode == SubStatusCodes.PARTITION_KEY_RANGE_GONE || subStatusCode == SubStatusCodes.NAME_CACHE_IS_STALE) && this.attemptedRetries < MAX_RETRIES) { this.attemptedRetries++; return true; } return false; }
&& this.attemptedRetries < MAX_RETRIES) {
boolean shouldRetryForGone(int statusCode, int subStatusCode) { if (statusCode == StatusCodes.GONE && (subStatusCode == SubStatusCodes.PARTITION_KEY_RANGE_GONE || subStatusCode == SubStatusCodes.NAME_CACHE_IS_STALE || subStatusCode == SubStatusCodes.COMPLETING_SPLIT || subStatusCode == SubStatusCodes.COMPLETING_PARTITION_MIGRATION) && this.attemptedRetries < MAX_RETRIES) { this.attemptedRetries++; if (subStatusCode == SubStatusCodes.NAME_CACHE_IS_STALE) { refreshCollectionCache(); } return true; } return false; }
class BulkOperationRetryPolicy extends RetryPolicyWithDiagnostics { private static final int MAX_RETRIES = 1; private final ResourceThrottleRetryPolicy resourceThrottleRetryPolicy; private int attemptedRetries; BulkOperationRetryPolicy(ResourceThrottleRetryPolicy resourceThrottleRetryPolicy) { this.resourceThrottleRetryPolicy = resourceThrottleRetryPolicy; } final Mono<IRetryPolicy.ShouldRetryResult> shouldRetry(final TransactionalBatchOperationResult result) { checkNotNull(result, "expected non-null result"); CosmosException exception = BridgeInternal.createCosmosException( result.getStatusCode(), null, BulkExecutorUtil.getResponseHeadersFromBatchOperationResult(result)); if (this.resourceThrottleRetryPolicy == null) { return Mono.just(IRetryPolicy.ShouldRetryResult.noRetry()); } return this.resourceThrottleRetryPolicy.shouldRetry(exception); } @Override public Mono<IRetryPolicy.ShouldRetryResult> shouldRetry(Exception exception) { if (this.resourceThrottleRetryPolicy == null) { return Mono.just(IRetryPolicy.ShouldRetryResult.noRetry()); } return this.resourceThrottleRetryPolicy.shouldRetry(exception); } }
class BulkOperationRetryPolicy extends RetryPolicyWithDiagnostics { private static final int MAX_RETRIES = 1; private final RxCollectionCache collectionCache; private final String collectionLink; private final ResourceThrottleRetryPolicy resourceThrottleRetryPolicy; private int attemptedRetries; BulkOperationRetryPolicy( RxCollectionCache collectionCache, String resourceFullName, ResourceThrottleRetryPolicy resourceThrottleRetryPolicy) { this.collectionCache = collectionCache; collectionLink = Utils.getCollectionName(resourceFullName); this.resourceThrottleRetryPolicy = resourceThrottleRetryPolicy; } final Mono<ShouldRetryResult> shouldRetry(final TransactionalBatchOperationResult result) { checkNotNull(result, "expected non-null result"); CosmosException exception = BridgeInternal.createCosmosException( null, result.getStatusCode(), null, BulkExecutorUtil.getResponseHeadersFromBatchOperationResult(result)); if (this.resourceThrottleRetryPolicy == null) { return Mono.just(ShouldRetryResult.noRetry()); } return this.resourceThrottleRetryPolicy.shouldRetry(exception); } @Override public Mono<ShouldRetryResult> shouldRetry(Exception exception) { if (this.resourceThrottleRetryPolicy == null) { return Mono.just(ShouldRetryResult.noRetry()); } return this.resourceThrottleRetryPolicy.shouldRetry(exception); } /** * TODO(rakkuma): metaDataDiagnosticContext is passed null in collectionCache.refresh function. Fix it while adding * support for an operation wise Diagnostic. The value here should be merged in the individual diagnostic. * Issue: https: */ private void refreshCollectionCache() { this.collectionCache.refresh( null, this.collectionLink, null); } }
1. I can see different refreshes for different substatus codes. For pk range, I don't see any thing to handle forceRefresh. Even in function RxPartitionKeyRangeCache::getPartitionKeyRange function, forceRefresh variable is unused. Will make some changes to address collection cache refresh. 2. Will add COMPLETING_SPLIT and COMPLETING_PARTITION_MIGRATION. Thanks.
boolean shouldRetryForGone(int statusCode, int subStatusCode) { if (statusCode == StatusCodes.GONE && (subStatusCode == SubStatusCodes.PARTITION_KEY_RANGE_GONE || subStatusCode == SubStatusCodes.NAME_CACHE_IS_STALE) && this.attemptedRetries < MAX_RETRIES) { this.attemptedRetries++; return true; } return false; }
&& this.attemptedRetries < MAX_RETRIES) {
boolean shouldRetryForGone(int statusCode, int subStatusCode) { if (statusCode == StatusCodes.GONE && (subStatusCode == SubStatusCodes.PARTITION_KEY_RANGE_GONE || subStatusCode == SubStatusCodes.NAME_CACHE_IS_STALE || subStatusCode == SubStatusCodes.COMPLETING_SPLIT || subStatusCode == SubStatusCodes.COMPLETING_PARTITION_MIGRATION) && this.attemptedRetries < MAX_RETRIES) { this.attemptedRetries++; if (subStatusCode == SubStatusCodes.NAME_CACHE_IS_STALE) { refreshCollectionCache(); } return true; } return false; }
class BulkOperationRetryPolicy extends RetryPolicyWithDiagnostics { private static final int MAX_RETRIES = 1; private final ResourceThrottleRetryPolicy resourceThrottleRetryPolicy; private int attemptedRetries; BulkOperationRetryPolicy(ResourceThrottleRetryPolicy resourceThrottleRetryPolicy) { this.resourceThrottleRetryPolicy = resourceThrottleRetryPolicy; } final Mono<IRetryPolicy.ShouldRetryResult> shouldRetry(final TransactionalBatchOperationResult result) { checkNotNull(result, "expected non-null result"); CosmosException exception = BridgeInternal.createCosmosException( result.getStatusCode(), null, BulkExecutorUtil.getResponseHeadersFromBatchOperationResult(result)); if (this.resourceThrottleRetryPolicy == null) { return Mono.just(IRetryPolicy.ShouldRetryResult.noRetry()); } return this.resourceThrottleRetryPolicy.shouldRetry(exception); } @Override public Mono<IRetryPolicy.ShouldRetryResult> shouldRetry(Exception exception) { if (this.resourceThrottleRetryPolicy == null) { return Mono.just(IRetryPolicy.ShouldRetryResult.noRetry()); } return this.resourceThrottleRetryPolicy.shouldRetry(exception); } }
class BulkOperationRetryPolicy extends RetryPolicyWithDiagnostics { private static final int MAX_RETRIES = 1; private final RxCollectionCache collectionCache; private final String collectionLink; private final ResourceThrottleRetryPolicy resourceThrottleRetryPolicy; private int attemptedRetries; BulkOperationRetryPolicy( RxCollectionCache collectionCache, String resourceFullName, ResourceThrottleRetryPolicy resourceThrottleRetryPolicy) { this.collectionCache = collectionCache; collectionLink = Utils.getCollectionName(resourceFullName); this.resourceThrottleRetryPolicy = resourceThrottleRetryPolicy; } final Mono<ShouldRetryResult> shouldRetry(final TransactionalBatchOperationResult result) { checkNotNull(result, "expected non-null result"); CosmosException exception = BridgeInternal.createCosmosException( null, result.getStatusCode(), null, BulkExecutorUtil.getResponseHeadersFromBatchOperationResult(result)); if (this.resourceThrottleRetryPolicy == null) { return Mono.just(ShouldRetryResult.noRetry()); } return this.resourceThrottleRetryPolicy.shouldRetry(exception); } @Override public Mono<ShouldRetryResult> shouldRetry(Exception exception) { if (this.resourceThrottleRetryPolicy == null) { return Mono.just(ShouldRetryResult.noRetry()); } return this.resourceThrottleRetryPolicy.shouldRetry(exception); } /** * TODO(rakkuma): metaDataDiagnosticContext is passed null in collectionCache.refresh function. Fix it while adding * support for an operation wise Diagnostic. The value here should be merged in the individual diagnostic. * Issue: https: */ private void refreshCollectionCache() { this.collectionCache.refresh( null, this.collectionLink, null); } }
Thanks Rakesh. I read the flow one more time, and I think I understand the general flow better now. So ultimately it will be an RxDocumentServiceRequest, underlying with GoneAndRetryWithRetryPolicy, RenameCollectionAwareClientRetryPolicy will be applied. Since we do not retry for COMPLETING_SPLIT etc for write operations(but will refresh the range cache), so the exceptions will be throwned and eventually captured in BulkOperationRetryPolicy for each bulkItemOperation. By the way, InvalidPartitionExceptionRetryPolicy is doing similar collection cache refresh~
boolean shouldRetryForGone(int statusCode, int subStatusCode) { if (statusCode == StatusCodes.GONE && (subStatusCode == SubStatusCodes.PARTITION_KEY_RANGE_GONE || subStatusCode == SubStatusCodes.NAME_CACHE_IS_STALE) && this.attemptedRetries < MAX_RETRIES) { this.attemptedRetries++; return true; } return false; }
&& this.attemptedRetries < MAX_RETRIES) {
boolean shouldRetryForGone(int statusCode, int subStatusCode) { if (statusCode == StatusCodes.GONE && (subStatusCode == SubStatusCodes.PARTITION_KEY_RANGE_GONE || subStatusCode == SubStatusCodes.NAME_CACHE_IS_STALE || subStatusCode == SubStatusCodes.COMPLETING_SPLIT || subStatusCode == SubStatusCodes.COMPLETING_PARTITION_MIGRATION) && this.attemptedRetries < MAX_RETRIES) { this.attemptedRetries++; if (subStatusCode == SubStatusCodes.NAME_CACHE_IS_STALE) { refreshCollectionCache(); } return true; } return false; }
class BulkOperationRetryPolicy extends RetryPolicyWithDiagnostics { private static final int MAX_RETRIES = 1; private final ResourceThrottleRetryPolicy resourceThrottleRetryPolicy; private int attemptedRetries; BulkOperationRetryPolicy(ResourceThrottleRetryPolicy resourceThrottleRetryPolicy) { this.resourceThrottleRetryPolicy = resourceThrottleRetryPolicy; } final Mono<IRetryPolicy.ShouldRetryResult> shouldRetry(final TransactionalBatchOperationResult result) { checkNotNull(result, "expected non-null result"); CosmosException exception = BridgeInternal.createCosmosException( result.getStatusCode(), null, BulkExecutorUtil.getResponseHeadersFromBatchOperationResult(result)); if (this.resourceThrottleRetryPolicy == null) { return Mono.just(IRetryPolicy.ShouldRetryResult.noRetry()); } return this.resourceThrottleRetryPolicy.shouldRetry(exception); } @Override public Mono<IRetryPolicy.ShouldRetryResult> shouldRetry(Exception exception) { if (this.resourceThrottleRetryPolicy == null) { return Mono.just(IRetryPolicy.ShouldRetryResult.noRetry()); } return this.resourceThrottleRetryPolicy.shouldRetry(exception); } }
class BulkOperationRetryPolicy extends RetryPolicyWithDiagnostics { private static final int MAX_RETRIES = 1; private final RxCollectionCache collectionCache; private final String collectionLink; private final ResourceThrottleRetryPolicy resourceThrottleRetryPolicy; private int attemptedRetries; BulkOperationRetryPolicy( RxCollectionCache collectionCache, String resourceFullName, ResourceThrottleRetryPolicy resourceThrottleRetryPolicy) { this.collectionCache = collectionCache; collectionLink = Utils.getCollectionName(resourceFullName); this.resourceThrottleRetryPolicy = resourceThrottleRetryPolicy; } final Mono<ShouldRetryResult> shouldRetry(final TransactionalBatchOperationResult result) { checkNotNull(result, "expected non-null result"); CosmosException exception = BridgeInternal.createCosmosException( null, result.getStatusCode(), null, BulkExecutorUtil.getResponseHeadersFromBatchOperationResult(result)); if (this.resourceThrottleRetryPolicy == null) { return Mono.just(ShouldRetryResult.noRetry()); } return this.resourceThrottleRetryPolicy.shouldRetry(exception); } @Override public Mono<ShouldRetryResult> shouldRetry(Exception exception) { if (this.resourceThrottleRetryPolicy == null) { return Mono.just(ShouldRetryResult.noRetry()); } return this.resourceThrottleRetryPolicy.shouldRetry(exception); } /** * TODO(rakkuma): metaDataDiagnosticContext is passed null in collectionCache.refresh function. Fix it while adding * support for an operation wise Diagnostic. The value here should be merged in the individual diagnostic. * Issue: https: */ private void refreshCollectionCache() { this.collectionCache.refresh( null, this.collectionLink, null); } }
This should throw NPE and the error message should be `"'serviceBusMessage' cannot be null"`. Update the JavaDoc too.
public boolean tryAddMessage(final ServiceBusMessage serviceBusMessage) { if (serviceBusMessage == null) { throw logger.logExceptionAsWarning(new IllegalArgumentException("message cannot be null")); } ServiceBusMessage serviceBusMessageUpdated = tracerProvider.isEnabled() ? traceMessageSpan(serviceBusMessage) : serviceBusMessage; final int size; try { size = getSize(serviceBusMessageUpdated, serviceBusMessageList.isEmpty()); } catch (BufferOverflowException exception) { throw logger.logExceptionAsWarning(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } synchronized (lock) { if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; } this.serviceBusMessageList.add(serviceBusMessageUpdated); return true; }
throw logger.logExceptionAsWarning(new IllegalArgumentException("message cannot be null"));
public boolean tryAddMessage(final ServiceBusMessage serviceBusMessage) { if (serviceBusMessage == null) { throw logger.logExceptionAsWarning(new NullPointerException("'serviceBusMessage' cannot be null")); } ServiceBusMessage serviceBusMessageUpdated = tracerProvider.isEnabled() ? traceMessageSpan(serviceBusMessage) : serviceBusMessage; final int size; try { size = getSize(serviceBusMessageUpdated, serviceBusMessageList.isEmpty()); } catch (BufferOverflowException exception) { throw logger.logExceptionAsWarning(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } synchronized (lock) { if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; } this.serviceBusMessageList.add(serviceBusMessageUpdated); return true; }
class ServiceBusMessageBatch { private static final String AZ_TRACING_NAMESPACE_VALUE = "Microsoft.ServiceBus"; private final ClientLogger logger = new ClientLogger(ServiceBusMessageBatch.class); private final Object lock = new Object(); private final int maxMessageSize; private final ErrorContextProvider contextProvider; private final MessageSerializer serializer; private final List<ServiceBusMessage> serviceBusMessageList; private final byte[] eventBytes; private int sizeInBytes; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; ServiceBusMessageBatch(int maxMessageSize, ErrorContextProvider contextProvider, TracerProvider tracerProvider, MessageSerializer serializer, String entityPath, String hostname) { this.maxMessageSize = maxMessageSize; this.contextProvider = contextProvider; this.serializer = serializer; this.serviceBusMessageList = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; } /** * Gets the number of {@link ServiceBusMessage events} in the batch. * * @return The number of {@link ServiceBusMessage events} in the batch. */ public int getCount() { return serviceBusMessageList.size(); } /** * Gets the maximum size, in bytes, of the {@link ServiceBusMessageBatch}. * * @return The maximum size, in bytes, of the {@link ServiceBusMessageBatch}. */ public int getMaxSizeInBytes() { return maxMessageSize; } /** * Gets the size of the {@link ServiceBusMessageBatch} in bytes. * * @return the size of the {@link ServiceBusMessageBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link ServiceBusMessage message} to the batch. * * @param serviceBusMessage The {@link ServiceBusMessage} to add to the batch. 
* * @return {@code true} if the message could be added to the batch; {@code false} if the event was too large to fit * in the batch. * * @throws IllegalArgumentException if {@code message} is {@code null}. * @throws AmqpException if {@code message} is larger than the maximum size of the {@link * ServiceBusMessageBatch}. */ /** * Gets the messages in the batch. * * @return The messages in the message batch. */ List<ServiceBusMessage> getMessages() { return serviceBusMessageList; } /** * Method to start and end a "Azure.EventHubs.message" span and add the "DiagnosticId" as a property of the * message. * * @param serviceBusMessage The Message to add tracing span for. * * @return the updated Message data object. */ private ServiceBusMessage traceMessageSpan(ServiceBusMessage serviceBusMessage) { Optional<Object> eventContextData = serviceBusMessage.getContext().getData(SPAN_CONTEXT_KEY); if (eventContextData.isPresent()) { return serviceBusMessage; } else { Context messageContext = serviceBusMessage.getContext() .addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(ENTITY_PATH_KEY, entityPath) .addData(HOST_NAME_KEY, hostname); Context eventSpanContext = tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, messageContext, ProcessKind.MESSAGE); Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { serviceBusMessage.getApplicationProperties().put(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get() .toString()); tracerProvider.endSpan(eventSpanContext, Signal.complete()); serviceBusMessage.addContext(SPAN_CONTEXT_KEY, eventSpanContext); } } return serviceBusMessage; } private int getSize(final ServiceBusMessage serviceBusMessage, final boolean isFirst) { Objects.requireNonNull(serviceBusMessage, "'serviceBusMessage' cannot be null."); final org.apache.qpid.proton.message.Message amqpMessage = serializer.serialize(serviceBusMessage); int eventSize = 
amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } }
class ServiceBusMessageBatch { private static final String AZ_TRACING_NAMESPACE_VALUE = "Microsoft.ServiceBus"; private final ClientLogger logger = new ClientLogger(ServiceBusMessageBatch.class); private final Object lock = new Object(); private final int maxMessageSize; private final ErrorContextProvider contextProvider; private final MessageSerializer serializer; private final List<ServiceBusMessage> serviceBusMessageList; private final byte[] eventBytes; private int sizeInBytes; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; ServiceBusMessageBatch(int maxMessageSize, ErrorContextProvider contextProvider, TracerProvider tracerProvider, MessageSerializer serializer, String entityPath, String hostname) { this.maxMessageSize = maxMessageSize; this.contextProvider = contextProvider; this.serializer = serializer; this.serviceBusMessageList = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; } /** * Gets the number of {@link ServiceBusMessage events} in the batch. * * @return The number of {@link ServiceBusMessage events} in the batch. */ public int getCount() { return serviceBusMessageList.size(); } /** * Gets the maximum size, in bytes, of the {@link ServiceBusMessageBatch}. * * @return The maximum size, in bytes, of the {@link ServiceBusMessageBatch}. */ public int getMaxSizeInBytes() { return maxMessageSize; } /** * Gets the size of the {@link ServiceBusMessageBatch} in bytes. * * @return the size of the {@link ServiceBusMessageBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link ServiceBusMessage message} to the batch. * * @param serviceBusMessage The {@link ServiceBusMessage} to add to the batch. 
* * @return {@code true} if the message could be added to the batch; {@code false} if the event was too large to fit * in the batch. * * @throws NullPointerException if {@code message} is {@code null}. * @throws AmqpException if {@code message} is larger than the maximum size of the {@link * ServiceBusMessageBatch}. */ /** * Gets the messages in the batch. * * @return The messages in the message batch. */ List<ServiceBusMessage> getMessages() { return serviceBusMessageList; } /** * Method to start and end a "Azure.EventHubs.message" span and add the "DiagnosticId" as a property of the * message. * * @param serviceBusMessage The Message to add tracing span for. * * @return the updated Message data object. */ private ServiceBusMessage traceMessageSpan(ServiceBusMessage serviceBusMessage) { Optional<Object> eventContextData = serviceBusMessage.getContext().getData(SPAN_CONTEXT_KEY); if (eventContextData.isPresent()) { return serviceBusMessage; } else { Context messageContext = serviceBusMessage.getContext() .addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(ENTITY_PATH_KEY, entityPath) .addData(HOST_NAME_KEY, hostname); Context eventSpanContext = tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, messageContext, ProcessKind.MESSAGE); Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { serviceBusMessage.getApplicationProperties().put(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get() .toString()); tracerProvider.endSpan(eventSpanContext, Signal.complete()); serviceBusMessage.addContext(SPAN_CONTEXT_KEY, eventSpanContext); } } return serviceBusMessage; } private int getSize(final ServiceBusMessage serviceBusMessage, final boolean isFirst) { Objects.requireNonNull(serviceBusMessage, "'serviceBusMessage' cannot be null."); final org.apache.qpid.proton.message.Message amqpMessage = serializer.serialize(serviceBusMessage); int eventSize = 
amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } }
updated
public boolean tryAddMessage(final ServiceBusMessage serviceBusMessage) { if (serviceBusMessage == null) { throw logger.logExceptionAsWarning(new IllegalArgumentException("message cannot be null")); } ServiceBusMessage serviceBusMessageUpdated = tracerProvider.isEnabled() ? traceMessageSpan(serviceBusMessage) : serviceBusMessage; final int size; try { size = getSize(serviceBusMessageUpdated, serviceBusMessageList.isEmpty()); } catch (BufferOverflowException exception) { throw logger.logExceptionAsWarning(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } synchronized (lock) { if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; } this.serviceBusMessageList.add(serviceBusMessageUpdated); return true; }
throw logger.logExceptionAsWarning(new IllegalArgumentException("message cannot be null"));
public boolean tryAddMessage(final ServiceBusMessage serviceBusMessage) { if (serviceBusMessage == null) { throw logger.logExceptionAsWarning(new NullPointerException("'serviceBusMessage' cannot be null")); } ServiceBusMessage serviceBusMessageUpdated = tracerProvider.isEnabled() ? traceMessageSpan(serviceBusMessage) : serviceBusMessage; final int size; try { size = getSize(serviceBusMessageUpdated, serviceBusMessageList.isEmpty()); } catch (BufferOverflowException exception) { throw logger.logExceptionAsWarning(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } synchronized (lock) { if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; } this.serviceBusMessageList.add(serviceBusMessageUpdated); return true; }
class ServiceBusMessageBatch { private static final String AZ_TRACING_NAMESPACE_VALUE = "Microsoft.ServiceBus"; private final ClientLogger logger = new ClientLogger(ServiceBusMessageBatch.class); private final Object lock = new Object(); private final int maxMessageSize; private final ErrorContextProvider contextProvider; private final MessageSerializer serializer; private final List<ServiceBusMessage> serviceBusMessageList; private final byte[] eventBytes; private int sizeInBytes; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; ServiceBusMessageBatch(int maxMessageSize, ErrorContextProvider contextProvider, TracerProvider tracerProvider, MessageSerializer serializer, String entityPath, String hostname) { this.maxMessageSize = maxMessageSize; this.contextProvider = contextProvider; this.serializer = serializer; this.serviceBusMessageList = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; } /** * Gets the number of {@link ServiceBusMessage events} in the batch. * * @return The number of {@link ServiceBusMessage events} in the batch. */ public int getCount() { return serviceBusMessageList.size(); } /** * Gets the maximum size, in bytes, of the {@link ServiceBusMessageBatch}. * * @return The maximum size, in bytes, of the {@link ServiceBusMessageBatch}. */ public int getMaxSizeInBytes() { return maxMessageSize; } /** * Gets the size of the {@link ServiceBusMessageBatch} in bytes. * * @return the size of the {@link ServiceBusMessageBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link ServiceBusMessage message} to the batch. * * @param serviceBusMessage The {@link ServiceBusMessage} to add to the batch. 
* * @return {@code true} if the message could be added to the batch; {@code false} if the event was too large to fit * in the batch. * * @throws IllegalArgumentException if {@code message} is {@code null}. * @throws AmqpException if {@code message} is larger than the maximum size of the {@link * ServiceBusMessageBatch}. */ /** * Gets the messages in the batch. * * @return The messages in the message batch. */ List<ServiceBusMessage> getMessages() { return serviceBusMessageList; } /** * Method to start and end a "Azure.EventHubs.message" span and add the "DiagnosticId" as a property of the * message. * * @param serviceBusMessage The Message to add tracing span for. * * @return the updated Message data object. */ private ServiceBusMessage traceMessageSpan(ServiceBusMessage serviceBusMessage) { Optional<Object> eventContextData = serviceBusMessage.getContext().getData(SPAN_CONTEXT_KEY); if (eventContextData.isPresent()) { return serviceBusMessage; } else { Context messageContext = serviceBusMessage.getContext() .addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(ENTITY_PATH_KEY, entityPath) .addData(HOST_NAME_KEY, hostname); Context eventSpanContext = tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, messageContext, ProcessKind.MESSAGE); Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { serviceBusMessage.getApplicationProperties().put(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get() .toString()); tracerProvider.endSpan(eventSpanContext, Signal.complete()); serviceBusMessage.addContext(SPAN_CONTEXT_KEY, eventSpanContext); } } return serviceBusMessage; } private int getSize(final ServiceBusMessage serviceBusMessage, final boolean isFirst) { Objects.requireNonNull(serviceBusMessage, "'serviceBusMessage' cannot be null."); final org.apache.qpid.proton.message.Message amqpMessage = serializer.serialize(serviceBusMessage); int eventSize = 
amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } }
class ServiceBusMessageBatch { private static final String AZ_TRACING_NAMESPACE_VALUE = "Microsoft.ServiceBus"; private final ClientLogger logger = new ClientLogger(ServiceBusMessageBatch.class); private final Object lock = new Object(); private final int maxMessageSize; private final ErrorContextProvider contextProvider; private final MessageSerializer serializer; private final List<ServiceBusMessage> serviceBusMessageList; private final byte[] eventBytes; private int sizeInBytes; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; ServiceBusMessageBatch(int maxMessageSize, ErrorContextProvider contextProvider, TracerProvider tracerProvider, MessageSerializer serializer, String entityPath, String hostname) { this.maxMessageSize = maxMessageSize; this.contextProvider = contextProvider; this.serializer = serializer; this.serviceBusMessageList = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; } /** * Gets the number of {@link ServiceBusMessage events} in the batch. * * @return The number of {@link ServiceBusMessage events} in the batch. */ public int getCount() { return serviceBusMessageList.size(); } /** * Gets the maximum size, in bytes, of the {@link ServiceBusMessageBatch}. * * @return The maximum size, in bytes, of the {@link ServiceBusMessageBatch}. */ public int getMaxSizeInBytes() { return maxMessageSize; } /** * Gets the size of the {@link ServiceBusMessageBatch} in bytes. * * @return the size of the {@link ServiceBusMessageBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link ServiceBusMessage message} to the batch. * * @param serviceBusMessage The {@link ServiceBusMessage} to add to the batch. 
* * @return {@code true} if the message could be added to the batch; {@code false} if the event was too large to fit * in the batch. * * @throws NullPointerException if {@code message} is {@code null}. * @throws AmqpException if {@code message} is larger than the maximum size of the {@link * ServiceBusMessageBatch}. */ /** * Gets the messages in the batch. * * @return The messages in the message batch. */ List<ServiceBusMessage> getMessages() { return serviceBusMessageList; } /** * Method to start and end a "Azure.EventHubs.message" span and add the "DiagnosticId" as a property of the * message. * * @param serviceBusMessage The Message to add tracing span for. * * @return the updated Message data object. */ private ServiceBusMessage traceMessageSpan(ServiceBusMessage serviceBusMessage) { Optional<Object> eventContextData = serviceBusMessage.getContext().getData(SPAN_CONTEXT_KEY); if (eventContextData.isPresent()) { return serviceBusMessage; } else { Context messageContext = serviceBusMessage.getContext() .addData(AZ_TRACING_NAMESPACE_KEY, AZ_TRACING_NAMESPACE_VALUE) .addData(ENTITY_PATH_KEY, entityPath) .addData(HOST_NAME_KEY, hostname); Context eventSpanContext = tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, messageContext, ProcessKind.MESSAGE); Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { serviceBusMessage.getApplicationProperties().put(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get() .toString()); tracerProvider.endSpan(eventSpanContext, Signal.complete()); serviceBusMessage.addContext(SPAN_CONTEXT_KEY, eventSpanContext); } } return serviceBusMessage; } private int getSize(final ServiceBusMessage serviceBusMessage, final boolean isFirst) { Objects.requireNonNull(serviceBusMessage, "'serviceBusMessage' cannot be null."); final org.apache.qpid.proton.message.Message amqpMessage = serializer.serialize(serviceBusMessage); int eventSize = 
amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } }
Update javadoc to include the exceptions thrown if the connection string is invalid.
public static ServiceBusConnectionStringProperties parse(String connectionString) { return new ServiceBusConnectionStringProperties(new ConnectionStringProperties(connectionString)); }
return new ServiceBusConnectionStringProperties(new ConnectionStringProperties(connectionString));
public static ServiceBusConnectionStringProperties parse(String connectionString) { return new ServiceBusConnectionStringProperties(new ConnectionStringProperties(connectionString)); }
class ServiceBusConnectionStringProperties { private final URI endpoint; private final String entityPath; private final String sharedAccessKeyName; private final String sharedAccessKey; private final String sharedAccessSignature; private ServiceBusConnectionStringProperties(ConnectionStringProperties properties) { this.endpoint = properties.getEndpoint(); this.entityPath = properties.getEntityPath(); this.sharedAccessKeyName = properties.getSharedAccessKeyName(); this.sharedAccessKey = properties.getSharedAccessKey(); this.sharedAccessSignature = properties.getSharedAccessSignature(); } /** * Parse a ServiceBus connection string into an instance of this class. * @param connectionString The connection string to be parsed. * @return An instance of this class. */ /** * Get the "EntityPath" value of the connection string. * @return The entity path, or {@code null} if the connection string doesn't have an "EntityPath". */ public String getEntityPath() { return entityPath; } /** * Get the "Endpoint" value of the connection string. * @return The endpoint. */ public String getEndpoint() { return String.format("%s: } /** * Get the fully qualified namespace, or hostname, from the connection string "Endpoint" section. * @return The fully qualified namespace. */ public String getFullyQualifiedNamespace() { return this.endpoint.getHost(); } /** * Get the "SharedAccessKeyName" section of the connection string. * @return The shared access key name, or {@code null} if the connection string doesn't have an * "SharedAccessKeyName". */ public String getSharedAccessKeyName() { return this.sharedAccessKeyName; } /** * Get the "SharedAccessSignature" section of the connection string. * @return The shared access key value, or {@code null} if the connection string doesn't have an * "SharedAccessSignature". */ public String getSharedAccessKey() { return this.sharedAccessKey; } /** * Get the "SharedAccessSignature" section of the connection string. 
* @return The shared access signature, or {@code null} if the connection string doesn't have an * "SharedAccessSignature". */ public String getSharedAccessSignature() { return this.sharedAccessSignature; } }
class ServiceBusConnectionStringProperties { private final URI endpoint; private final String entityPath; private final String sharedAccessKeyName; private final String sharedAccessKey; private final String sharedAccessSignature; private ServiceBusConnectionStringProperties(ConnectionStringProperties properties) { this.endpoint = properties.getEndpoint(); this.entityPath = properties.getEntityPath(); this.sharedAccessKeyName = properties.getSharedAccessKeyName(); this.sharedAccessKey = properties.getSharedAccessKey(); this.sharedAccessSignature = properties.getSharedAccessSignature(); } /** * Parse a ServiceBus connection string into an instance of this class. * @param connectionString The connection string to be parsed. * @return An instance of this class. * @throws NullPointerException if {@code connectionString} is null. * @throws IllegalArgumentException if the {@code connectionString} is empty or malformatted. */ /** * Get the "EntityPath" value of the connection string. * @return The entity path, or {@code null} if the connection string doesn't have an "EntityPath". */ public String getEntityPath() { return entityPath; } /** * Get the "Endpoint" value of the connection string. * @return The endpoint. */ public String getEndpoint() { return String.format("%s: } /** * Get the fully qualified namespace, or hostname, from the connection string "Endpoint" section. * @return The fully qualified namespace. */ public String getFullyQualifiedNamespace() { return this.endpoint.getHost(); } /** * Get the "SharedAccessKeyName" section of the connection string. * @return The shared access key name, or {@code null} if the connection string doesn't have an * "SharedAccessKeyName". */ public String getSharedAccessKeyName() { return this.sharedAccessKeyName; } /** * Get the "SharedAccessSignature" section of the connection string. * @return The shared access key value, or {@code null} if the connection string doesn't have an * "SharedAccessSignature". 
*/ public String getSharedAccessKey() { return this.sharedAccessKey; } /** * Get the "SharedAccessSignature" section of the connection string. * @return The shared access signature, or {@code null} if the connection string doesn't have an * "SharedAccessSignature". */ public String getSharedAccessSignature() { return this.sharedAccessSignature; } }
Added
public static ServiceBusConnectionStringProperties parse(String connectionString) { return new ServiceBusConnectionStringProperties(new ConnectionStringProperties(connectionString)); }
return new ServiceBusConnectionStringProperties(new ConnectionStringProperties(connectionString));
public static ServiceBusConnectionStringProperties parse(String connectionString) { return new ServiceBusConnectionStringProperties(new ConnectionStringProperties(connectionString)); }
class ServiceBusConnectionStringProperties { private final URI endpoint; private final String entityPath; private final String sharedAccessKeyName; private final String sharedAccessKey; private final String sharedAccessSignature; private ServiceBusConnectionStringProperties(ConnectionStringProperties properties) { this.endpoint = properties.getEndpoint(); this.entityPath = properties.getEntityPath(); this.sharedAccessKeyName = properties.getSharedAccessKeyName(); this.sharedAccessKey = properties.getSharedAccessKey(); this.sharedAccessSignature = properties.getSharedAccessSignature(); } /** * Parse a ServiceBus connection string into an instance of this class. * @param connectionString The connection string to be parsed. * @return An instance of this class. */ /** * Get the "EntityPath" value of the connection string. * @return The entity path, or {@code null} if the connection string doesn't have an "EntityPath". */ public String getEntityPath() { return entityPath; } /** * Get the "Endpoint" value of the connection string. * @return The endpoint. */ public String getEndpoint() { return String.format("%s: } /** * Get the fully qualified namespace, or hostname, from the connection string "Endpoint" section. * @return The fully qualified namespace. */ public String getFullyQualifiedNamespace() { return this.endpoint.getHost(); } /** * Get the "SharedAccessKeyName" section of the connection string. * @return The shared access key name, or {@code null} if the connection string doesn't have an * "SharedAccessKeyName". */ public String getSharedAccessKeyName() { return this.sharedAccessKeyName; } /** * Get the "SharedAccessSignature" section of the connection string. * @return The shared access key value, or {@code null} if the connection string doesn't have an * "SharedAccessSignature". */ public String getSharedAccessKey() { return this.sharedAccessKey; } /** * Get the "SharedAccessSignature" section of the connection string. 
* @return The shared access signature, or {@code null} if the connection string doesn't have an * "SharedAccessSignature". */ public String getSharedAccessSignature() { return this.sharedAccessSignature; } }
class ServiceBusConnectionStringProperties { private final URI endpoint; private final String entityPath; private final String sharedAccessKeyName; private final String sharedAccessKey; private final String sharedAccessSignature; private ServiceBusConnectionStringProperties(ConnectionStringProperties properties) { this.endpoint = properties.getEndpoint(); this.entityPath = properties.getEntityPath(); this.sharedAccessKeyName = properties.getSharedAccessKeyName(); this.sharedAccessKey = properties.getSharedAccessKey(); this.sharedAccessSignature = properties.getSharedAccessSignature(); } /** * Parse a ServiceBus connection string into an instance of this class. * @param connectionString The connection string to be parsed. * @return An instance of this class. * @throws NullPointerException if {@code connectionString} is null. * @throws IllegalArgumentException if the {@code connectionString} is empty or malformatted. */ /** * Get the "EntityPath" value of the connection string. * @return The entity path, or {@code null} if the connection string doesn't have an "EntityPath". */ public String getEntityPath() { return entityPath; } /** * Get the "Endpoint" value of the connection string. * @return The endpoint. */ public String getEndpoint() { return String.format("%s: } /** * Get the fully qualified namespace, or hostname, from the connection string "Endpoint" section. * @return The fully qualified namespace. */ public String getFullyQualifiedNamespace() { return this.endpoint.getHost(); } /** * Get the "SharedAccessKeyName" section of the connection string. * @return The shared access key name, or {@code null} if the connection string doesn't have an * "SharedAccessKeyName". */ public String getSharedAccessKeyName() { return this.sharedAccessKeyName; } /** * Get the "SharedAccessSignature" section of the connection string. * @return The shared access key value, or {@code null} if the connection string doesn't have an * "SharedAccessSignature". 
*/ public String getSharedAccessKey() { return this.sharedAccessKey; } /** * Get the "SharedAccessSignature" section of the connection string. * @return The shared access signature, or {@code null} if the connection string doesn't have an * "SharedAccessSignature". */ public String getSharedAccessSignature() { return this.sharedAccessSignature; } }
Didnt we say that we will not block and instead use stepVerifier in tests?
public void beginReleasePhoneNumbers(HttpClient httpClient) { PhoneNumber phoneNumber = new PhoneNumber(PHONENUMBER_TO_RELEASE); List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(phoneNumber); Duration pollInterval = Duration.ofSeconds(5); PollerFlux<PhoneNumberRelease, PhoneNumberRelease> poller = this.getClient(httpClient).beginReleasePhoneNumbers(phoneNumbers, pollInterval); AsyncPollResponse<PhoneNumberRelease, PhoneNumberRelease> asyncRes = poller.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED). blockLast(); PhoneNumberRelease testResult = asyncRes.getValue(); assertEquals(ReleaseStatus.COMPLETE, testResult.getStatus()); }
AsyncPollResponse<PhoneNumberRelease, PhoneNumberRelease> asyncRes =
public void beginReleasePhoneNumbers(HttpClient httpClient) { PhoneNumber phoneNumber = new PhoneNumber(PHONENUMBER_TO_RELEASE); List<PhoneNumber> phoneNumbers = new ArrayList<>(); phoneNumbers.add(phoneNumber); Duration pollInterval = Duration.ofSeconds(1); PollerFlux<PhoneNumberRelease, PhoneNumberRelease> poller = this.getClient(httpClient).beginReleasePhoneNumbers(phoneNumbers, pollInterval); Mono<AsyncPollResponse<PhoneNumberRelease, PhoneNumberRelease>> asyncRes = poller.last(); StepVerifier.create(asyncRes) .assertNext(item -> { assertEquals(ReleaseStatus.COMPLETE, item.getValue().getStatus()); }) .verifyComplete(); }
class PhoneNumberAsyncClientIntegrationTest extends PhoneNumberIntegrationTestBase { @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void createAsyncPhoneNumberClientWithConnectionString(HttpClient httpClient) { PhoneNumberAsyncClient phoneNumberAsyncClient = getClientBuilderWithConnectionString(httpClient).buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); PagedFlux<AcquiredPhoneNumber> pagedFlux = phoneNumberAsyncClient.listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void listAllPhoneNumbers(HttpClient httpClient) { PagedFlux<AcquiredPhoneNumber> pagedFlux = this.getClient(httpClient).listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void listPhonePlanGroups(HttpClient httpClient) { PagedFlux<PhonePlanGroup> pagedFlux = this.getClient(httpClient).listPhonePlanGroups(COUNTRY_CODE, LOCALE, true); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanGroupId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void listPhonePlans(HttpClient httpClient) { PagedFlux<PhonePlan> pagedFlux = this.getClient(httpClient).listPhonePlans(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void listAllReleases(HttpClient httpClient) { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient(httpClient).listAllReleases(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @ParameterizedTest 
@MethodSource("com.azure.core.test.TestBase public void listAllSearches(HttpClient httpClient) { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient(httpClient).listAllSearches(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void listAllSupportedCountries(HttpClient httpClient) { PagedFlux<PhoneNumberCountry> pagedFlux = this.getClient(httpClient).listAllSupportedCountries(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getCountryCode()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getPhonePlanLocationOptions(HttpClient httpClient) { Mono<LocationOptionsResponse> mono = this.getClient(httpClient).getPhonePlanLocationOptions(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, PHONE_PLAN_ID, LOCALE); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getLocationOptions().getLabelId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getAllAreaCodes(HttpClient httpClient) { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<AreaCodes> mono = this.getClient(httpClient).getAllAreaCodes("selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions); StepVerifier.create(mono) .assertNext(item -> { assertTrue(item.getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getAllAreaCodesWithResponse(HttpClient httpClient) { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = 
new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<Response<AreaCodes>> mono = this.getClient(httpClient).getAllAreaCodesWithResponse( "selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertTrue(item.getValue().getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void updateCapabilities(HttpClient httpClient) { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<UpdateNumberCapabilitiesResponse> mono = this.getClient(httpClient).updateCapabilities(updateMap); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void updateCapabilitiesWithResponse(HttpClient httpClient) { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<Response<UpdateNumberCapabilitiesResponse>> mono = this.getClient(httpClient).updateCapabilitiesWithResponse(updateMap, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, 
item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getCapabilitiesUpdate(HttpClient httpClient) { Mono<UpdatePhoneNumberCapabilitiesResponse> mono = this.getClient(httpClient).getCapabilitiesUpdate(CAPABILITIES_ID); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getCapabilitiesUpdateWithResponse(HttpClient httpClient) { Mono<Response<UpdatePhoneNumberCapabilitiesResponse>> mono = this.getClient(httpClient).getCapabilitiesUpdateWithResponse(CAPABILITIES_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void createSearch(HttpClient httpClient) { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<CreateSearchResponse> mono = this.getClient(httpClient).createSearch(createSearchOptions); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getSearchId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void createSearchWithResponse(HttpClient httpClient) { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") 
.setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<Response<CreateSearchResponse>> mono = this.getClient(httpClient).createSearchWithResponse(createSearchOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(201, item.getStatusCode()); assertNotNull(item.getValue().getSearchId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getSearchById(HttpClient httpClient) { Mono<PhoneNumberSearch> mono = this.getClient(httpClient).getSearchById(SEARCH_ID); StepVerifier.create(mono) .assertNext(item -> { assertEquals(SEARCH_ID, item.getSearchId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getSearchByIdWithResponse(HttpClient httpClient) { Mono<Response<PhoneNumberSearch>> mono = this.getClient(httpClient).getSearchByIdWithResponse(SEARCH_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals(SEARCH_ID, item.getValue().getSearchId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void cancelSearch(HttpClient httpClient) { Mono<Void> mono = this.getClient(httpClient).cancelSearch(SEARCH_ID_TO_CANCEL); StepVerifier.create(mono).verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void cancelSearchWithResponse(HttpClient httpClient) { Mono<Response<Void>> mono = this.getClient(httpClient).cancelSearchWithResponse(SEARCH_ID_TO_CANCEL, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void configureNumber(HttpClient httpClient) { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: 
Mono<Void> mono = this.getClient(httpClient).configureNumber(number, pstnConfiguration); StepVerifier.create(mono).verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void configureNumberWithResponse(HttpClient httpClient) { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Response<Void>> mono = this.getClient(httpClient).configureNumberWithResponse(number, pstnConfiguration, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getNumberConfiguration(HttpClient httpClient) { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<NumberConfigurationResponse> mono = this.getClient(httpClient).getNumberConfiguration(number); StepVerifier.create(mono) .assertNext(item -> { assertEquals("ApplicationId", item.getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getNumberConfigurationWithResponse(HttpClient httpClient) { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<Response<NumberConfigurationResponse>> mono = this.getClient(httpClient).getNumberConfigurationWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals("ApplicationId", item.getValue().getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void unconfigureNumber(HttpClient httpClient) { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Void> mono = this.getClient(httpClient).unconfigureNumber(number); StepVerifier.create(mono).verifyComplete(); 
} @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void unconfigureNumberWithResponse(HttpClient httpClient) { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Response<Void>> mono = this.getClient(httpClient).unconfigureNumberWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void beginCreateSearch(HttpClient httpClient) { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription(SEARCH_OPTIONS_DESCRIPTION) .setDisplayName(SEARCH_OPTIONS_NAME) .setPhonePlanIds(phonePlanIds) .setQuantity(2); Duration duration = Duration.ofSeconds(1); PhoneNumberAsyncClient client = this.getClient(httpClient); PollerFlux<PhoneNumberSearch, PhoneNumberSearch> poller = client.beginCreateSearch(createSearchOptions, duration); AsyncPollResponse<PhoneNumberSearch, PhoneNumberSearch> asyncRes = poller.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) .blockLast(); PhoneNumberSearch testResult = asyncRes.getValue(); assertEquals(testResult.getPhoneNumbers().size(), 2); assertNotNull(testResult.getSearchId()); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void beginPurchaseSearch(HttpClient httpClient) { Duration pollInterval = Duration.ofSeconds(1); PhoneNumberAsyncClient client = this.getClient(httpClient); PollerFlux<Void, Void> poller = client.beginPurchaseSearch(SEARCH_ID, pollInterval); poller.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) .blockLast(); Mono<PhoneNumberSearch> testResult = client.getSearchById(SEARCH_ID); StepVerifier.create(testResult) .assertNext(item -> { assertEquals(SearchStatus.SUCCESS, 
item.getStatus()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase private PhoneNumberAsyncClient getClient(HttpClient httpClient) { return super.getClientBuilder(httpClient).buildAsyncClient(); } }
class PhoneNumberAsyncClientIntegrationTest extends PhoneNumberIntegrationTestBase { @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void createAsyncPhoneNumberClientWithConnectionString(HttpClient httpClient) { PhoneNumberAsyncClient phoneNumberAsyncClient = getClientBuilderWithConnectionString(httpClient).buildAsyncClient(); assertNotNull(phoneNumberAsyncClient); PagedFlux<AcquiredPhoneNumber> pagedFlux = phoneNumberAsyncClient.listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void listAllPhoneNumbers(HttpClient httpClient) { PagedFlux<AcquiredPhoneNumber> pagedFlux = this.getClient(httpClient).listAllPhoneNumbers(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhoneNumber()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void listPhonePlanGroups(HttpClient httpClient) { PagedFlux<PhonePlanGroup> pagedFlux = this.getClient(httpClient).listPhonePlanGroups(COUNTRY_CODE, LOCALE, true); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanGroupId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void listPhonePlans(HttpClient httpClient) { PagedFlux<PhonePlan> pagedFlux = this.getClient(httpClient).listPhonePlans(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getPhonePlanId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void listAllReleases(HttpClient httpClient) { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient(httpClient).listAllReleases(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @ParameterizedTest 
@MethodSource("com.azure.core.test.TestBase public void listAllSearches(HttpClient httpClient) { PagedFlux<PhoneNumberEntity> pagedFlux = this.getClient(httpClient).listAllSearches(); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void listAllSupportedCountries(HttpClient httpClient) { PagedFlux<PhoneNumberCountry> pagedFlux = this.getClient(httpClient).listAllSupportedCountries(LOCALE); StepVerifier.create(pagedFlux.next()) .assertNext(item -> { assertNotNull(item.getCountryCode()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getPhonePlanLocationOptions(HttpClient httpClient) { Mono<LocationOptionsResponse> mono = this.getClient(httpClient).getPhonePlanLocationOptions(COUNTRY_CODE, PHONE_PLAN_GROUP_ID, PHONE_PLAN_ID, LOCALE); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getLocationOptions().getLabelId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getAllAreaCodes(HttpClient httpClient) { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<AreaCodes> mono = this.getClient(httpClient).getAllAreaCodes("selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions); StepVerifier.create(mono) .assertNext(item -> { assertTrue(item.getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getAllAreaCodesWithResponse(HttpClient httpClient) { List<LocationOptionsQuery> locationOptions = new ArrayList<>(); LocationOptionsQuery query = 
new LocationOptionsQuery(); query.setLabelId("state"); query.setOptionsValue(LOCATION_OPTION_STATE); locationOptions.add(query); query = new LocationOptionsQuery(); query.setLabelId("city"); query.setOptionsValue(LOCATION_OPTION_CITY); locationOptions.add(query); Mono<Response<AreaCodes>> mono = this.getClient(httpClient).getAllAreaCodesWithResponse( "selection", COUNTRY_CODE, PHONE_PLAN_ID, locationOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertTrue(item.getValue().getPrimaryAreaCodes().size() > 0); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void updateCapabilities(HttpClient httpClient) { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<UpdateNumberCapabilitiesResponse> mono = this.getClient(httpClient).updateCapabilities(updateMap); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void updateCapabilitiesWithResponse(HttpClient httpClient) { List<Capability> capabilitiesToAdd = new ArrayList<>(); capabilitiesToAdd.add(Capability.INBOUND_CALLING); NumberUpdateCapabilities update = new NumberUpdateCapabilities(); update.setAdd(capabilitiesToAdd); Map<PhoneNumber, NumberUpdateCapabilities> updateMap = new HashMap<>(); updateMap.put(new PhoneNumber(PHONENUMBER_FOR_CAPABILITIES), update); Mono<Response<UpdateNumberCapabilitiesResponse>> mono = this.getClient(httpClient).updateCapabilitiesWithResponse(updateMap, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, 
item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getCapabilitiesUpdate(HttpClient httpClient) { Mono<UpdatePhoneNumberCapabilitiesResponse> mono = this.getClient(httpClient).getCapabilitiesUpdate(CAPABILITIES_ID); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getCapabilitiesUpdateId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getCapabilitiesUpdateWithResponse(HttpClient httpClient) { Mono<Response<UpdatePhoneNumberCapabilitiesResponse>> mono = this.getClient(httpClient).getCapabilitiesUpdateWithResponse(CAPABILITIES_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertNotNull(item.getValue().getCapabilitiesUpdateId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void createSearch(HttpClient httpClient) { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") .setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<CreateSearchResponse> mono = this.getClient(httpClient).createSearch(createSearchOptions); StepVerifier.create(mono) .assertNext(item -> { assertNotNull(item.getSearchId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void createSearchWithResponse(HttpClient httpClient) { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription("testsearch20200014") .setDisplayName("testsearch20200014") 
.setPhonePlanIds(phonePlanIds) .setQuantity(1); Mono<Response<CreateSearchResponse>> mono = this.getClient(httpClient).createSearchWithResponse(createSearchOptions, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(201, item.getStatusCode()); assertNotNull(item.getValue().getSearchId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getSearchById(HttpClient httpClient) { Mono<PhoneNumberSearch> mono = this.getClient(httpClient).getSearchById(SEARCH_ID); StepVerifier.create(mono) .assertNext(item -> { assertEquals(SEARCH_ID, item.getSearchId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getSearchByIdWithResponse(HttpClient httpClient) { Mono<Response<PhoneNumberSearch>> mono = this.getClient(httpClient).getSearchByIdWithResponse(SEARCH_ID, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals(SEARCH_ID, item.getValue().getSearchId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void cancelSearch(HttpClient httpClient) { Mono<Void> mono = this.getClient(httpClient).cancelSearch(SEARCH_ID_TO_CANCEL); StepVerifier.create(mono).verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void cancelSearchWithResponse(HttpClient httpClient) { Mono<Response<Void>> mono = this.getClient(httpClient).cancelSearchWithResponse(SEARCH_ID_TO_CANCEL, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(202, item.getStatusCode()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void configureNumber(HttpClient httpClient) { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: 
Mono<Void> mono = this.getClient(httpClient).configureNumber(number, pstnConfiguration); StepVerifier.create(mono).verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void configureNumberWithResponse(HttpClient httpClient) { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_CONFIGURE); PstnConfiguration pstnConfiguration = new PstnConfiguration(); pstnConfiguration.setApplicationId("ApplicationId"); pstnConfiguration.setCallbackUrl("https: Mono<Response<Void>> mono = this.getClient(httpClient).configureNumberWithResponse(number, pstnConfiguration, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getNumberConfiguration(HttpClient httpClient) { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<NumberConfigurationResponse> mono = this.getClient(httpClient).getNumberConfiguration(number); StepVerifier.create(mono) .assertNext(item -> { assertEquals("ApplicationId", item.getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void getNumberConfigurationWithResponse(HttpClient httpClient) { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_GET_CONFIG); Mono<Response<NumberConfigurationResponse>> mono = this.getClient(httpClient).getNumberConfigurationWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); assertEquals("ApplicationId", item.getValue().getPstnConfiguration().getApplicationId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void unconfigureNumber(HttpClient httpClient) { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Void> mono = this.getClient(httpClient).unconfigureNumber(number); StepVerifier.create(mono).verifyComplete(); 
} @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void unconfigureNumberWithResponse(HttpClient httpClient) { PhoneNumber number = new PhoneNumber(PHONENUMBER_TO_UNCONFIGURE); Mono<Response<Void>> mono = this.getClient(httpClient).unconfigureNumberWithResponse(number, Context.NONE); StepVerifier.create(mono) .assertNext(item -> { assertEquals(200, item.getStatusCode()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void beginCreateSearch(HttpClient httpClient) { List<String> phonePlanIds = new ArrayList<>(); phonePlanIds.add(PHONE_PLAN_ID); CreateSearchOptions createSearchOptions = new CreateSearchOptions(); createSearchOptions .setAreaCode(AREA_CODE_FOR_SEARCH) .setDescription(SEARCH_OPTIONS_DESCRIPTION) .setDisplayName(SEARCH_OPTIONS_NAME) .setPhonePlanIds(phonePlanIds) .setQuantity(2); Duration duration = Duration.ofSeconds(1); PhoneNumberAsyncClient client = this.getClient(httpClient); PollerFlux<PhoneNumberSearch, PhoneNumberSearch> poller = client.beginCreateSearch(createSearchOptions, duration); Mono<AsyncPollResponse<PhoneNumberSearch, PhoneNumberSearch>> asyncRes = poller.last(); StepVerifier.create(asyncRes) .assertNext(item -> { assertEquals(item.getValue().getPhoneNumbers().size(), 2); assertNotNull(item.getValue().getSearchId()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("com.azure.core.test.TestBase public void beginPurchaseSearch(HttpClient httpClient) { Duration pollInterval = Duration.ofSeconds(1); PhoneNumberAsyncClient client = this.getClient(httpClient); PollerFlux<Void, Void> poller = client.beginPurchaseSearch(SEARCH_ID, pollInterval); poller.takeUntil(apr -> apr.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) .blockLast(); Mono<PhoneNumberSearch> testResult = client.getSearchById(SEARCH_ID); StepVerifier.create(testResult) .assertNext(item -> { assertEquals(SearchStatus.SUCCESS, item.getStatus()); }) .verifyComplete(); } @ParameterizedTest 
@MethodSource("com.azure.core.test.TestBase private PhoneNumberAsyncClient getClient(HttpClient httpClient) { return super.getClientBuilder(httpClient).buildAsyncClient(); } }
why aren't we capturing the exception stacktace?
public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } }
logger.warn("Error while recording value for client telemetry", ex.getMessage());
public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } }
class ClientTelemetry { public final static int REQUEST_LATENCY_MAX = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ExecutorService executor = Executors.newSingleThreadExecutor(); private final Scheduler scheduler = Schedulers.fromExecutor(executor); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient 
httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.executor.shutdown(); logger.debug("GlobalEndpointManager closed."); } private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { e.printStackTrace(); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". 
Exception: {}", ex.toString(), ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { DoubleHistogram cpuHistogram = new DoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); DoubleHistogram memoryHistogram = new DoubleHistogram(ClientTelemetry.MEMORY_MAX, ClientTelemetry.MEMORY_PRECISION); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } }
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { logger.error("Error which parsing client telemetry into json. ", e); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". 
Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new 
ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } private static class DaemonThreadFactory implements ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
ditto
public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } }
logger.warn("Error while recording value for client telemetry", ex.getMessage());
public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } }
class ClientTelemetry { public final static int REQUEST_LATENCY_MAX = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ExecutorService executor = Executors.newSingleThreadExecutor(); private final Scheduler scheduler = Schedulers.fromExecutor(executor); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient 
httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.executor.shutdown(); logger.debug("GlobalEndpointManager closed."); } private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { e.printStackTrace(); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". 
Exception: {}", ex.toString(), ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { DoubleHistogram cpuHistogram = new DoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); DoubleHistogram memoryHistogram = new DoubleHistogram(ClientTelemetry.MEMORY_MAX, ClientTelemetry.MEMORY_PRECISION); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } }
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { logger.error("Error which parsing client telemetry into json. ", e); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". 
Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new 
ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } private static class DaemonThreadFactory implements ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
you are capturing the exception message twice.
private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { e.printStackTrace(); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: {}", ex.toString(), ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); }
". Exception: {}", ex.toString(), ex);
private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { logger.error("Error which parsing client telemetry into json. ", e); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); }
class ClientTelemetry { public final static int REQUEST_LATENCY_MAX = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ExecutorService executor = Executors.newSingleThreadExecutor(); private final Scheduler scheduler = Schedulers.fromExecutor(executor); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient 
httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.executor.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); }).onErrorResume(throwable -> { logger.info("Unable 
to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { DoubleHistogram cpuHistogram = new DoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); DoubleHistogram memoryHistogram = new DoubleHistogram(ClientTelemetry.MEMORY_MAX, ClientTelemetry.MEMORY_PRECISION); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); 
Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } }
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. 
", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } private static class DaemonThreadFactory implements 
ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
I see, this is trace logging, while others are warning and info, is there a reason for this?
private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { e.printStackTrace(); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: {}", ex.toString(), ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); }
logger.trace("client telemetry not enabled");
private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { logger.error("Error which parsing client telemetry into json. ", e); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); }
class ClientTelemetry { public final static int REQUEST_LATENCY_MAX = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ExecutorService executor = Executors.newSingleThreadExecutor(); private final Scheduler scheduler = Schedulers.fromExecutor(executor); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient 
httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.executor.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); }).onErrorResume(throwable -> { logger.info("Unable 
to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); 
payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } }
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. 
", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } private static class DaemonThreadFactory implements 
ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
Are we doing recursion inside `flatMap` ? There could be a better way to do this instead of doing it though `flatMap`- please take a look at `expand` operation from project reactor.
private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { e.printStackTrace(); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: {}", ex.toString(), ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); }
return this.sendClientTelemetry();
private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { logger.error("Error which parsing client telemetry into json. ", e); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); }
class ClientTelemetry { public final static int REQUEST_LATENCY_MAX = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ExecutorService executor = Executors.newSingleThreadExecutor(); private final Scheduler scheduler = Schedulers.fromExecutor(executor); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient 
httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.executor.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); }).onErrorResume(throwable -> { logger.info("Unable 
to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); 
payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } }
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. 
", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } private static class DaemonThreadFactory implements 
ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
I wonder why we can't keep it as a double in the client telemetry as well?
/**
 * Core pagination pipeline: applies the page-producing function and decorates it with
 * tracing spans and client-telemetry recording.
 *
 * <p>Per-page latency is measured by resetting {@code startTime} after every emission
 * (and after an error), so each telemetry sample covers one page, not the whole query.</p>
 *
 * @param pagedFluxOptions options carrying tracer, telemetry and paging state
 * @param context reactor subscriber context used as the parent for the tracing span
 * @return the decorated flux of feed responses
 */
private Flux<FeedResponse<T>> byPage(CosmosPagedFluxOptions pagedFluxOptions, Context context) {
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    AtomicReference<Instant> startTime = new AtomicReference<>();
    return this.optionsFluxFunction.apply(pagedFluxOptions).doOnSubscribe(ignoredValue -> {
        // Open the tracing span lazily, on subscription, so unsubscribed fluxes cost nothing.
        if (pagedFluxOptions.getTracerProvider().isEnabled()) {
            parentContext.set(pagedFluxOptions.getTracerProvider().startSpan(pagedFluxOptions.getTracerSpanName(),
                pagedFluxOptions.getDatabaseId(), pagedFluxOptions.getServiceEndpoint(), context));
        }
        startTime.set(Instant.now());
    }).doOnComplete(() -> {
        if (pagedFluxOptions.getTracerProvider().isEnabled()) {
            pagedFluxOptions.getTracerProvider().endSpan(parentContext.get(), Signal.complete(), HttpConstants.StatusCodes.OK);
        }
    }).doOnError(throwable -> {
        if (pagedFluxOptions.getTracerProvider().isEnabled()) {
            pagedFluxOptions.getTracerProvider().endSpan(parentContext.get(), Signal.error(throwable), TracerProvider.ERROR_CODE);
        }
        // Failed pages are recorded with status code 0; only CosmosException carries a
        // request charge we can report.
        if (pagedFluxOptions.getCosmosAsyncClient() != null &&
            Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(pagedFluxOptions.getCosmosAsyncClient())) &&
            throwable instanceof CosmosException) {
            CosmosException cosmosException = (CosmosException) throwable;
            fillClientTelemetry(pagedFluxOptions.getCosmosAsyncClient(), 0, pagedFluxOptions.getContainerId(),
                pagedFluxOptions.getDatabaseId(), pagedFluxOptions.getOperationType(),
                pagedFluxOptions.getResourceType(),
                BridgeInternal.getContextClient(pagedFluxOptions.getCosmosAsyncClient()).getConsistencyLevel(),
                (float) cosmosException.getRequestCharge(), Duration.between(startTime.get(), Instant.now()));
        }
        startTime.set(Instant.now());
    }).doOnNext(feedResponse -> {
        // Invoke the user-supplied side-effect handler (see handle(...)) for each page.
        if (feedResponseConsumer != null) {
            feedResponseConsumer.accept(feedResponse);
        }
        if (pagedFluxOptions.getCosmosAsyncClient() != null &&
            Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(pagedFluxOptions.getCosmosAsyncClient()))) {
            fillClientTelemetry(pagedFluxOptions.getCosmosAsyncClient(), HttpConstants.StatusCodes.OK,
                pagedFluxOptions.getContainerId(), pagedFluxOptions.getDatabaseId(),
                pagedFluxOptions.getOperationType(), pagedFluxOptions.getResourceType(),
                BridgeInternal.getContextClient(pagedFluxOptions.getCosmosAsyncClient()).getConsistencyLevel(),
                (float) feedResponse.getRequestCharge(), Duration.between(startTime.get(), Instant.now()));
            startTime.set(Instant.now());
        }
    });
}
(float) cosmosException.getRequestCharge(), Duration.between(startTime.get(), Instant.now()));
Then invoke it with each feedResponse: if (feedResponseConsumer != null) { feedResponseConsumer.accept(feedResponse); }
/**
 * Cosmos-specific {@code ContinuablePagedFlux}: produces {@link FeedResponse} pages via
 * a supplied options-to-flux function and records tracing and client telemetry per page.
 *
 * NOTE(review): in this variant the private byPage pipeline never invokes
 * {@code feedResponseConsumer}, so a consumer attached via {@link #handle} is stored but
 * ignored — TODO confirm and wire it into doOnNext.
 */
class CosmosPagedFlux<T> extends ContinuablePagedFlux<String, T, FeedResponse<T>> {

    // Produces the underlying page flux from the per-subscription options.
    private final Function<CosmosPagedFluxOptions, Flux<FeedResponse<T>>> optionsFluxFunction;
    // Optional per-page side-effect handler supplied via handle(...); may be null.
    private final Consumer<FeedResponse<T>> feedResponseConsumer;

    CosmosPagedFlux(Function<CosmosPagedFluxOptions, Flux<FeedResponse<T>>> optionsFluxFunction) {
        this.optionsFluxFunction = optionsFluxFunction;
        this.feedResponseConsumer = null;
    }

    CosmosPagedFlux(Function<CosmosPagedFluxOptions, Flux<FeedResponse<T>>> optionsFluxFunction,
                    Consumer<FeedResponse<T>> feedResponseConsumer) {
        this.optionsFluxFunction = optionsFluxFunction;
        this.feedResponseConsumer = feedResponseConsumer;
    }

    /**
     * Handle for invoking "side-effects" on each FeedResponse returned by CosmosPagedFlux
     *
     * @param feedResponseConsumer handler
     * @return CosmosPagedFlux instance with attached handler
     */
    @Beta(value = Beta.SinceVersion.V4_6_0)
    public CosmosPagedFlux<T> handle(Consumer<FeedResponse<T>> feedResponseConsumer) {
        // Immutable style: returns a new instance rather than mutating this one.
        return new CosmosPagedFlux<T>(this.optionsFluxFunction, feedResponseConsumer);
    }

    @Override
    public Flux<FeedResponse<T>> byPage() {
        CosmosPagedFluxOptions cosmosPagedFluxOptions = new CosmosPagedFluxOptions();
        return FluxUtil.fluxContext(context -> byPage(cosmosPagedFluxOptions, context));
    }

    @Override
    public Flux<FeedResponse<T>> byPage(String continuationToken) {
        CosmosPagedFluxOptions cosmosPagedFluxOptions = new CosmosPagedFluxOptions();
        cosmosPagedFluxOptions.setRequestContinuation(continuationToken);
        return FluxUtil.fluxContext(context -> byPage(cosmosPagedFluxOptions, context));
    }

    @Override
    public Flux<FeedResponse<T>> byPage(int preferredPageSize) {
        CosmosPagedFluxOptions cosmosPagedFluxOptions = new CosmosPagedFluxOptions();
        cosmosPagedFluxOptions.setMaxItemCount(preferredPageSize);
        return FluxUtil.fluxContext(context -> byPage(cosmosPagedFluxOptions, context));
    }

    @Override
    public Flux<FeedResponse<T>> byPage(String continuationToken, int preferredPageSize) {
        CosmosPagedFluxOptions cosmosPagedFluxOptions = new CosmosPagedFluxOptions();
        cosmosPagedFluxOptions.setRequestContinuation(continuationToken);
        cosmosPagedFluxOptions.setMaxItemCount(preferredPageSize);
        return FluxUtil.fluxContext(context -> byPage(cosmosPagedFluxOptions, context));
    }

    /**
     * Subscribe to consume all items of type {@code T} in the sequence respectively. This is recommended for most
     * common scenarios. This will seamlessly fetch next page when required and provide with a {@link Flux} of items.
     *
     * @param coreSubscriber The subscriber for this {@link CosmosPagedFlux}
     */
    @Override
    public void subscribe(CoreSubscriber<? super T> coreSubscriber) {
        Flux<FeedResponse<T>> pagedResponse = this.byPage();
        pagedResponse.flatMap(tFeedResponse -> {
            IterableStream<T> elements = tFeedResponse.getElements();
            if (elements == null) {
                return Flux.empty();
            }
            return Flux.fromIterable(elements);
        }).subscribe(coreSubscriber);
    }

    // Core pipeline: page flux decorated with tracing spans and telemetry. startTime is
    // reset after each emission/error so latency is measured per page.
    private Flux<FeedResponse<T>> byPage(CosmosPagedFluxOptions pagedFluxOptions, Context context) {
        final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
        AtomicReference<Instant> startTime = new AtomicReference<>();
        return this.optionsFluxFunction.apply(pagedFluxOptions).doOnSubscribe(ignoredValue -> {
            if (pagedFluxOptions.getTracerProvider().isEnabled()) {
                parentContext.set(pagedFluxOptions.getTracerProvider().startSpan(pagedFluxOptions.getTracerSpanName(),
                    pagedFluxOptions.getDatabaseId(), pagedFluxOptions.getServiceEndpoint(), context));
            }
            startTime.set(Instant.now());
        }).doOnComplete(() -> {
            if (pagedFluxOptions.getTracerProvider().isEnabled()) {
                pagedFluxOptions.getTracerProvider().endSpan(parentContext.get(), Signal.complete(), HttpConstants.StatusCodes.OK);
            }
        }).doOnError(throwable -> {
            if (pagedFluxOptions.getTracerProvider().isEnabled()) {
                pagedFluxOptions.getTracerProvider().endSpan(parentContext.get(), Signal.error(throwable), TracerProvider.ERROR_CODE);
            }
            // Failures are recorded with status code 0; only CosmosException carries a charge.
            if (pagedFluxOptions.getCosmosAsyncClient() != null &&
                Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(pagedFluxOptions.getCosmosAsyncClient())) &&
                throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                fillClientTelemetry(pagedFluxOptions.getCosmosAsyncClient(), 0, pagedFluxOptions.getContainerId(),
                    pagedFluxOptions.getDatabaseId(), pagedFluxOptions.getOperationType(),
                    pagedFluxOptions.getResourceType(),
                    BridgeInternal.getContextClient(pagedFluxOptions.getCosmosAsyncClient()).getConsistencyLevel(),
                    (float) cosmosException.getRequestCharge(), Duration.between(startTime.get(), Instant.now()));
            }
            startTime.set(Instant.now());
        }).doOnNext(feedResponse -> {
            if (pagedFluxOptions.getCosmosAsyncClient() != null &&
                Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(pagedFluxOptions.getCosmosAsyncClient()))) {
                fillClientTelemetry(pagedFluxOptions.getCosmosAsyncClient(), HttpConstants.StatusCodes.OK,
                    pagedFluxOptions.getContainerId(), pagedFluxOptions.getDatabaseId(),
                    pagedFluxOptions.getOperationType(), pagedFluxOptions.getResourceType(),
                    BridgeInternal.getContextClient(pagedFluxOptions.getCosmosAsyncClient()).getConsistencyLevel(),
                    (float) feedResponse.getRequestCharge(), Duration.between(startTime.get(), Instant.now()));
                startTime.set(Instant.now());
            }
        });
    }

    // Records latency and request-charge histograms for one page, creating the histogram
    // on first use. Latency is folded in as microseconds (toNanos() / 1000).
    public void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                    int statusCode,
                                    String containerId,
                                    String databaseId,
                                    OperationType operationType,
                                    ResourceType resourceType,
                                    ConsistencyLevel consistencyLevel,
                                    float requestCharge,
                                    Duration latency) {
        ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
        ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, statusCode, containerId, databaseId
            , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
            ClientTelemetry.REQUEST_LATENCY_UNIT);
        ConcurrentDoubleHistogram latencyHistogram =
            telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
        if (latencyHistogram != null) {
            ClientTelemetry.recordValue(latencyHistogram, latency.toNanos() / 1000);
        } else {
            // Successful requests get higher precision than failures.
            if (statusCode == HttpConstants.StatusCodes.OK) {
                latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX,
                    ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
            } else {
                latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX,
                    ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
            }
            latencyHistogram.setAutoResize(true);
            ClientTelemetry.recordValue(latencyHistogram, latency.toNanos() / 1000);
            telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
        }
        ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, statusCode, containerId, databaseId
            , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME,
            ClientTelemetry.REQUEST_CHARGE_UNIT);
        ConcurrentDoubleHistogram requestChargeHistogram =
            telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
        if (requestChargeHistogram != null) {
            ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
        } else {
            requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX,
                ClientTelemetry.REQUEST_CHARGE_PRECISION);
            requestChargeHistogram.setAutoResize(true);
            ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
            telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
                requestChargeHistogram);
        }
    }

    // Builds the histogram key. Falls back to the client's consistency level when none is
    // supplied. NOTE(review): setDatabasesName looks like a typo for setDatabaseName —
    // verify against ReportPayload's API.
    private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                              int statusCode,
                                              String containerId,
                                              String databaseId,
                                              OperationType operationType,
                                              ResourceType resourceType,
                                              ConsistencyLevel consistencyLevel,
                                              String metricsName,
                                              String unitName) {
        ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
        reportPayload.setConsistency(consistencyLevel == null ?
            BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel() :
            consistencyLevel);
        reportPayload.setDatabasesName(databaseId);
        reportPayload.setContainerName(containerId);
        reportPayload.setOperation(operationType);
        reportPayload.setResource(resourceType);
        reportPayload.setStatusCode(statusCode);
        return reportPayload;
    }
}
/**
 * Cosmos-specific {@code ContinuablePagedFlux}: produces {@link FeedResponse} pages via
 * a supplied options-to-flux function and records tracing and client telemetry per page.
 *
 * NOTE(review): the private byPage pipeline here never invokes
 * {@code feedResponseConsumer}; a consumer attached via {@link #handle} is stored but
 * not called — TODO confirm and invoke it in doOnNext.
 */
class CosmosPagedFlux<T> extends ContinuablePagedFlux<String, T, FeedResponse<T>> {

    // Produces the underlying page flux from the per-subscription options.
    private final Function<CosmosPagedFluxOptions, Flux<FeedResponse<T>>> optionsFluxFunction;
    // Optional per-page side-effect handler supplied via handle(...); may be null.
    private final Consumer<FeedResponse<T>> feedResponseConsumer;

    CosmosPagedFlux(Function<CosmosPagedFluxOptions, Flux<FeedResponse<T>>> optionsFluxFunction) {
        this.optionsFluxFunction = optionsFluxFunction;
        this.feedResponseConsumer = null;
    }

    CosmosPagedFlux(Function<CosmosPagedFluxOptions, Flux<FeedResponse<T>>> optionsFluxFunction,
                    Consumer<FeedResponse<T>> feedResponseConsumer) {
        this.optionsFluxFunction = optionsFluxFunction;
        this.feedResponseConsumer = feedResponseConsumer;
    }

    /**
     * Handle for invoking "side-effects" on each FeedResponse returned by CosmosPagedFlux
     *
     * @param feedResponseConsumer handler
     * @return CosmosPagedFlux instance with attached handler
     */
    @Beta(value = Beta.SinceVersion.V4_6_0)
    public CosmosPagedFlux<T> handle(Consumer<FeedResponse<T>> feedResponseConsumer) {
        // Immutable style: returns a new instance rather than mutating this one.
        return new CosmosPagedFlux<T>(this.optionsFluxFunction, feedResponseConsumer);
    }

    @Override
    public Flux<FeedResponse<T>> byPage() {
        CosmosPagedFluxOptions cosmosPagedFluxOptions = new CosmosPagedFluxOptions();
        return FluxUtil.fluxContext(context -> byPage(cosmosPagedFluxOptions, context));
    }

    @Override
    public Flux<FeedResponse<T>> byPage(String continuationToken) {
        CosmosPagedFluxOptions cosmosPagedFluxOptions = new CosmosPagedFluxOptions();
        cosmosPagedFluxOptions.setRequestContinuation(continuationToken);
        return FluxUtil.fluxContext(context -> byPage(cosmosPagedFluxOptions, context));
    }

    @Override
    public Flux<FeedResponse<T>> byPage(int preferredPageSize) {
        CosmosPagedFluxOptions cosmosPagedFluxOptions = new CosmosPagedFluxOptions();
        cosmosPagedFluxOptions.setMaxItemCount(preferredPageSize);
        return FluxUtil.fluxContext(context -> byPage(cosmosPagedFluxOptions, context));
    }

    @Override
    public Flux<FeedResponse<T>> byPage(String continuationToken, int preferredPageSize) {
        CosmosPagedFluxOptions cosmosPagedFluxOptions = new CosmosPagedFluxOptions();
        cosmosPagedFluxOptions.setRequestContinuation(continuationToken);
        cosmosPagedFluxOptions.setMaxItemCount(preferredPageSize);
        return FluxUtil.fluxContext(context -> byPage(cosmosPagedFluxOptions, context));
    }

    /**
     * Subscribe to consume all items of type {@code T} in the sequence respectively. This is recommended for most
     * common scenarios. This will seamlessly fetch next page when required and provide with a {@link Flux} of items.
     *
     * @param coreSubscriber The subscriber for this {@link CosmosPagedFlux}
     */
    @Override
    public void subscribe(CoreSubscriber<? super T> coreSubscriber) {
        Flux<FeedResponse<T>> pagedResponse = this.byPage();
        pagedResponse.flatMap(tFeedResponse -> {
            IterableStream<T> elements = tFeedResponse.getElements();
            if (elements == null) {
                return Flux.empty();
            }
            return Flux.fromIterable(elements);
        }).subscribe(coreSubscriber);
    }

    // Core pipeline: page flux decorated with tracing spans and telemetry. startTime is
    // reset after each emission/error so latency is measured per page.
    private Flux<FeedResponse<T>> byPage(CosmosPagedFluxOptions pagedFluxOptions, Context context) {
        final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
        AtomicReference<Instant> startTime = new AtomicReference<>();
        return this.optionsFluxFunction.apply(pagedFluxOptions).doOnSubscribe(ignoredValue -> {
            if (pagedFluxOptions.getTracerProvider().isEnabled()) {
                parentContext.set(pagedFluxOptions.getTracerProvider().startSpan(pagedFluxOptions.getTracerSpanName(),
                    pagedFluxOptions.getDatabaseId(), pagedFluxOptions.getServiceEndpoint(), context));
            }
            startTime.set(Instant.now());
        }).doOnComplete(() -> {
            if (pagedFluxOptions.getTracerProvider().isEnabled()) {
                pagedFluxOptions.getTracerProvider().endSpan(parentContext.get(), Signal.complete(), HttpConstants.StatusCodes.OK);
            }
        }).doOnError(throwable -> {
            if (pagedFluxOptions.getTracerProvider().isEnabled()) {
                pagedFluxOptions.getTracerProvider().endSpan(parentContext.get(), Signal.error(throwable), TracerProvider.ERROR_CODE);
            }
            // Failures are recorded with status code 0; only CosmosException carries a charge.
            if (pagedFluxOptions.getCosmosAsyncClient() != null &&
                Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(pagedFluxOptions.getCosmosAsyncClient())) &&
                throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                fillClientTelemetry(pagedFluxOptions.getCosmosAsyncClient(), 0, pagedFluxOptions.getContainerId(),
                    pagedFluxOptions.getDatabaseId(), pagedFluxOptions.getOperationType(),
                    pagedFluxOptions.getResourceType(),
                    BridgeInternal.getContextClient(pagedFluxOptions.getCosmosAsyncClient()).getConsistencyLevel(),
                    (float) cosmosException.getRequestCharge(), Duration.between(startTime.get(), Instant.now()));
            }
            startTime.set(Instant.now());
        }).doOnNext(feedResponse -> {
            if (pagedFluxOptions.getCosmosAsyncClient() != null &&
                Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(pagedFluxOptions.getCosmosAsyncClient()))) {
                fillClientTelemetry(pagedFluxOptions.getCosmosAsyncClient(), HttpConstants.StatusCodes.OK,
                    pagedFluxOptions.getContainerId(), pagedFluxOptions.getDatabaseId(),
                    pagedFluxOptions.getOperationType(), pagedFluxOptions.getResourceType(),
                    BridgeInternal.getContextClient(pagedFluxOptions.getCosmosAsyncClient()).getConsistencyLevel(),
                    (float) feedResponse.getRequestCharge(), Duration.between(startTime.get(), Instant.now()));
                startTime.set(Instant.now());
            }
        });
    }

    // Records latency and request-charge histograms for one page, creating the histogram
    // on first use. Latency is folded in as microseconds (toNanos() / 1000).
    public void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                    int statusCode,
                                    String containerId,
                                    String databaseId,
                                    OperationType operationType,
                                    ResourceType resourceType,
                                    ConsistencyLevel consistencyLevel,
                                    float requestCharge,
                                    Duration latency) {
        ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
        ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, statusCode, containerId, databaseId
            , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_LATENCY_NAME,
            ClientTelemetry.REQUEST_LATENCY_UNIT);
        ConcurrentDoubleHistogram latencyHistogram =
            telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
        if (latencyHistogram != null) {
            ClientTelemetry.recordValue(latencyHistogram, latency.toNanos() / 1000);
        } else {
            // Successful requests get higher precision than failures.
            if (statusCode == HttpConstants.StatusCodes.OK) {
                latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC,
                    ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
            } else {
                latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC,
                    ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
            }
            latencyHistogram.setAutoResize(true);
            ClientTelemetry.recordValue(latencyHistogram, latency.toNanos() / 1000);
            telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
        }
        ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, statusCode, containerId, databaseId
            , operationType, resourceType, consistencyLevel, ClientTelemetry.REQUEST_CHARGE_NAME,
            ClientTelemetry.REQUEST_CHARGE_UNIT);
        ConcurrentDoubleHistogram requestChargeHistogram =
            telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
        if (requestChargeHistogram != null) {
            ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
        } else {
            requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX,
                ClientTelemetry.REQUEST_CHARGE_PRECISION);
            requestChargeHistogram.setAutoResize(true);
            ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
            telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
                requestChargeHistogram);
        }
    }

    // Builds the histogram key; falls back to the client's consistency level when none
    // is supplied.
    private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                              int statusCode,
                                              String containerId,
                                              String databaseId,
                                              OperationType operationType,
                                              ResourceType resourceType,
                                              ConsistencyLevel consistencyLevel,
                                              String metricsName,
                                              String unitName) {
        ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
        reportPayload.setConsistency(consistencyLevel == null ?
            BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel() :
            consistencyLevel);
        reportPayload.setDatabaseName(databaseId);
        reportPayload.setContainerName(containerId);
        reportPayload.setOperation(operationType);
        reportPayload.setResource(resourceType);
        reportPayload.setStatusCode(statusCode);
        return reportPayload;
    }
}
Please use the logger instead of e.printStackTrace().
/**
 * Recurring telemetry loop: waits {@code clientTelemetrySchedulingSec} seconds, snapshots
 * the histograms, logs the serialized telemetry, clears the collected data, and
 * re-schedules itself. Stops when the client is closed or telemetry is disabled.
 *
 * @return a Mono completing when the loop terminates
 */
private Mono<Void> sendClientTelemetry() {
    return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec))
        .flatMap(t -> {
            if (this.isClosed) {
                logger.warn("client already closed");
                return Mono.empty();
            }
            if (!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) {
                logger.trace("client telemetry not enabled");
                return Mono.empty();
            }
            readHistogram();
            try {
                logger.info("ClientTelemetry {}",
                    OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo));
            } catch (JsonProcessingException e) {
                // FIX: was e.printStackTrace() — never bypass the logger; keep the cause.
                logger.error("Error while parsing client telemetry into json. ", e);
            }
            clearDataForNextRun();
            // Re-schedule the next run (recursive, but each hop goes through Mono.delay).
            return this.sendClientTelemetry();
        }).onErrorResume(ex -> {
            // Keep the loop alive on unexpected failures; log with the full cause.
            logger.error("sendClientTelemetry() - Unable to send client telemetry. Exception: ", ex);
            clearDataForNextRun();
            return this.sendClientTelemetry();
        }).subscribeOn(scheduler);
}
e.printStackTrace();
/**
 * Recurring telemetry loop: waits {@code clientTelemetrySchedulingSec} seconds, snapshots
 * the histograms, logs the serialized telemetry, clears the collected data, and
 * re-schedules itself. Stops when the client is closed or telemetry is disabled.
 *
 * @return a Mono completing when the loop terminates
 */
private Mono<Void> sendClientTelemetry() {
    return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec))
        .flatMap(t -> {
            if (this.isClosed) {
                logger.warn("client already closed");
                return Mono.empty();
            }
            if (!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) {
                logger.trace("client telemetry not enabled");
                return Mono.empty();
            }
            readHistogram();
            try {
                logger.info("ClientTelemetry {}",
                    OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo));
            } catch (JsonProcessingException e) {
                // FIX: message typo — "Error which parsing" → "Error while parsing".
                logger.error("Error while parsing client telemetry into json. ", e);
            }
            clearDataForNextRun();
            // Re-schedule the next run (recursive, but each hop goes through Mono.delay).
            return this.sendClientTelemetry();
        }).onErrorResume(ex -> {
            // Keep the loop alive on unexpected failures; log with the full cause.
            logger.error("sendClientTelemetry() - Unable to send client telemetry. Exception: ", ex);
            clearDataForNextRun();
            return this.sendClientTelemetry();
        }).subscribeOn(scheduler);
}
// Collects and periodically reports Cosmos client telemetry: per-operation request latency
// and request charge plus system CPU/memory histograms, serialized to JSON and logged on a
// fixed schedule (see sendClientTelemetry / Configs.getClientTelemetrySchedulingInSec()).
// It also best-effort fetches Azure VM metadata at init() to tag the telemetry with the
// application region.
//
// NOTE(review): this blob is a collapsed extraction and is NOT compilable as-is — the
// AZURE_VM_METADATA string literal is truncated to "http: with no closing quote; restore the
// full metadata endpoint URL before reuse.
// NOTE(review): the Logger is created with GlobalEndpointManager.class and close() logs
// "GlobalEndpointManager closed." — presumably copy/paste from GlobalEndpointManager; confirm
// and switch to ClientTelemetry.class.
// NOTE(review): recordValue() passes ex.getMessage() as an SLF4J argument without a {}
// placeholder, so the message is silently dropped — pass the exception itself instead.
class ClientTelemetry { public final static int REQUEST_LATENCY_MAX = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ExecutorService executor = Executors.newSingleThreadExecutor(); private final Scheduler scheduler = Schedulers.fromExecutor(executor); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient 
httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.executor.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); }).onErrorResume(throwable -> { logger.info("Unable 
to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); 
payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } }
// Revised variant of the ClientTelemetry class: renames size constants to carry their units
// (REQUEST_LATENCY_MAX_MICRO_SEC, MEMORY_MAX_IN_MB, ONE_KB_TO_BYTES), replaces the
// single-thread ExecutorService with a ScheduledThreadPoolExecutor backed by a daemon
// ThreadFactory (so the telemetry thread cannot keep the JVM alive), logs exceptions via the
// SLF4J throwable overload in recordValue(), and enriches hostEnvInfo from the fetched Azure
// VM metadata (osType|sku|vmSize|azEnvironment).
//
// NOTE(review): this blob is a collapsed extraction and is NOT compilable as-is — the
// AZURE_VM_METADATA string literal is truncated to "http: with no closing quote; restore the
// full metadata endpoint URL before reuse.
// NOTE(review): the Logger is still created with GlobalEndpointManager.class and close()
// still logs "GlobalEndpointManager closed." — confirm and switch to ClientTelemetry.
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. 
", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } private static class DaemonThreadFactory implements 
ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
This is in line with what we do in GlobalEndpointManager. Also, expand() is more applicable when we have values in a stream and want to do a breadth-first or depth-first recursion; in our case it is a Mono<Void>.
/**
 * Schedules the periodic client-telemetry run: waits {@code clientTelemetrySchedulingSec}
 * seconds, reads the latest histograms, logs the serialized telemetry payload, clears the
 * collected data, and recursively re-arms itself for the next cycle.
 *
 * <p>Terminates (returns an empty Mono) once the client is closed or client telemetry is
 * disabled via configuration. Any upstream error is logged and the loop is restarted so a
 * single failed run never stops telemetry collection.
 *
 * @return a {@link Mono} that completes when the telemetry loop terminates.
 */
private Mono<Void> sendClientTelemetry() {
    return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec))
        .flatMap(t -> {
            if (this.isClosed) {
                logger.warn("client already closed");
                return Mono.empty();
            }

            if (!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) {
                logger.trace("client telemetry not enabled");
                return Mono.empty();
            }

            readHistogram();
            try {
                logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo));
            } catch (JsonProcessingException e) {
                // Use the logger (not e.printStackTrace()) so the failure reaches the
                // configured log sink instead of stderr.
                logger.error("Error while serializing client telemetry into json. ", e);
            }
            clearDataForNextRun();
            // Re-arm the delay for the next telemetry cycle.
            return this.sendClientTelemetry();
        }).onErrorResume(ex -> {
            // Pass the throwable as the last SLF4J argument so the stack trace is logged once.
            logger.error("sendClientTelemetry() - Unable to send client telemetry. Exception: ", ex);
            clearDataForNextRun();
            // Keep the loop alive even after a failed run.
            return this.sendClientTelemetry();
        }).subscribeOn(scheduler);
}
return this.sendClientTelemetry();
/**
 * Schedules the periodic client-telemetry run: waits {@code clientTelemetrySchedulingSec}
 * seconds, reads the latest histograms, logs the serialized telemetry payload, clears the
 * collected data, and recursively re-arms itself for the next cycle.
 *
 * <p>Terminates (returns an empty Mono) once the client is closed or client telemetry is
 * disabled via configuration. Any upstream error is logged and the loop is restarted so a
 * single failed run never stops telemetry collection.
 *
 * @return a {@link Mono} that completes when the telemetry loop terminates.
 */
private Mono<Void> sendClientTelemetry() {
    return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec))
        .flatMap(t -> {
            if (this.isClosed) {
                logger.warn("client already closed");
                return Mono.empty();
            }

            if (!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) {
                logger.trace("client telemetry not enabled");
                return Mono.empty();
            }

            readHistogram();
            try {
                logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo));
            } catch (JsonProcessingException e) {
                // Fixed log message: "Error which parsing" -> "Error while parsing".
                logger.error("Error while parsing client telemetry into json. ", e);
            }
            clearDataForNextRun();
            // Re-arm the delay for the next telemetry cycle.
            return this.sendClientTelemetry();
        }).onErrorResume(ex -> {
            logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: ", ex);
            clearDataForNextRun();
            // Keep the loop alive even after a failed run.
            return this.sendClientTelemetry();
        }).subscribeOn(scheduler);
}
// Collects and periodically reports Cosmos client telemetry: per-operation request latency
// and request charge plus system CPU/memory histograms, serialized to JSON and logged on a
// fixed schedule (see sendClientTelemetry / Configs.getClientTelemetrySchedulingInSec()).
// It also best-effort fetches Azure VM metadata at init() to tag the telemetry with the
// application region.
//
// NOTE(review): this blob is a collapsed extraction and is NOT compilable as-is — the
// AZURE_VM_METADATA string literal is truncated to "http: with no closing quote; restore the
// full metadata endpoint URL before reuse.
// NOTE(review): the Logger is created with GlobalEndpointManager.class and close() logs
// "GlobalEndpointManager closed." — presumably copy/paste from GlobalEndpointManager; confirm
// and switch to ClientTelemetry.class.
// NOTE(review): recordValue() passes ex.getMessage() as an SLF4J argument without a {}
// placeholder, so the message is silently dropped — pass the exception itself instead.
class ClientTelemetry { public final static int REQUEST_LATENCY_MAX = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ExecutorService executor = Executors.newSingleThreadExecutor(); private final Scheduler scheduler = Schedulers.fromExecutor(executor); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient 
httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.executor.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); }).onErrorResume(throwable -> { logger.info("Unable 
to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); 
payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } }
// Revised variant of the ClientTelemetry class: renames size constants to carry their units
// (REQUEST_LATENCY_MAX_MICRO_SEC, MEMORY_MAX_IN_MB, ONE_KB_TO_BYTES), replaces the
// single-thread ExecutorService with a ScheduledThreadPoolExecutor backed by a daemon
// ThreadFactory (so the telemetry thread cannot keep the JVM alive), logs exceptions via the
// SLF4J throwable overload in recordValue(), and enriches hostEnvInfo from the fetched Azure
// VM metadata (osType|sku|vmSize|azEnvironment).
//
// NOTE(review): this blob is a collapsed extraction and is NOT compilable as-is — the
// AZURE_VM_METADATA string literal is truncated to "http: with no closing quote; restore the
// full metadata endpoint URL before reuse.
// NOTE(review): the Logger is still created with GlobalEndpointManager.class and close()
// still logs "GlobalEndpointManager closed." — confirm and switch to ClientTelemetry.
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. 
", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } private static class DaemonThreadFactory implements 
ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
We want to keep the payload size as low as possible; a float gives 6 to 7 decimal digits of precision, which is more than enough for us to measure the request charge.
/**
 * Applies the caller-configured paging options to the underlying page publisher and
 * decorates the resulting Flux with tracing spans and client-telemetry collection.
 *
 * @param pagedFluxOptions paging configuration (continuation, page size, tracer, client).
 * @param context the context used as parent for the tracing span.
 * @return the decorated {@link Flux} of pages.
 */
private Flux<FeedResponse<T>> byPage(CosmosPagedFluxOptions pagedFluxOptions, Context context) {
    final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
    AtomicReference<Instant> startTime = new AtomicReference<>();
    return this.optionsFluxFunction.apply(pagedFluxOptions).doOnSubscribe(ignoredValue -> {
        if (pagedFluxOptions.getTracerProvider().isEnabled()) {
            parentContext.set(pagedFluxOptions.getTracerProvider().startSpan(pagedFluxOptions.getTracerSpanName(),
                pagedFluxOptions.getDatabaseId(), pagedFluxOptions.getServiceEndpoint(), context));
        }
        // Latency measurement for the first page starts at subscription time.
        startTime.set(Instant.now());
    }).doOnComplete(() -> {
        if (pagedFluxOptions.getTracerProvider().isEnabled()) {
            pagedFluxOptions.getTracerProvider().endSpan(parentContext.get(), Signal.complete(),
                HttpConstants.StatusCodes.OK);
        }
    }).doOnError(throwable -> {
        if (pagedFluxOptions.getTracerProvider().isEnabled()) {
            pagedFluxOptions.getTracerProvider().endSpan(parentContext.get(), Signal.error(throwable),
                TracerProvider.ERROR_CODE);
        }
        // Record telemetry for a failed page; the status code is recorded as 0 on this path.
        if (pagedFluxOptions.getCosmosAsyncClient() != null &&
            Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(pagedFluxOptions.getCosmosAsyncClient())) &&
            throwable instanceof CosmosException) {
            CosmosException cosmosException = (CosmosException) throwable;
            fillClientTelemetry(pagedFluxOptions.getCosmosAsyncClient(),
                0,
                pagedFluxOptions.getContainerId(),
                pagedFluxOptions.getDatabaseId(),
                pagedFluxOptions.getOperationType(),
                pagedFluxOptions.getResourceType(),
                BridgeInternal.getContextClient(pagedFluxOptions.getCosmosAsyncClient()).getConsistencyLevel(),
                (float) cosmosException.getRequestCharge(),
                Duration.between(startTime.get(), Instant.now()));
        }
        // Reset the clock so the next emission is measured from here.
        startTime.set(Instant.now());
    }).doOnNext(feedResponse -> {
        // Invoke the side-effect handler registered via handle(), if any.
        if (feedResponseConsumer != null) {
            feedResponseConsumer.accept(feedResponse);
        }
        if (pagedFluxOptions.getCosmosAsyncClient() != null &&
            Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(pagedFluxOptions.getCosmosAsyncClient()))) {
            fillClientTelemetry(pagedFluxOptions.getCosmosAsyncClient(),
                HttpConstants.StatusCodes.OK,
                pagedFluxOptions.getContainerId(),
                pagedFluxOptions.getDatabaseId(),
                pagedFluxOptions.getOperationType(),
                pagedFluxOptions.getResourceType(),
                BridgeInternal.getContextClient(pagedFluxOptions.getCosmosAsyncClient()).getConsistencyLevel(),
                (float) feedResponse.getRequestCharge(),
                Duration.between(startTime.get(), Instant.now()));
            // Restart the clock for the next page's latency measurement.
            startTime.set(Instant.now());
        }
    });
}
(float) cosmosException.getRequestCharge(), Duration.between(startTime.get(), Instant.now()));
Then call it with each feedResponse: `if (feedResponseConsumer != null) { feedResponseConsumer.accept(feedResponse); }`
/**
 * Cosmos-specific {@link ContinuablePagedFlux} that lazily materializes pages from the
 * supplied options function and layers tracing and client-telemetry collection on top.
 */
class CosmosPagedFlux<T> extends ContinuablePagedFlux<String, T, FeedResponse<T>> {

    // Produces the underlying Flux of pages for a given set of paging options.
    private final Function<CosmosPagedFluxOptions, Flux<FeedResponse<T>>> optionsFluxFunction;
    // Optional side-effect handler invoked for every FeedResponse; null when not set.
    private final Consumer<FeedResponse<T>> feedResponseConsumer;

    CosmosPagedFlux(Function<CosmosPagedFluxOptions, Flux<FeedResponse<T>>> optionsFluxFunction) {
        this.optionsFluxFunction = optionsFluxFunction;
        this.feedResponseConsumer = null;
    }

    CosmosPagedFlux(Function<CosmosPagedFluxOptions, Flux<FeedResponse<T>>> optionsFluxFunction,
                    Consumer<FeedResponse<T>> feedResponseConsumer) {
        this.optionsFluxFunction = optionsFluxFunction;
        this.feedResponseConsumer = feedResponseConsumer;
    }

    /**
     * Handle for invoking "side-effects" on each FeedResponse returned by CosmosPagedFlux
     *
     * @param feedResponseConsumer handler
     * @return CosmosPagedFlux instance with attached handler
     */
    @Beta(value = Beta.SinceVersion.V4_6_0)
    public CosmosPagedFlux<T> handle(Consumer<FeedResponse<T>> feedResponseConsumer) {
        return new CosmosPagedFlux<T>(this.optionsFluxFunction, feedResponseConsumer);
    }

    @Override
    public Flux<FeedResponse<T>> byPage() {
        CosmosPagedFluxOptions cosmosPagedFluxOptions = new CosmosPagedFluxOptions();
        return FluxUtil.fluxContext(context -> byPage(cosmosPagedFluxOptions, context));
    }

    @Override
    public Flux<FeedResponse<T>> byPage(String continuationToken) {
        CosmosPagedFluxOptions cosmosPagedFluxOptions = new CosmosPagedFluxOptions();
        cosmosPagedFluxOptions.setRequestContinuation(continuationToken);
        return FluxUtil.fluxContext(context -> byPage(cosmosPagedFluxOptions, context));
    }

    @Override
    public Flux<FeedResponse<T>> byPage(int preferredPageSize) {
        CosmosPagedFluxOptions cosmosPagedFluxOptions = new CosmosPagedFluxOptions();
        cosmosPagedFluxOptions.setMaxItemCount(preferredPageSize);
        return FluxUtil.fluxContext(context -> byPage(cosmosPagedFluxOptions, context));
    }

    @Override
    public Flux<FeedResponse<T>> byPage(String continuationToken, int preferredPageSize) {
        CosmosPagedFluxOptions cosmosPagedFluxOptions = new CosmosPagedFluxOptions();
        cosmosPagedFluxOptions.setRequestContinuation(continuationToken);
        cosmosPagedFluxOptions.setMaxItemCount(preferredPageSize);
        return FluxUtil.fluxContext(context -> byPage(cosmosPagedFluxOptions, context));
    }

    /**
     * Subscribe to consume all items of type {@code T} in the sequence respectively. This is recommended for most
     * common scenarios. This will seamlessly fetch next page when required and provide with a {@link Flux} of items.
     *
     * @param coreSubscriber The subscriber for this {@link CosmosPagedFlux}
     */
    @Override
    public void subscribe(CoreSubscriber<? super T> coreSubscriber) {
        Flux<FeedResponse<T>> pagedResponse = this.byPage();
        pagedResponse.flatMap(tFeedResponse -> {
            IterableStream<T> elements = tFeedResponse.getElements();
            if (elements == null) {
                return Flux.empty();
            }
            return Flux.fromIterable(elements);
        }).subscribe(coreSubscriber);
    }

    /**
     * Core pipeline: applies the options to the page publisher and adds tracing spans,
     * client telemetry, and the user-registered per-page side-effect handler.
     */
    private Flux<FeedResponse<T>> byPage(CosmosPagedFluxOptions pagedFluxOptions, Context context) {
        final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
        AtomicReference<Instant> startTime = new AtomicReference<>();
        return this.optionsFluxFunction.apply(pagedFluxOptions).doOnSubscribe(ignoredValue -> {
            if (pagedFluxOptions.getTracerProvider().isEnabled()) {
                parentContext.set(pagedFluxOptions.getTracerProvider().startSpan(pagedFluxOptions.getTracerSpanName(),
                    pagedFluxOptions.getDatabaseId(), pagedFluxOptions.getServiceEndpoint(), context));
            }
            // Latency measurement for the first page starts at subscription time.
            startTime.set(Instant.now());
        }).doOnComplete(() -> {
            if (pagedFluxOptions.getTracerProvider().isEnabled()) {
                pagedFluxOptions.getTracerProvider().endSpan(parentContext.get(), Signal.complete(),
                    HttpConstants.StatusCodes.OK);
            }
        }).doOnError(throwable -> {
            if (pagedFluxOptions.getTracerProvider().isEnabled()) {
                pagedFluxOptions.getTracerProvider().endSpan(parentContext.get(), Signal.error(throwable),
                    TracerProvider.ERROR_CODE);
            }
            // Record telemetry for a failed page; the status code is recorded as 0 on this path.
            if (pagedFluxOptions.getCosmosAsyncClient() != null &&
                Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(pagedFluxOptions.getCosmosAsyncClient())) &&
                throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                fillClientTelemetry(pagedFluxOptions.getCosmosAsyncClient(),
                    0,
                    pagedFluxOptions.getContainerId(),
                    pagedFluxOptions.getDatabaseId(),
                    pagedFluxOptions.getOperationType(),
                    pagedFluxOptions.getResourceType(),
                    BridgeInternal.getContextClient(pagedFluxOptions.getCosmosAsyncClient()).getConsistencyLevel(),
                    (float) cosmosException.getRequestCharge(),
                    Duration.between(startTime.get(), Instant.now()));
            }
            startTime.set(Instant.now());
        }).doOnNext(feedResponse -> {
            // FIX: the consumer attached via handle() was previously never invoked;
            // forward every page to it before recording telemetry.
            if (feedResponseConsumer != null) {
                feedResponseConsumer.accept(feedResponse);
            }
            if (pagedFluxOptions.getCosmosAsyncClient() != null &&
                Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(pagedFluxOptions.getCosmosAsyncClient()))) {
                fillClientTelemetry(pagedFluxOptions.getCosmosAsyncClient(),
                    HttpConstants.StatusCodes.OK,
                    pagedFluxOptions.getContainerId(),
                    pagedFluxOptions.getDatabaseId(),
                    pagedFluxOptions.getOperationType(),
                    pagedFluxOptions.getResourceType(),
                    BridgeInternal.getContextClient(pagedFluxOptions.getCosmosAsyncClient()).getConsistencyLevel(),
                    (float) feedResponse.getRequestCharge(),
                    Duration.between(startTime.get(), Instant.now()));
                // Restart the clock for the next page's latency measurement.
                startTime.set(Instant.now());
            }
        });
    }

    /**
     * Records one latency sample and one request-charge sample into the operation-info
     * histograms held by the client's {@link ClientTelemetry}, creating histograms on demand.
     *
     * @param latency wall-clock duration of the operation; recorded in microseconds
     *                (latency.toNanos() / 1000) to match REQUEST_LATENCY_UNIT.
     */
    public void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                    int statusCode,
                                    String containerId,
                                    String databaseId,
                                    OperationType operationType,
                                    ResourceType resourceType,
                                    ConsistencyLevel consistencyLevel,
                                    float requestCharge,
                                    Duration latency) {
        ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
        ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, statusCode, containerId,
            databaseId, operationType, resourceType, consistencyLevel,
            ClientTelemetry.REQUEST_LATENCY_NAME, ClientTelemetry.REQUEST_LATENCY_UNIT);
        ConcurrentDoubleHistogram latencyHistogram =
            telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
        if (latencyHistogram != null) {
            ClientTelemetry.recordValue(latencyHistogram, latency.toNanos() / 1000);
        } else {
            // Successful operations use a higher precision than failures.
            if (statusCode == HttpConstants.StatusCodes.OK) {
                latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX,
                    ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
            } else {
                latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX,
                    ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
            }
            latencyHistogram.setAutoResize(true);
            ClientTelemetry.recordValue(latencyHistogram, latency.toNanos() / 1000);
            telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
        }
        ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, statusCode, containerId,
            databaseId, operationType, resourceType, consistencyLevel,
            ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
        ConcurrentDoubleHistogram requestChargeHistogram =
            telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
        if (requestChargeHistogram != null) {
            ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
        } else {
            requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX,
                ClientTelemetry.REQUEST_CHARGE_PRECISION);
            requestChargeHistogram.setAutoResize(true);
            ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
            telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
                requestChargeHistogram);
        }
    }

    /**
     * Builds the histogram key describing one (operation, resource, status, consistency) bucket.
     * Falls back to the client's account-level consistency when none was supplied.
     */
    private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                              int statusCode,
                                              String containerId,
                                              String databaseId,
                                              OperationType operationType,
                                              ResourceType resourceType,
                                              ConsistencyLevel consistencyLevel,
                                              String metricsName,
                                              String unitName) {
        ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
        reportPayload.setConsistency(consistencyLevel == null
            ? BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel()
            : consistencyLevel);
        reportPayload.setDatabasesName(databaseId);
        reportPayload.setContainerName(containerId);
        reportPayload.setOperation(operationType);
        reportPayload.setResource(resourceType);
        reportPayload.setStatusCode(statusCode);
        return reportPayload;
    }
}
/**
 * Cosmos-specific {@link ContinuablePagedFlux} that lazily materializes pages from the
 * supplied options function and layers tracing and client-telemetry collection on top.
 */
class CosmosPagedFlux<T> extends ContinuablePagedFlux<String, T, FeedResponse<T>> {

    // Produces the underlying Flux of pages for a given set of paging options.
    private final Function<CosmosPagedFluxOptions, Flux<FeedResponse<T>>> optionsFluxFunction;
    // Optional side-effect handler invoked for every FeedResponse; null when not set.
    private final Consumer<FeedResponse<T>> feedResponseConsumer;

    CosmosPagedFlux(Function<CosmosPagedFluxOptions, Flux<FeedResponse<T>>> optionsFluxFunction) {
        this.optionsFluxFunction = optionsFluxFunction;
        this.feedResponseConsumer = null;
    }

    CosmosPagedFlux(Function<CosmosPagedFluxOptions, Flux<FeedResponse<T>>> optionsFluxFunction,
                    Consumer<FeedResponse<T>> feedResponseConsumer) {
        this.optionsFluxFunction = optionsFluxFunction;
        this.feedResponseConsumer = feedResponseConsumer;
    }

    /**
     * Handle for invoking "side-effects" on each FeedResponse returned by CosmosPagedFlux
     *
     * @param feedResponseConsumer handler
     * @return CosmosPagedFlux instance with attached handler
     */
    @Beta(value = Beta.SinceVersion.V4_6_0)
    public CosmosPagedFlux<T> handle(Consumer<FeedResponse<T>> feedResponseConsumer) {
        return new CosmosPagedFlux<T>(this.optionsFluxFunction, feedResponseConsumer);
    }

    @Override
    public Flux<FeedResponse<T>> byPage() {
        CosmosPagedFluxOptions cosmosPagedFluxOptions = new CosmosPagedFluxOptions();
        return FluxUtil.fluxContext(context -> byPage(cosmosPagedFluxOptions, context));
    }

    @Override
    public Flux<FeedResponse<T>> byPage(String continuationToken) {
        CosmosPagedFluxOptions cosmosPagedFluxOptions = new CosmosPagedFluxOptions();
        cosmosPagedFluxOptions.setRequestContinuation(continuationToken);
        return FluxUtil.fluxContext(context -> byPage(cosmosPagedFluxOptions, context));
    }

    @Override
    public Flux<FeedResponse<T>> byPage(int preferredPageSize) {
        CosmosPagedFluxOptions cosmosPagedFluxOptions = new CosmosPagedFluxOptions();
        cosmosPagedFluxOptions.setMaxItemCount(preferredPageSize);
        return FluxUtil.fluxContext(context -> byPage(cosmosPagedFluxOptions, context));
    }

    @Override
    public Flux<FeedResponse<T>> byPage(String continuationToken, int preferredPageSize) {
        CosmosPagedFluxOptions cosmosPagedFluxOptions = new CosmosPagedFluxOptions();
        cosmosPagedFluxOptions.setRequestContinuation(continuationToken);
        cosmosPagedFluxOptions.setMaxItemCount(preferredPageSize);
        return FluxUtil.fluxContext(context -> byPage(cosmosPagedFluxOptions, context));
    }

    /**
     * Subscribe to consume all items of type {@code T} in the sequence respectively. This is recommended for most
     * common scenarios. This will seamlessly fetch next page when required and provide with a {@link Flux} of items.
     *
     * @param coreSubscriber The subscriber for this {@link CosmosPagedFlux}
     */
    @Override
    public void subscribe(CoreSubscriber<? super T> coreSubscriber) {
        Flux<FeedResponse<T>> pagedResponse = this.byPage();
        pagedResponse.flatMap(tFeedResponse -> {
            IterableStream<T> elements = tFeedResponse.getElements();
            if (elements == null) {
                return Flux.empty();
            }
            return Flux.fromIterable(elements);
        }).subscribe(coreSubscriber);
    }

    /**
     * Core pipeline: applies the options to the page publisher and adds tracing spans,
     * client telemetry, and the user-registered per-page side-effect handler.
     */
    private Flux<FeedResponse<T>> byPage(CosmosPagedFluxOptions pagedFluxOptions, Context context) {
        final AtomicReference<Context> parentContext = new AtomicReference<>(Context.NONE);
        AtomicReference<Instant> startTime = new AtomicReference<>();
        return this.optionsFluxFunction.apply(pagedFluxOptions).doOnSubscribe(ignoredValue -> {
            if (pagedFluxOptions.getTracerProvider().isEnabled()) {
                parentContext.set(pagedFluxOptions.getTracerProvider().startSpan(pagedFluxOptions.getTracerSpanName(),
                    pagedFluxOptions.getDatabaseId(), pagedFluxOptions.getServiceEndpoint(), context));
            }
            // Latency measurement for the first page starts at subscription time.
            startTime.set(Instant.now());
        }).doOnComplete(() -> {
            if (pagedFluxOptions.getTracerProvider().isEnabled()) {
                pagedFluxOptions.getTracerProvider().endSpan(parentContext.get(), Signal.complete(),
                    HttpConstants.StatusCodes.OK);
            }
        }).doOnError(throwable -> {
            if (pagedFluxOptions.getTracerProvider().isEnabled()) {
                pagedFluxOptions.getTracerProvider().endSpan(parentContext.get(), Signal.error(throwable),
                    TracerProvider.ERROR_CODE);
            }
            // Record telemetry for a failed page; the status code is recorded as 0 on this path.
            if (pagedFluxOptions.getCosmosAsyncClient() != null &&
                Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(pagedFluxOptions.getCosmosAsyncClient())) &&
                throwable instanceof CosmosException) {
                CosmosException cosmosException = (CosmosException) throwable;
                fillClientTelemetry(pagedFluxOptions.getCosmosAsyncClient(),
                    0,
                    pagedFluxOptions.getContainerId(),
                    pagedFluxOptions.getDatabaseId(),
                    pagedFluxOptions.getOperationType(),
                    pagedFluxOptions.getResourceType(),
                    BridgeInternal.getContextClient(pagedFluxOptions.getCosmosAsyncClient()).getConsistencyLevel(),
                    (float) cosmosException.getRequestCharge(),
                    Duration.between(startTime.get(), Instant.now()));
            }
            startTime.set(Instant.now());
        }).doOnNext(feedResponse -> {
            // FIX: the consumer attached via handle() was previously never invoked;
            // forward every page to it before recording telemetry.
            if (feedResponseConsumer != null) {
                feedResponseConsumer.accept(feedResponse);
            }
            if (pagedFluxOptions.getCosmosAsyncClient() != null &&
                Configs.isClientTelemetryEnabled(BridgeInternal.isClientTelemetryEnabled(pagedFluxOptions.getCosmosAsyncClient()))) {
                fillClientTelemetry(pagedFluxOptions.getCosmosAsyncClient(),
                    HttpConstants.StatusCodes.OK,
                    pagedFluxOptions.getContainerId(),
                    pagedFluxOptions.getDatabaseId(),
                    pagedFluxOptions.getOperationType(),
                    pagedFluxOptions.getResourceType(),
                    BridgeInternal.getContextClient(pagedFluxOptions.getCosmosAsyncClient()).getConsistencyLevel(),
                    (float) feedResponse.getRequestCharge(),
                    Duration.between(startTime.get(), Instant.now()));
                // Restart the clock for the next page's latency measurement.
                startTime.set(Instant.now());
            }
        });
    }

    /**
     * Records one latency sample and one request-charge sample into the operation-info
     * histograms held by the client's {@link ClientTelemetry}, creating histograms on demand.
     *
     * @param latency wall-clock duration of the operation; recorded in microseconds
     *                (latency.toNanos() / 1000) to match REQUEST_LATENCY_UNIT.
     */
    public void fillClientTelemetry(CosmosAsyncClient cosmosAsyncClient,
                                    int statusCode,
                                    String containerId,
                                    String databaseId,
                                    OperationType operationType,
                                    ResourceType resourceType,
                                    ConsistencyLevel consistencyLevel,
                                    float requestCharge,
                                    Duration latency) {
        ClientTelemetry telemetry = BridgeInternal.getContextClient(cosmosAsyncClient).getClientTelemetry();
        ReportPayload reportPayloadLatency = createReportPayload(cosmosAsyncClient, statusCode, containerId,
            databaseId, operationType, resourceType, consistencyLevel,
            ClientTelemetry.REQUEST_LATENCY_NAME, ClientTelemetry.REQUEST_LATENCY_UNIT);
        ConcurrentDoubleHistogram latencyHistogram =
            telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadLatency);
        if (latencyHistogram != null) {
            ClientTelemetry.recordValue(latencyHistogram, latency.toNanos() / 1000);
        } else {
            // Successful operations use a higher precision than failures.
            if (statusCode == HttpConstants.StatusCodes.OK) {
                latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC,
                    ClientTelemetry.REQUEST_LATENCY_SUCCESS_PRECISION);
            } else {
                latencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_LATENCY_MAX_MICRO_SEC,
                    ClientTelemetry.REQUEST_LATENCY_FAILURE_PRECISION);
            }
            latencyHistogram.setAutoResize(true);
            ClientTelemetry.recordValue(latencyHistogram, latency.toNanos() / 1000);
            telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadLatency, latencyHistogram);
        }
        ReportPayload reportPayloadRequestCharge = createReportPayload(cosmosAsyncClient, statusCode, containerId,
            databaseId, operationType, resourceType, consistencyLevel,
            ClientTelemetry.REQUEST_CHARGE_NAME, ClientTelemetry.REQUEST_CHARGE_UNIT);
        ConcurrentDoubleHistogram requestChargeHistogram =
            telemetry.getClientTelemetryInfo().getOperationInfoMap().get(reportPayloadRequestCharge);
        if (requestChargeHistogram != null) {
            ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
        } else {
            requestChargeHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.REQUEST_CHARGE_MAX,
                ClientTelemetry.REQUEST_CHARGE_PRECISION);
            requestChargeHistogram.setAutoResize(true);
            ClientTelemetry.recordValue(requestChargeHistogram, requestCharge);
            telemetry.getClientTelemetryInfo().getOperationInfoMap().put(reportPayloadRequestCharge,
                requestChargeHistogram);
        }
    }

    /**
     * Builds the histogram key describing one (operation, resource, status, consistency) bucket.
     * Falls back to the client's account-level consistency when none was supplied.
     */
    private ReportPayload createReportPayload(CosmosAsyncClient cosmosAsyncClient,
                                              int statusCode,
                                              String containerId,
                                              String databaseId,
                                              OperationType operationType,
                                              ResourceType resourceType,
                                              ConsistencyLevel consistencyLevel,
                                              String metricsName,
                                              String unitName) {
        ReportPayload reportPayload = new ReportPayload(metricsName, unitName);
        reportPayload.setConsistency(consistencyLevel == null
            ? BridgeInternal.getContextClient(cosmosAsyncClient).getConsistencyLevel()
            : consistencyLevel);
        reportPayload.setDatabaseName(databaseId);
        reportPayload.setContainerName(containerId);
        reportPayload.setOperation(operationType);
        reportPayload.setResource(resourceType);
        reportPayload.setStatusCode(statusCode);
        return reportPayload;
    }
}
This is done intentionally: until we enable telemetry by default, I don't want customers to get this log. Otherwise every customer would see it every 10 minutes and might open a CRI (customer-reported incident) asking about it.
/**
 * Periodically (every clientTelemetrySchedulingSec seconds) serializes the collected
 * telemetry, logs it, clears the per-run data, and re-schedules itself. The recursion
 * stops once the client is closed or telemetry is disabled.
 *
 * @return a Mono that completes when the loop terminates.
 */
private Mono<Void> sendClientTelemetry() {
    return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec))
        .flatMap(t -> {
            if (this.isClosed) {
                logger.warn("client already closed");
                return Mono.empty();
            }

            if (!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) {
                // trace (not warn) on purpose: until telemetry is on by default we do not
                // want every customer to see this message on every cycle.
                logger.trace("client telemetry not enabled");
                return Mono.empty();
            }

            readHistogram();
            try {
                logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo));
            } catch (JsonProcessingException e) {
                // FIX: was e.printStackTrace() — route through the logger and keep the stack trace.
                logger.error("Error while parsing client telemetry into json.", e);
            }
            clearDataForNextRun();
            // Schedule the next run.
            return this.sendClientTelemetry();
        }).onErrorResume(ex -> {
            // FIX: passing both ex.toString() and ex duplicated the exception text;
            // the Throwable argument alone logs message and stack trace.
            logger.error("sendClientTelemetry() - Unable to send client telemetry. Exception: ", ex);
            clearDataForNextRun();
            return this.sendClientTelemetry();
        }).subscribeOn(scheduler);
}
logger.trace("client telemetry not enabled");
/**
 * Periodically (every clientTelemetrySchedulingSec seconds) serializes the collected
 * telemetry, logs it, clears the per-run data, and re-schedules itself. The recursion
 * stops once the client is closed or telemetry is disabled.
 *
 * @return a Mono that completes when the loop terminates.
 */
private Mono<Void> sendClientTelemetry() {
    return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec))
        .flatMap(t -> {
            if (this.isClosed) {
                logger.warn("client already closed");
                return Mono.empty();
            }

            if (!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) {
                // trace (not warn) on purpose: until telemetry is on by default we do not
                // want every customer to see this message on every cycle.
                logger.trace("client telemetry not enabled");
                return Mono.empty();
            }

            readHistogram();
            try {
                logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo));
            } catch (JsonProcessingException e) {
                // FIX: corrected the broken log message ("Error which parsing ...").
                logger.error("Error while parsing client telemetry into json.", e);
            }
            clearDataForNextRun();
            // Schedule the next run.
            return this.sendClientTelemetry();
        }).onErrorResume(ex -> {
            logger.error("sendClientTelemetry() - Unable to send client telemetry. Exception: ", ex);
            clearDataForNextRun();
            return this.sendClientTelemetry();
        }).subscribeOn(scheduler);
}
class ClientTelemetry { public final static int REQUEST_LATENCY_MAX = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ExecutorService executor = Executors.newSingleThreadExecutor(); private final Scheduler scheduler = Schedulers.fromExecutor(executor); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient 
httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.executor.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); }).onErrorResume(throwable -> { logger.info("Unable 
to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); 
payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } }
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. 
", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } private static class DaemonThreadFactory implements 
ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
done
private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { e.printStackTrace(); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: {}", ex.toString(), ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); }
e.printStackTrace();
private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { logger.error("Error which parsing client telemetry into json. ", e); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); }
class ClientTelemetry { public final static int REQUEST_LATENCY_MAX = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ExecutorService executor = Executors.newSingleThreadExecutor(); private final Scheduler scheduler = Schedulers.fromExecutor(executor); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient 
httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.executor.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); }).onErrorResume(throwable -> { logger.info("Unable 
to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); 
payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } }
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. 
", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } private static class DaemonThreadFactory implements 
ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
removed extra message
private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { e.printStackTrace(); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: {}", ex.toString(), ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); }
". Exception: {}", ex.toString(), ex);
private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { logger.error("Error which parsing client telemetry into json. ", e); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); }
class ClientTelemetry { public final static int REQUEST_LATENCY_MAX = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ExecutorService executor = Executors.newSingleThreadExecutor(); private final Scheduler scheduler = Schedulers.fromExecutor(executor); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient 
httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.executor.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); }).onErrorResume(throwable -> { logger.info("Unable 
to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { DoubleHistogram cpuHistogram = new DoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); DoubleHistogram memoryHistogram = new DoubleHistogram(ClientTelemetry.MEMORY_MAX, ClientTelemetry.MEMORY_PRECISION); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); 
Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } }
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. 
", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } private static class DaemonThreadFactory implements 
ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
done
public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } }
logger.warn("Error while recording value for client telemetry", ex.getMessage());
public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } }
class ClientTelemetry { public final static int REQUEST_LATENCY_MAX = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ExecutorService executor = Executors.newSingleThreadExecutor(); private final Scheduler scheduler = Schedulers.fromExecutor(executor); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient 
httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.executor.shutdown(); logger.debug("GlobalEndpointManager closed."); } private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { e.printStackTrace(); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". 
Exception: {}", ex.toString(), ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { DoubleHistogram cpuHistogram = new DoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); DoubleHistogram memoryHistogram = new DoubleHistogram(ClientTelemetry.MEMORY_MAX, ClientTelemetry.MEMORY_PRECISION); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } }
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { logger.error("Error which parsing client telemetry into json. ", e); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". 
Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new 
ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } private static class DaemonThreadFactory implements ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
done
public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } }
logger.warn("Error while recording value for client telemetry", ex.getMessage());
public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } }
class ClientTelemetry { public final static int REQUEST_LATENCY_MAX = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ExecutorService executor = Executors.newSingleThreadExecutor(); private final Scheduler scheduler = Schedulers.fromExecutor(executor); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient 
httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.executor.shutdown(); logger.debug("GlobalEndpointManager closed."); } private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { e.printStackTrace(); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". 
Exception: {}", ex.toString(), ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { DoubleHistogram cpuHistogram = new DoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); DoubleHistogram memoryHistogram = new DoubleHistogram(ClientTelemetry.MEMORY_MAX, ClientTelemetry.MEMORY_PRECISION); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, DoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } }
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private Mono<Void> sendClientTelemetry() { return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec)) .flatMap(t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } if(!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) { logger.trace("client telemetry not enabled"); return Mono.empty(); } readHistogram(); try { logger.info("ClientTelemetry {}", OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo)); } catch (JsonProcessingException e) { logger.error("Error which parsing client telemetry into json. ", e); } clearDataForNextRun(); return this.sendClientTelemetry(); }).onErrorResume(ex -> { logger.error("sendClientTelemetry() - Unable to send client telemetry" + ". 
Exception: ", ex); clearDataForNextRun(); return this.sendClientTelemetry(); }).subscribeOn(scheduler); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new 
ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } private static class DaemonThreadFactory implements ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
Makes sense, thanks.
/**
 * Waits {@code clientTelemetrySchedulingSec} seconds, logs the serialized
 * telemetry snapshot, clears per-run data, and re-arms itself; terminates once
 * the client is closed or telemetry is disabled. All work runs on the telemetry
 * scheduler via {@code subscribeOn}.
 *
 * @return a {@link Mono} that completes when the loop stops re-arming.
 */
private Mono<Void> sendClientTelemetry() {
    return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec))
        .flatMap(t -> {
            if (this.isClosed) {
                logger.warn("client already closed");
                return Mono.empty();
            }

            if (!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) {
                logger.trace("client telemetry not enabled");
                return Mono.empty();
            }
            readHistogram();
            try {
                logger.info("ClientTelemetry {}",
                    OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo));
            } catch (JsonProcessingException e) {
                // BUG FIX: was e.printStackTrace() -- route the failure through the
                // logger so it is captured with the rest of the client's diagnostics.
                logger.error("Error while parsing client telemetry into json. ", e);
            }
            clearDataForNextRun();
            return this.sendClientTelemetry();
        }).onErrorResume(ex -> {
            // On any upstream failure, reset the run data and keep the loop alive.
            logger.error("sendClientTelemetry() - Unable to send client telemetry. Exception: {}",
                ex.toString(), ex);
            clearDataForNextRun();
            return this.sendClientTelemetry();
        }).subscribeOn(scheduler);
}
return this.sendClientTelemetry();
/**
 * Waits {@code clientTelemetrySchedulingSec} seconds, logs the serialized
 * telemetry snapshot, clears per-run data, and re-arms itself; terminates once
 * the client is closed or telemetry is disabled. All work runs on the telemetry
 * scheduler via {@code subscribeOn}.
 *
 * @return a {@link Mono} that completes when the loop stops re-arming.
 */
private Mono<Void> sendClientTelemetry() {
    return Mono.delay(Duration.ofSeconds(clientTelemetrySchedulingSec))
        .flatMap(t -> {
            if (this.isClosed) {
                logger.warn("client already closed");
                return Mono.empty();
            }

            if (!Configs.isClientTelemetryEnabled(this.isClientTelemetryEnabled)) {
                logger.trace("client telemetry not enabled");
                return Mono.empty();
            }
            readHistogram();
            try {
                logger.info("ClientTelemetry {}",
                    OBJECT_MAPPER.writeValueAsString(this.clientTelemetryInfo));
            } catch (JsonProcessingException e) {
                // BUG FIX: message previously read "Error which parsing ...".
                logger.error("Error while parsing client telemetry into json. ", e);
            }
            clearDataForNextRun();
            return this.sendClientTelemetry();
        }).onErrorResume(ex -> {
            // On any upstream failure, reset the run data and keep the loop alive.
            logger.error("sendClientTelemetry() - Unable to send client telemetry. Exception: ", ex);
            clearDataForNextRun();
            return this.sendClientTelemetry();
        }).subscribeOn(scheduler);
}
class ClientTelemetry { public final static int REQUEST_LATENCY_MAX = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ExecutorService executor = Executors.newSingleThreadExecutor(); private final Scheduler scheduler = Schedulers.fromExecutor(executor); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient 
httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry", ex.getMessage()); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.executor.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); }).onErrorResume(throwable -> { logger.info("Unable 
to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); 
payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } }
class ClientTelemetry { public final static int ONE_KB_TO_BYTES = 1024; public final static int REQUEST_LATENCY_MAX_MICRO_SEC = 300000000; public final static int REQUEST_LATENCY_SUCCESS_PRECISION = 4; public final static int REQUEST_LATENCY_FAILURE_PRECISION = 2; public final static String REQUEST_LATENCY_NAME = "RequestLatency"; public final static String REQUEST_LATENCY_UNIT = "MicroSec"; public final static int REQUEST_CHARGE_MAX = 10000; public final static int REQUEST_CHARGE_PRECISION = 2; public final static String REQUEST_CHARGE_NAME = "RequestCharge"; public final static String REQUEST_CHARGE_UNIT = "RU"; public final static int CPU_MAX = 100; public final static int CPU_PRECISION = 2; private final static String CPU_NAME = "CPU"; private final static String CPU_UNIT = "Percentage"; public final static int MEMORY_MAX_IN_MB = 102400; public final static int MEMORY_PRECISION = 2; private final static String MEMORY_NAME = "MemoryRemaining"; private final static String MEMORY_UNIT = "MB"; private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private ClientTelemetryInfo clientTelemetryInfo; private HttpClient httpClient; private final ScheduledThreadPoolExecutor scheduledExecutorService = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory()); private final Scheduler scheduler = Schedulers.fromExecutor(scheduledExecutorService); private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private volatile boolean isClosed; private volatile boolean isClientTelemetryEnabled; private static String AZURE_VM_METADATA = "http: private static final double PERCENTILE_50 = 50.0; private static final double PERCENTILE_90 = 90.0; private static final double PERCENTILE_95 = 95.0; private static final double PERCENTILE_99 = 99.0; private static final double PERCENTILE_999 = 99.9; private final int clientTelemetrySchedulingSec; public ClientTelemetry(Boolean acceleratedNetworking, String clientId, String processId, String 
userAgent, ConnectionMode connectionMode, String globalDatabaseAccountName, String applicationRegion, String hostEnvInfo, HttpClient httpClient, boolean isClientTelemetryEnabled ) { clientTelemetryInfo = new ClientTelemetryInfo(clientId, processId, userAgent, connectionMode, globalDatabaseAccountName, applicationRegion, hostEnvInfo, acceleratedNetworking); this.isClosed = false; this.httpClient = httpClient; this.isClientTelemetryEnabled = isClientTelemetryEnabled; this.clientTelemetrySchedulingSec = Configs.getClientTelemetrySchedulingInSec(); } public ClientTelemetryInfo getClientTelemetryInfo() { return clientTelemetryInfo; } public static void recordValue(DoubleHistogram doubleHistogram, long value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. ", ex); } } public static void recordValue(DoubleHistogram doubleHistogram, double value) { try { doubleHistogram.recordValue(value); } catch (Exception ex) { logger.warn("Error while recording value for client telemetry. 
", ex); } } public void init() { loadAzureVmMetaData(); sendClientTelemetry().subscribe(); } public void close() { this.isClosed = true; this.scheduledExecutorService.shutdown(); logger.debug("GlobalEndpointManager closed."); } private void loadAzureVmMetaData() { URI targetEndpoint = null; try { targetEndpoint = new URI(AZURE_VM_METADATA); } catch (URISyntaxException ex) { logger.info("Unable to parse azure vm metadata url"); return; } HashMap<String, String> headers = new HashMap<>(); headers.put("Metadata", "true"); HttpHeaders httpHeaders = new HttpHeaders(headers); HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, targetEndpoint, targetEndpoint.getPort(), httpHeaders); Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest); httpResponseMono.flatMap(response -> response.bodyAsString()).map(metadataJson -> Utils.parse(metadataJson, AzureVMMetadata.class)).doOnSuccess(azureVMMetadata -> { this.clientTelemetryInfo.setApplicationRegion(azureVMMetadata.getLocation()); this.clientTelemetryInfo.setHostEnvInfo(azureVMMetadata.getOsType() + "|" + azureVMMetadata.getSku() + "|" + azureVMMetadata.getVmSize() + "|" + azureVMMetadata.getAzEnvironment()); }).onErrorResume(throwable -> { logger.info("Unable to get azure vm metadata"); return Mono.empty(); }).subscribe(); } private void clearDataForNextRun() { this.clientTelemetryInfo.getOperationInfoMap().clear(); this.clientTelemetryInfo.getCacheRefreshInfoMap().clear(); for (DoubleHistogram histogram : this.clientTelemetryInfo.getSystemInfoMap().values()) { histogram.reset(); } } private void readHistogram() { ConcurrentDoubleHistogram cpuHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.CPU_MAX, ClientTelemetry.CPU_PRECISION); cpuHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryCpuLatestList()) { recordValue(cpuHistogram, val); } ReportPayload cpuReportPayload = new ReportPayload(CPU_NAME, CPU_UNIT); 
clientTelemetryInfo.getSystemInfoMap().put(cpuReportPayload, cpuHistogram); ConcurrentDoubleHistogram memoryHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.MEMORY_MAX_IN_MB, ClientTelemetry.MEMORY_PRECISION); memoryHistogram.setAutoResize(true); for(double val : CpuMemoryMonitor.getClientTelemetryMemoryLatestList()) { recordValue(memoryHistogram, val); } ReportPayload memoryReportPayload = new ReportPayload(MEMORY_NAME, MEMORY_UNIT); clientTelemetryInfo.getSystemInfoMap().put(memoryReportPayload, memoryHistogram); this.clientTelemetryInfo.setTimeStamp(Instant.now().toString()); for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getSystemInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getCacheRefreshInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } for (Map.Entry<ReportPayload, ConcurrentDoubleHistogram> entry : this.clientTelemetryInfo.getOperationInfoMap().entrySet()) { fillMetricsInfo(entry.getKey(), entry.getValue()); } } private void fillMetricsInfo(ReportPayload payload, DoubleHistogram histogram) { payload.getMetricInfo().setCount(histogram.getTotalCount()); payload.getMetricInfo().setMax(histogram.getMaxValue()); payload.getMetricInfo().setMin(histogram.getMinValue()); payload.getMetricInfo().setMean(histogram.getMean()); Map<Double, Double> percentile = new HashMap<>(); percentile.put(PERCENTILE_50, histogram.getValueAtPercentile(PERCENTILE_50)); percentile.put(PERCENTILE_90, histogram.getValueAtPercentile(PERCENTILE_90)); percentile.put(PERCENTILE_95, histogram.getValueAtPercentile(PERCENTILE_95)); percentile.put(PERCENTILE_99, histogram.getValueAtPercentile(PERCENTILE_99)); percentile.put(PERCENTILE_999, histogram.getValueAtPercentile(PERCENTILE_999)); payload.getMetricInfo().setPercentiles(percentile); } private static class DaemonThreadFactory implements 
ThreadFactory { @Override public Thread newThread(Runnable r) { Thread t = new Thread(r); t.setDaemon(true); return t; } } }
`FAILED_CLOSE_CONSUMER_PARTITION=Failed to close consumer for partition {}` — this format string contains only one placeholder, but the call passes two arguments (`partitionId` and `ex`). Note that `ClientLogger` treats a trailing `Throwable` argument as the exception to log rather than a format argument, so the extra argument may be intentional — please confirm which behavior is wanted here.
/**
 * Checks the connection state of the consumer that is pumping the given
 * partition. If the connection has been closed, the consumer is closed and the
 * partition is removed from the map of active partition pumps.
 *
 * @param ownership The partition ownership information for which the connection
 *     state will be verified.
 */
void verifyPartitionConnection(PartitionOwnership ownership) {
    final String partitionId = ownership.getPartitionId();

    // IMPROVED: single get() instead of containsKey() followed by two more get()
    // calls -- fewer lookups and no check-then-act window on the concurrent map.
    final EventHubConsumerAsyncClient consumerClient = partitionPumps.get(partitionId);
    if (consumerClient == null || !consumerClient.isConnectionClosed()) {
        return;
    }

    logger.info("Connection closed for {}, partition {}. Removing the consumer.",
        ownership.getEventHubName(), partitionId);
    try {
        consumerClient.close();
    } catch (Exception ex) {
        // Trailing throwable argument: presumably logged as the exception by
        // ClientLogger -- confirm against azure-core's ClientLogger contract.
        logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex);
    } finally {
        // Always drop the pump, even if close() failed.
        partitionPumps.remove(partitionId);
    }
}
logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex);
/**
 * Checks the connection state of the consumer that is pumping the given
 * partition. If the connection has been closed, the consumer is closed and the
 * partition is removed from the map of active partition pumps.
 *
 * @param ownership The partition ownership information for which the connection
 *     state will be verified.
 */
void verifyPartitionConnection(PartitionOwnership ownership) {
    final String partitionId = ownership.getPartitionId();

    // IMPROVED: single get() instead of containsKey() followed by two more get()
    // calls -- fewer lookups and no check-then-act window on the concurrent map.
    final EventHubConsumerAsyncClient consumerClient = partitionPumps.get(partitionId);
    if (consumerClient == null || !consumerClient.isConnectionClosed()) {
        return;
    }

    logger.info("Connection closed for {}, partition {}. Removing the consumer.",
        ownership.getEventHubName(), partitionId);
    try {
        consumerClient.close();
    } catch (Exception ex) {
        // Trailing throwable argument: presumably logged as the exception by
        // ClientLogger -- confirm against azure-core's ClientLogger contract.
        logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex);
    } finally {
        // Always drop the pump, even if close() failed.
        partitionPumps.remove(partitionId);
    }
}
class PartitionPumpManager { private final ClientLogger logger = new ClientLogger(PartitionPumpManager.class); private final CheckpointStore checkpointStore; private final Map<String, EventHubConsumerAsyncClient> partitionPumps = new ConcurrentHashMap<>(); private final Supplier<PartitionProcessor> partitionProcessorFactory; private final EventHubClientBuilder eventHubClientBuilder; private final TracerProvider tracerProvider; private final boolean trackLastEnqueuedEventProperties; private final Map<String, EventPosition> initialPartitionEventPosition; private final Duration maxWaitTime; private final int maxBatchSize; private final boolean batchReceiveMode; /** * Creates an instance of partition pump manager. * * @param checkpointStore The partition manager that is used to store and update checkpoints. * @param partitionProcessorFactory The partition processor factory that is used to create new instances of {@link * PartitionProcessor} when new partition pumps are started. * @param eventHubClientBuilder The client builder used to create new clients (and new connections) for each * partition processed by this {@link EventProcessorClient}. * @param trackLastEnqueuedEventProperties If set to {@code true}, all events received by this EventProcessorClient * will also include the last enqueued event properties for it's respective partitions. * @param tracerProvider The tracer implementation. * @param initialPartitionEventPosition Map of initial event positions for partition ids. * @param maxBatchSize The maximum batch size to receive per users' process handler invocation. * @param maxWaitTime The maximum time to wait to receive a batch or a single event. * @param batchReceiveMode The boolean value indicating if this processor is configured to receive in batches or * single events. 
*/ PartitionPumpManager(CheckpointStore checkpointStore, Supplier<PartitionProcessor> partitionProcessorFactory, EventHubClientBuilder eventHubClientBuilder, boolean trackLastEnqueuedEventProperties, TracerProvider tracerProvider, Map<String, EventPosition> initialPartitionEventPosition, int maxBatchSize, Duration maxWaitTime, boolean batchReceiveMode) { this.checkpointStore = checkpointStore; this.partitionProcessorFactory = partitionProcessorFactory; this.eventHubClientBuilder = eventHubClientBuilder; this.trackLastEnqueuedEventProperties = trackLastEnqueuedEventProperties; this.tracerProvider = tracerProvider; this.initialPartitionEventPosition = initialPartitionEventPosition; this.maxBatchSize = maxBatchSize; this.maxWaitTime = maxWaitTime; this.batchReceiveMode = batchReceiveMode; } /** * Stops all partition pumps that are actively consuming events. This method is invoked when the {@link * EventProcessorClient} is requested to stop. */ void stopAllPartitionPumps() { this.partitionPumps.forEach((partitionId, eventHubConsumer) -> { try { eventHubConsumer.close(); } catch (Exception ex) { logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex); } finally { partitionPumps.remove(partitionId); } }); } /** * Checks the state of the connection for the given partition. If the connection is closed, then this method will * remove the partition from the list of partition pumps. * * @param ownership The partition ownership information for which the connection state will be verified. */ /** * Starts a new partition pump for the newly claimed partition. If the partition already has an active partition * pump, this will not create a new consumer. * * @param claimedOwnership The details of partition ownership for which new partition pump is requested to start. 
*/ void startPartitionPump(PartitionOwnership claimedOwnership, Checkpoint checkpoint) { if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) { logger.verbose("Consumer is already running for this partition {}", claimedOwnership.getPartitionId()); return; } try { PartitionContext partitionContext = new PartitionContext(claimedOwnership.getFullyQualifiedNamespace(), claimedOwnership.getEventHubName(), claimedOwnership.getConsumerGroup(), claimedOwnership.getPartitionId()); PartitionProcessor partitionProcessor = this.partitionProcessorFactory.get(); InitializationContext initializationContext = new InitializationContext(partitionContext); partitionProcessor.initialize(initializationContext); EventPosition startFromEventPosition = null; if (checkpoint != null && checkpoint.getOffset() != null) { startFromEventPosition = EventPosition.fromOffset(checkpoint.getOffset()); } else if (checkpoint != null && checkpoint.getSequenceNumber() != null) { startFromEventPosition = EventPosition.fromSequenceNumber(checkpoint.getSequenceNumber()); } else if (initialPartitionEventPosition.containsKey(claimedOwnership.getPartitionId())) { startFromEventPosition = initialPartitionEventPosition.get(claimedOwnership.getPartitionId()); } else { startFromEventPosition = EventPosition.latest(); } logger.info("Starting event processing from {} for partition {}", startFromEventPosition, claimedOwnership.getPartitionId()); ReceiveOptions receiveOptions = new ReceiveOptions().setOwnerLevel(0L) .setTrackLastEnqueuedEventProperties(trackLastEnqueuedEventProperties); EventHubConsumerAsyncClient eventHubConsumer = eventHubClientBuilder.buildAsyncClient() .createConsumer(claimedOwnership.getConsumerGroup(), EventHubClientBuilder.DEFAULT_PREFETCH_COUNT); partitionPumps.put(claimedOwnership.getPartitionId(), eventHubConsumer); Flux<Flux<PartitionEvent>> partitionEventFlux; Flux<PartitionEvent> receiver = eventHubConsumer .receiveFromPartition(claimedOwnership.getPartitionId(), 
startFromEventPosition, receiveOptions) .doOnNext(partitionEvent -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("On next {}, {}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId(), partitionEvent.getData().getSequenceNumber()); } }); if (maxWaitTime != null) { partitionEventFlux = receiver .windowTimeout(maxBatchSize, maxWaitTime); } else { partitionEventFlux = receiver .window(maxBatchSize); } partitionEventFlux .concatMap(Flux::collectList) .publishOn(Schedulers.boundedElastic()) .subscribe(partitionEventBatch -> { processEvents(partitionContext, partitionProcessor, eventHubConsumer, partitionEventBatch); }, /* EventHubConsumer receive() returned an error */ ex -> handleError(claimedOwnership, eventHubConsumer, partitionProcessor, ex, partitionContext), () -> { partitionProcessor.close(new CloseContext(partitionContext, CloseReason.EVENT_PROCESSOR_SHUTDOWN)); cleanup(claimedOwnership, eventHubConsumer); }); } catch (Exception ex) { if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) { cleanup(claimedOwnership, partitionPumps.get(claimedOwnership.getPartitionId())); } throw logger.logExceptionAsError( new PartitionProcessorException( "Error occurred while starting partition pump for partition " + claimedOwnership.getPartitionId(), ex)); } } private void processEvent(PartitionContext partitionContext, PartitionProcessor partitionProcessor, EventHubConsumerAsyncClient eventHubConsumer, EventContext eventContext) { Context processSpanContext = null; EventData eventData = eventContext.getEventData(); if (eventData != null) { processSpanContext = startProcessTracingSpan(eventData, eventHubConsumer.getEventHubName(), eventHubConsumer.getFullyQualifiedNamespace()); if (processSpanContext.getData(SPAN_CONTEXT_KEY).isPresent()) { eventData.addContext(SPAN_CONTEXT_KEY, processSpanContext); } } try { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Processing event {}, {}", 
partitionContext.getEventHubName(), partitionContext.getPartitionId()); } partitionProcessor.processEvent(new EventContext(partitionContext, eventData, checkpointStore, eventContext.getLastEnqueuedEventProperties())); if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Completed processing event {}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId()); } endProcessTracingSpan(processSpanContext, Signal.complete()); } catch (Throwable throwable) { /* user code for event processing threw an exception - log and bubble up */ endProcessTracingSpan(processSpanContext, Signal.error(throwable)); throw logger.logExceptionAsError(new PartitionProcessorException("Error in event processing callback", throwable)); } } private void processEvents(PartitionContext partitionContext, PartitionProcessor partitionProcessor, EventHubConsumerAsyncClient eventHubConsumer, List<PartitionEvent> partitionEventBatch) { try { if (batchReceiveMode) { LastEnqueuedEventProperties[] lastEnqueuedEventProperties = new LastEnqueuedEventProperties[1]; List<EventData> eventDataList = partitionEventBatch.stream() .map(partitionEvent -> { lastEnqueuedEventProperties[0] = partitionEvent.getLastEnqueuedEventProperties(); return partitionEvent.getData(); }) .collect(Collectors.toList()); EventBatchContext eventBatchContext = new EventBatchContext(partitionContext, eventDataList, checkpointStore, lastEnqueuedEventProperties[0]); if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Processing event batch {}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId()); } partitionProcessor.processEventBatch(eventBatchContext); if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Completed processing event batch{}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId()); } } else { EventData eventData = (partitionEventBatch.size() == 1 ? 
partitionEventBatch.get(0).getData() : null); LastEnqueuedEventProperties lastEnqueuedEventProperties = (partitionEventBatch.size() == 1 ? partitionEventBatch.get(0).getLastEnqueuedEventProperties() : null); EventContext eventContext = new EventContext(partitionContext, eventData, checkpointStore, lastEnqueuedEventProperties); processEvent(partitionContext, partitionProcessor, eventHubConsumer, eventContext); } } catch (Throwable throwable) { /* user code for event processing threw an exception - log and bubble up */ throw logger.logExceptionAsError(new PartitionProcessorException("Error in event processing callback", throwable)); } } Map<String, EventHubConsumerAsyncClient> getPartitionPumps() { return this.partitionPumps; } private void handleError(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer, PartitionProcessor partitionProcessor, Throwable throwable, PartitionContext partitionContext) { boolean shouldRethrow = true; if (!(throwable instanceof PartitionProcessorException)) { shouldRethrow = false; logger.warning("Error receiving events from partition {}", partitionContext.getPartitionId(), throwable); partitionProcessor.processError(new ErrorContext(partitionContext, throwable)); } CloseReason closeReason = CloseReason.LOST_PARTITION_OWNERSHIP; partitionProcessor.close(new CloseContext(partitionContext, closeReason)); cleanup(claimedOwnership, eventHubConsumer); if (shouldRethrow) { PartitionProcessorException exception = (PartitionProcessorException) throwable; throw logger.logExceptionAsError(exception); } } private void cleanup(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer) { try { logger.info("Closing consumer for partition id {}", claimedOwnership.getPartitionId()); eventHubConsumer.close(); } finally { logger.info("Removing partition id {} from list of processing partitions", claimedOwnership.getPartitionId()); partitionPumps.remove(claimedOwnership.getPartitionId()); } } /* * Starts 
a new process tracing span and attaches the returned context to the EventData object for users. */ private Context startProcessTracingSpan(EventData eventData, String eventHubName, String fullyQualifiedNamespace) { Object diagnosticId = eventData.getProperties().get(DIAGNOSTIC_ID_KEY); if (diagnosticId == null || !tracerProvider.isEnabled()) { return Context.NONE; } Context spanContext = tracerProvider.extractContext(diagnosticId.toString(), Context.NONE) .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); spanContext = eventData.getEnqueuedTime() == null ? spanContext : spanContext.addData(MESSAGE_ENQUEUED_TIME, eventData.getEnqueuedTime().getEpochSecond()); return tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, spanContext, ProcessKind.PROCESS); } /* * Ends the process tracing span and the scope of that span. */ private void endProcessTracingSpan(Context processSpanContext, Signal<Void> signal) { if (processSpanContext == null) { return; } Optional<Object> spanScope = processSpanContext.getData(SCOPE_KEY); if (!spanScope.isPresent() || !tracerProvider.isEnabled()) { return; } if (spanScope.get() instanceof Closeable) { Closeable close = (Closeable) processSpanContext.getData(SCOPE_KEY).get(); try { close.close(); tracerProvider.endSpan(processSpanContext, signal); } catch (IOException ioException) { logger.error(Messages.EVENT_PROCESSOR_RUN_END, ioException); } } else { logger.warning(String.format(Locale.US, Messages.PROCESS_SPAN_SCOPE_TYPE_ERROR, spanScope.get() != null ? spanScope.getClass() : "null")); } } }
class PartitionPumpManager { private final ClientLogger logger = new ClientLogger(PartitionPumpManager.class); private final CheckpointStore checkpointStore; private final Map<String, EventHubConsumerAsyncClient> partitionPumps = new ConcurrentHashMap<>(); private final Supplier<PartitionProcessor> partitionProcessorFactory; private final EventHubClientBuilder eventHubClientBuilder; private final TracerProvider tracerProvider; private final boolean trackLastEnqueuedEventProperties; private final Map<String, EventPosition> initialPartitionEventPosition; private final Duration maxWaitTime; private final int maxBatchSize; private final boolean batchReceiveMode; /** * Creates an instance of partition pump manager. * * @param checkpointStore The partition manager that is used to store and update checkpoints. * @param partitionProcessorFactory The partition processor factory that is used to create new instances of {@link * PartitionProcessor} when new partition pumps are started. * @param eventHubClientBuilder The client builder used to create new clients (and new connections) for each * partition processed by this {@link EventProcessorClient}. * @param trackLastEnqueuedEventProperties If set to {@code true}, all events received by this EventProcessorClient * will also include the last enqueued event properties for it's respective partitions. * @param tracerProvider The tracer implementation. * @param initialPartitionEventPosition Map of initial event positions for partition ids. * @param maxBatchSize The maximum batch size to receive per users' process handler invocation. * @param maxWaitTime The maximum time to wait to receive a batch or a single event. * @param batchReceiveMode The boolean value indicating if this processor is configured to receive in batches or * single events. 
*/ PartitionPumpManager(CheckpointStore checkpointStore, Supplier<PartitionProcessor> partitionProcessorFactory, EventHubClientBuilder eventHubClientBuilder, boolean trackLastEnqueuedEventProperties, TracerProvider tracerProvider, Map<String, EventPosition> initialPartitionEventPosition, int maxBatchSize, Duration maxWaitTime, boolean batchReceiveMode) { this.checkpointStore = checkpointStore; this.partitionProcessorFactory = partitionProcessorFactory; this.eventHubClientBuilder = eventHubClientBuilder; this.trackLastEnqueuedEventProperties = trackLastEnqueuedEventProperties; this.tracerProvider = tracerProvider; this.initialPartitionEventPosition = initialPartitionEventPosition; this.maxBatchSize = maxBatchSize; this.maxWaitTime = maxWaitTime; this.batchReceiveMode = batchReceiveMode; } /** * Stops all partition pumps that are actively consuming events. This method is invoked when the {@link * EventProcessorClient} is requested to stop. */ void stopAllPartitionPumps() { this.partitionPumps.forEach((partitionId, eventHubConsumer) -> { try { eventHubConsumer.close(); } catch (Exception ex) { logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex); } finally { partitionPumps.remove(partitionId); } }); } /** * Checks the state of the connection for the given partition. If the connection is closed, then this method will * remove the partition from the list of partition pumps. * * @param ownership The partition ownership information for which the connection state will be verified. */ /** * Starts a new partition pump for the newly claimed partition. If the partition already has an active partition * pump, this will not create a new consumer. * * @param claimedOwnership The details of partition ownership for which new partition pump is requested to start. 
*/ void startPartitionPump(PartitionOwnership claimedOwnership, Checkpoint checkpoint) { if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) { logger.verbose("Consumer is already running for this partition {}", claimedOwnership.getPartitionId()); return; } try { PartitionContext partitionContext = new PartitionContext(claimedOwnership.getFullyQualifiedNamespace(), claimedOwnership.getEventHubName(), claimedOwnership.getConsumerGroup(), claimedOwnership.getPartitionId()); PartitionProcessor partitionProcessor = this.partitionProcessorFactory.get(); InitializationContext initializationContext = new InitializationContext(partitionContext); partitionProcessor.initialize(initializationContext); EventPosition startFromEventPosition = null; if (checkpoint != null && checkpoint.getOffset() != null) { startFromEventPosition = EventPosition.fromOffset(checkpoint.getOffset()); } else if (checkpoint != null && checkpoint.getSequenceNumber() != null) { startFromEventPosition = EventPosition.fromSequenceNumber(checkpoint.getSequenceNumber()); } else if (initialPartitionEventPosition.containsKey(claimedOwnership.getPartitionId())) { startFromEventPosition = initialPartitionEventPosition.get(claimedOwnership.getPartitionId()); } else { startFromEventPosition = EventPosition.latest(); } logger.info("Starting event processing from {} for partition {}", startFromEventPosition, claimedOwnership.getPartitionId()); ReceiveOptions receiveOptions = new ReceiveOptions().setOwnerLevel(0L) .setTrackLastEnqueuedEventProperties(trackLastEnqueuedEventProperties); EventHubConsumerAsyncClient eventHubConsumer = eventHubClientBuilder.buildAsyncClient() .createConsumer(claimedOwnership.getConsumerGroup(), EventHubClientBuilder.DEFAULT_PREFETCH_COUNT); partitionPumps.put(claimedOwnership.getPartitionId(), eventHubConsumer); Flux<Flux<PartitionEvent>> partitionEventFlux; Flux<PartitionEvent> receiver = eventHubConsumer .receiveFromPartition(claimedOwnership.getPartitionId(), 
startFromEventPosition, receiveOptions) .doOnNext(partitionEvent -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("On next {}, {}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId(), partitionEvent.getData().getSequenceNumber()); } }); if (maxWaitTime != null) { partitionEventFlux = receiver .windowTimeout(maxBatchSize, maxWaitTime); } else { partitionEventFlux = receiver .window(maxBatchSize); } partitionEventFlux .concatMap(Flux::collectList) .publishOn(Schedulers.boundedElastic()) .subscribe(partitionEventBatch -> { processEvents(partitionContext, partitionProcessor, eventHubConsumer, partitionEventBatch); }, /* EventHubConsumer receive() returned an error */ ex -> handleError(claimedOwnership, eventHubConsumer, partitionProcessor, ex, partitionContext), () -> { partitionProcessor.close(new CloseContext(partitionContext, CloseReason.EVENT_PROCESSOR_SHUTDOWN)); cleanup(claimedOwnership, eventHubConsumer); }); } catch (Exception ex) { if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) { cleanup(claimedOwnership, partitionPumps.get(claimedOwnership.getPartitionId())); } throw logger.logExceptionAsError( new PartitionProcessorException( "Error occurred while starting partition pump for partition " + claimedOwnership.getPartitionId(), ex)); } } private void processEvent(PartitionContext partitionContext, PartitionProcessor partitionProcessor, EventHubConsumerAsyncClient eventHubConsumer, EventContext eventContext) { Context processSpanContext = null; EventData eventData = eventContext.getEventData(); if (eventData != null) { processSpanContext = startProcessTracingSpan(eventData, eventHubConsumer.getEventHubName(), eventHubConsumer.getFullyQualifiedNamespace()); if (processSpanContext.getData(SPAN_CONTEXT_KEY).isPresent()) { eventData.addContext(SPAN_CONTEXT_KEY, processSpanContext); } } try { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Processing event {}, {}", 
partitionContext.getEventHubName(), partitionContext.getPartitionId()); } partitionProcessor.processEvent(new EventContext(partitionContext, eventData, checkpointStore, eventContext.getLastEnqueuedEventProperties())); if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Completed processing event {}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId()); } endProcessTracingSpan(processSpanContext, Signal.complete()); } catch (Throwable throwable) { /* user code for event processing threw an exception - log and bubble up */ endProcessTracingSpan(processSpanContext, Signal.error(throwable)); throw logger.logExceptionAsError(new PartitionProcessorException("Error in event processing callback", throwable)); } } private void processEvents(PartitionContext partitionContext, PartitionProcessor partitionProcessor, EventHubConsumerAsyncClient eventHubConsumer, List<PartitionEvent> partitionEventBatch) { try { if (batchReceiveMode) { LastEnqueuedEventProperties[] lastEnqueuedEventProperties = new LastEnqueuedEventProperties[1]; List<EventData> eventDataList = partitionEventBatch.stream() .map(partitionEvent -> { lastEnqueuedEventProperties[0] = partitionEvent.getLastEnqueuedEventProperties(); return partitionEvent.getData(); }) .collect(Collectors.toList()); EventBatchContext eventBatchContext = new EventBatchContext(partitionContext, eventDataList, checkpointStore, lastEnqueuedEventProperties[0]); if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Processing event batch {}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId()); } partitionProcessor.processEventBatch(eventBatchContext); if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Completed processing event batch{}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId()); } } else { EventData eventData = (partitionEventBatch.size() == 1 ? 
partitionEventBatch.get(0).getData() : null); LastEnqueuedEventProperties lastEnqueuedEventProperties = (partitionEventBatch.size() == 1 ? partitionEventBatch.get(0).getLastEnqueuedEventProperties() : null); EventContext eventContext = new EventContext(partitionContext, eventData, checkpointStore, lastEnqueuedEventProperties); processEvent(partitionContext, partitionProcessor, eventHubConsumer, eventContext); } } catch (Throwable throwable) { /* user code for event processing threw an exception - log and bubble up */ throw logger.logExceptionAsError(new PartitionProcessorException("Error in event processing callback", throwable)); } } Map<String, EventHubConsumerAsyncClient> getPartitionPumps() { return this.partitionPumps; } private void handleError(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer, PartitionProcessor partitionProcessor, Throwable throwable, PartitionContext partitionContext) { boolean shouldRethrow = true; if (!(throwable instanceof PartitionProcessorException)) { shouldRethrow = false; logger.warning("Error receiving events from partition {}", partitionContext.getPartitionId(), throwable); partitionProcessor.processError(new ErrorContext(partitionContext, throwable)); } CloseReason closeReason = CloseReason.LOST_PARTITION_OWNERSHIP; partitionProcessor.close(new CloseContext(partitionContext, closeReason)); cleanup(claimedOwnership, eventHubConsumer); if (shouldRethrow) { PartitionProcessorException exception = (PartitionProcessorException) throwable; throw logger.logExceptionAsError(exception); } } private void cleanup(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer) { try { logger.info("Closing consumer for partition id {}", claimedOwnership.getPartitionId()); eventHubConsumer.close(); } finally { logger.info("Removing partition id {} from list of processing partitions", claimedOwnership.getPartitionId()); partitionPumps.remove(claimedOwnership.getPartitionId()); } } /* * Starts 
a new process tracing span and attaches the returned context to the EventData object for users. */ private Context startProcessTracingSpan(EventData eventData, String eventHubName, String fullyQualifiedNamespace) { Object diagnosticId = eventData.getProperties().get(DIAGNOSTIC_ID_KEY); if (diagnosticId == null || !tracerProvider.isEnabled()) { return Context.NONE; } Context spanContext = tracerProvider.extractContext(diagnosticId.toString(), Context.NONE) .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); spanContext = eventData.getEnqueuedTime() == null ? spanContext : spanContext.addData(MESSAGE_ENQUEUED_TIME, eventData.getEnqueuedTime().getEpochSecond()); return tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, spanContext, ProcessKind.PROCESS); } /* * Ends the process tracing span and the scope of that span. */ private void endProcessTracingSpan(Context processSpanContext, Signal<Void> signal) { if (processSpanContext == null) { return; } Optional<Object> spanScope = processSpanContext.getData(SCOPE_KEY); if (!spanScope.isPresent() || !tracerProvider.isEnabled()) { return; } if (spanScope.get() instanceof Closeable) { Closeable close = (Closeable) processSpanContext.getData(SCOPE_KEY).get(); try { close.close(); tracerProvider.endSpan(processSpanContext, signal); } catch (IOException ioException) { logger.error(Messages.EVENT_PROCESSOR_RUN_END, ioException); } } else { logger.warning(String.format(Locale.US, Messages.PROCESS_SPAN_SCOPE_TYPE_ERROR, spanScope.get() != null ? spanScope.getClass() : "null")); } } }
For logs, the last arg can be an exception that isn't part of the message param.
void verifyPartitionConnection(PartitionOwnership ownership) { String partitionId = ownership.getPartitionId(); if (partitionPumps.containsKey(partitionId)) { EventHubConsumerAsyncClient consumerClient = partitionPumps.get(partitionId); if (consumerClient.isConnectionClosed()) { logger.info("Connection closed for {}, partition {}. Removing the consumer.", ownership.getEventHubName(), partitionId); try { partitionPumps.get(partitionId).close(); } catch (Exception ex) { logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex); } finally { partitionPumps.remove(partitionId); } } } }
logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex);
void verifyPartitionConnection(PartitionOwnership ownership) { String partitionId = ownership.getPartitionId(); if (partitionPumps.containsKey(partitionId)) { EventHubConsumerAsyncClient consumerClient = partitionPumps.get(partitionId); if (consumerClient.isConnectionClosed()) { logger.info("Connection closed for {}, partition {}. Removing the consumer.", ownership.getEventHubName(), partitionId); try { partitionPumps.get(partitionId).close(); } catch (Exception ex) { logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex); } finally { partitionPumps.remove(partitionId); } } } }
class PartitionPumpManager { private final ClientLogger logger = new ClientLogger(PartitionPumpManager.class); private final CheckpointStore checkpointStore; private final Map<String, EventHubConsumerAsyncClient> partitionPumps = new ConcurrentHashMap<>(); private final Supplier<PartitionProcessor> partitionProcessorFactory; private final EventHubClientBuilder eventHubClientBuilder; private final TracerProvider tracerProvider; private final boolean trackLastEnqueuedEventProperties; private final Map<String, EventPosition> initialPartitionEventPosition; private final Duration maxWaitTime; private final int maxBatchSize; private final boolean batchReceiveMode; /** * Creates an instance of partition pump manager. * * @param checkpointStore The partition manager that is used to store and update checkpoints. * @param partitionProcessorFactory The partition processor factory that is used to create new instances of {@link * PartitionProcessor} when new partition pumps are started. * @param eventHubClientBuilder The client builder used to create new clients (and new connections) for each * partition processed by this {@link EventProcessorClient}. * @param trackLastEnqueuedEventProperties If set to {@code true}, all events received by this EventProcessorClient * will also include the last enqueued event properties for it's respective partitions. * @param tracerProvider The tracer implementation. * @param initialPartitionEventPosition Map of initial event positions for partition ids. * @param maxBatchSize The maximum batch size to receive per users' process handler invocation. * @param maxWaitTime The maximum time to wait to receive a batch or a single event. * @param batchReceiveMode The boolean value indicating if this processor is configured to receive in batches or * single events. 
*/ PartitionPumpManager(CheckpointStore checkpointStore, Supplier<PartitionProcessor> partitionProcessorFactory, EventHubClientBuilder eventHubClientBuilder, boolean trackLastEnqueuedEventProperties, TracerProvider tracerProvider, Map<String, EventPosition> initialPartitionEventPosition, int maxBatchSize, Duration maxWaitTime, boolean batchReceiveMode) { this.checkpointStore = checkpointStore; this.partitionProcessorFactory = partitionProcessorFactory; this.eventHubClientBuilder = eventHubClientBuilder; this.trackLastEnqueuedEventProperties = trackLastEnqueuedEventProperties; this.tracerProvider = tracerProvider; this.initialPartitionEventPosition = initialPartitionEventPosition; this.maxBatchSize = maxBatchSize; this.maxWaitTime = maxWaitTime; this.batchReceiveMode = batchReceiveMode; } /** * Stops all partition pumps that are actively consuming events. This method is invoked when the {@link * EventProcessorClient} is requested to stop. */ void stopAllPartitionPumps() { this.partitionPumps.forEach((partitionId, eventHubConsumer) -> { try { eventHubConsumer.close(); } catch (Exception ex) { logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex); } finally { partitionPumps.remove(partitionId); } }); } /** * Checks the state of the connection for the given partition. If the connection is closed, then this method will * remove the partition from the list of partition pumps. * * @param ownership The partition ownership information for which the connection state will be verified. */ /** * Starts a new partition pump for the newly claimed partition. If the partition already has an active partition * pump, this will not create a new consumer. * * @param claimedOwnership The details of partition ownership for which new partition pump is requested to start. 
*/ void startPartitionPump(PartitionOwnership claimedOwnership, Checkpoint checkpoint) { if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) { logger.verbose("Consumer is already running for this partition {}", claimedOwnership.getPartitionId()); return; } try { PartitionContext partitionContext = new PartitionContext(claimedOwnership.getFullyQualifiedNamespace(), claimedOwnership.getEventHubName(), claimedOwnership.getConsumerGroup(), claimedOwnership.getPartitionId()); PartitionProcessor partitionProcessor = this.partitionProcessorFactory.get(); InitializationContext initializationContext = new InitializationContext(partitionContext); partitionProcessor.initialize(initializationContext); EventPosition startFromEventPosition = null; if (checkpoint != null && checkpoint.getOffset() != null) { startFromEventPosition = EventPosition.fromOffset(checkpoint.getOffset()); } else if (checkpoint != null && checkpoint.getSequenceNumber() != null) { startFromEventPosition = EventPosition.fromSequenceNumber(checkpoint.getSequenceNumber()); } else if (initialPartitionEventPosition.containsKey(claimedOwnership.getPartitionId())) { startFromEventPosition = initialPartitionEventPosition.get(claimedOwnership.getPartitionId()); } else { startFromEventPosition = EventPosition.latest(); } logger.info("Starting event processing from {} for partition {}", startFromEventPosition, claimedOwnership.getPartitionId()); ReceiveOptions receiveOptions = new ReceiveOptions().setOwnerLevel(0L) .setTrackLastEnqueuedEventProperties(trackLastEnqueuedEventProperties); EventHubConsumerAsyncClient eventHubConsumer = eventHubClientBuilder.buildAsyncClient() .createConsumer(claimedOwnership.getConsumerGroup(), EventHubClientBuilder.DEFAULT_PREFETCH_COUNT); partitionPumps.put(claimedOwnership.getPartitionId(), eventHubConsumer); Flux<Flux<PartitionEvent>> partitionEventFlux; Flux<PartitionEvent> receiver = eventHubConsumer .receiveFromPartition(claimedOwnership.getPartitionId(), 
startFromEventPosition, receiveOptions) .doOnNext(partitionEvent -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("On next {}, {}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId(), partitionEvent.getData().getSequenceNumber()); } }); if (maxWaitTime != null) { partitionEventFlux = receiver .windowTimeout(maxBatchSize, maxWaitTime); } else { partitionEventFlux = receiver .window(maxBatchSize); } partitionEventFlux .concatMap(Flux::collectList) .publishOn(Schedulers.boundedElastic()) .subscribe(partitionEventBatch -> { processEvents(partitionContext, partitionProcessor, eventHubConsumer, partitionEventBatch); }, /* EventHubConsumer receive() returned an error */ ex -> handleError(claimedOwnership, eventHubConsumer, partitionProcessor, ex, partitionContext), () -> { partitionProcessor.close(new CloseContext(partitionContext, CloseReason.EVENT_PROCESSOR_SHUTDOWN)); cleanup(claimedOwnership, eventHubConsumer); }); } catch (Exception ex) { if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) { cleanup(claimedOwnership, partitionPumps.get(claimedOwnership.getPartitionId())); } throw logger.logExceptionAsError( new PartitionProcessorException( "Error occurred while starting partition pump for partition " + claimedOwnership.getPartitionId(), ex)); } } private void processEvent(PartitionContext partitionContext, PartitionProcessor partitionProcessor, EventHubConsumerAsyncClient eventHubConsumer, EventContext eventContext) { Context processSpanContext = null; EventData eventData = eventContext.getEventData(); if (eventData != null) { processSpanContext = startProcessTracingSpan(eventData, eventHubConsumer.getEventHubName(), eventHubConsumer.getFullyQualifiedNamespace()); if (processSpanContext.getData(SPAN_CONTEXT_KEY).isPresent()) { eventData.addContext(SPAN_CONTEXT_KEY, processSpanContext); } } try { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Processing event {}, {}", 
partitionContext.getEventHubName(), partitionContext.getPartitionId()); } partitionProcessor.processEvent(new EventContext(partitionContext, eventData, checkpointStore, eventContext.getLastEnqueuedEventProperties())); if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Completed processing event {}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId()); } endProcessTracingSpan(processSpanContext, Signal.complete()); } catch (Throwable throwable) { /* user code for event processing threw an exception - log and bubble up */ endProcessTracingSpan(processSpanContext, Signal.error(throwable)); throw logger.logExceptionAsError(new PartitionProcessorException("Error in event processing callback", throwable)); } } private void processEvents(PartitionContext partitionContext, PartitionProcessor partitionProcessor, EventHubConsumerAsyncClient eventHubConsumer, List<PartitionEvent> partitionEventBatch) { try { if (batchReceiveMode) { LastEnqueuedEventProperties[] lastEnqueuedEventProperties = new LastEnqueuedEventProperties[1]; List<EventData> eventDataList = partitionEventBatch.stream() .map(partitionEvent -> { lastEnqueuedEventProperties[0] = partitionEvent.getLastEnqueuedEventProperties(); return partitionEvent.getData(); }) .collect(Collectors.toList()); EventBatchContext eventBatchContext = new EventBatchContext(partitionContext, eventDataList, checkpointStore, lastEnqueuedEventProperties[0]); if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Processing event batch {}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId()); } partitionProcessor.processEventBatch(eventBatchContext); if (logger.canLogAtLevel(LogLevel.VERBOSE)) { logger.verbose("Completed processing event batch{}, {}", partitionContext.getEventHubName(), partitionContext.getPartitionId()); } } else { EventData eventData = (partitionEventBatch.size() == 1 ? 
partitionEventBatch.get(0).getData() : null); LastEnqueuedEventProperties lastEnqueuedEventProperties = (partitionEventBatch.size() == 1 ? partitionEventBatch.get(0).getLastEnqueuedEventProperties() : null); EventContext eventContext = new EventContext(partitionContext, eventData, checkpointStore, lastEnqueuedEventProperties); processEvent(partitionContext, partitionProcessor, eventHubConsumer, eventContext); } } catch (Throwable throwable) { /* user code for event processing threw an exception - log and bubble up */ throw logger.logExceptionAsError(new PartitionProcessorException("Error in event processing callback", throwable)); } } Map<String, EventHubConsumerAsyncClient> getPartitionPumps() { return this.partitionPumps; } private void handleError(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer, PartitionProcessor partitionProcessor, Throwable throwable, PartitionContext partitionContext) { boolean shouldRethrow = true; if (!(throwable instanceof PartitionProcessorException)) { shouldRethrow = false; logger.warning("Error receiving events from partition {}", partitionContext.getPartitionId(), throwable); partitionProcessor.processError(new ErrorContext(partitionContext, throwable)); } CloseReason closeReason = CloseReason.LOST_PARTITION_OWNERSHIP; partitionProcessor.close(new CloseContext(partitionContext, closeReason)); cleanup(claimedOwnership, eventHubConsumer); if (shouldRethrow) { PartitionProcessorException exception = (PartitionProcessorException) throwable; throw logger.logExceptionAsError(exception); } } private void cleanup(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer) { try { logger.info("Closing consumer for partition id {}", claimedOwnership.getPartitionId()); eventHubConsumer.close(); } finally { logger.info("Removing partition id {} from list of processing partitions", claimedOwnership.getPartitionId()); partitionPumps.remove(claimedOwnership.getPartitionId()); } } /* * Starts 
a new process tracing span and attaches the returned context to the EventData object for users. */ private Context startProcessTracingSpan(EventData eventData, String eventHubName, String fullyQualifiedNamespace) { Object diagnosticId = eventData.getProperties().get(DIAGNOSTIC_ID_KEY); if (diagnosticId == null || !tracerProvider.isEnabled()) { return Context.NONE; } Context spanContext = tracerProvider.extractContext(diagnosticId.toString(), Context.NONE) .addData(ENTITY_PATH_KEY, eventHubName) .addData(HOST_NAME_KEY, fullyQualifiedNamespace) .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE); spanContext = eventData.getEnqueuedTime() == null ? spanContext : spanContext.addData(MESSAGE_ENQUEUED_TIME, eventData.getEnqueuedTime().getEpochSecond()); return tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, spanContext, ProcessKind.PROCESS); } /* * Ends the process tracing span and the scope of that span. */ private void endProcessTracingSpan(Context processSpanContext, Signal<Void> signal) { if (processSpanContext == null) { return; } Optional<Object> spanScope = processSpanContext.getData(SCOPE_KEY); if (!spanScope.isPresent() || !tracerProvider.isEnabled()) { return; } if (spanScope.get() instanceof Closeable) { Closeable close = (Closeable) processSpanContext.getData(SCOPE_KEY).get(); try { close.close(); tracerProvider.endSpan(processSpanContext, signal); } catch (IOException ioException) { logger.error(Messages.EVENT_PROCESSOR_RUN_END, ioException); } } else { logger.warning(String.format(Locale.US, Messages.PROCESS_SPAN_SCOPE_TYPE_ERROR, spanScope.get() != null ? spanScope.getClass() : "null")); } } }
class PartitionPumpManager {
    private final ClientLogger logger = new ClientLogger(PartitionPumpManager.class);
    private final CheckpointStore checkpointStore;
    // Active consumers keyed by partition id. ConcurrentHashMap: pumps are started by the
    // load-balancing loop and removed from receive-pipeline callbacks on other threads.
    private final Map<String, EventHubConsumerAsyncClient> partitionPumps = new ConcurrentHashMap<>();
    private final Supplier<PartitionProcessor> partitionProcessorFactory;
    private final EventHubClientBuilder eventHubClientBuilder;
    private final TracerProvider tracerProvider;
    private final boolean trackLastEnqueuedEventProperties;
    private final Map<String, EventPosition> initialPartitionEventPosition;
    private final Duration maxWaitTime;
    private final int maxBatchSize;
    private final boolean batchReceiveMode;

    /**
     * Creates an instance of partition pump manager.
     *
     * @param checkpointStore The partition manager that is used to store and update checkpoints.
     * @param partitionProcessorFactory The partition processor factory that is used to create new instances of
     * {@link PartitionProcessor} when new partition pumps are started.
     * @param eventHubClientBuilder The client builder used to create new clients (and new connections) for each
     * partition processed by this {@link EventProcessorClient}.
     * @param trackLastEnqueuedEventProperties If set to {@code true}, all events received by this EventProcessorClient
     * will also include the last enqueued event properties for its respective partitions.
     * @param tracerProvider The tracer implementation.
     * @param initialPartitionEventPosition Map of initial event positions for partition ids.
     * @param maxBatchSize The maximum batch size to receive per users' process handler invocation.
     * @param maxWaitTime The maximum time to wait to receive a batch or a single event.
     * @param batchReceiveMode The boolean value indicating if this processor is configured to receive in batches or
     * single events.
     */
    PartitionPumpManager(CheckpointStore checkpointStore, Supplier<PartitionProcessor> partitionProcessorFactory,
        EventHubClientBuilder eventHubClientBuilder, boolean trackLastEnqueuedEventProperties,
        TracerProvider tracerProvider, Map<String, EventPosition> initialPartitionEventPosition, int maxBatchSize,
        Duration maxWaitTime, boolean batchReceiveMode) {
        this.checkpointStore = checkpointStore;
        this.partitionProcessorFactory = partitionProcessorFactory;
        this.eventHubClientBuilder = eventHubClientBuilder;
        this.trackLastEnqueuedEventProperties = trackLastEnqueuedEventProperties;
        this.tracerProvider = tracerProvider;
        this.initialPartitionEventPosition = initialPartitionEventPosition;
        this.maxBatchSize = maxBatchSize;
        this.maxWaitTime = maxWaitTime;
        this.batchReceiveMode = batchReceiveMode;
    }

    /**
     * Stops all partition pumps that are actively consuming events. This method is invoked when the {@link
     * EventProcessorClient} is requested to stop.
     */
    void stopAllPartitionPumps() {
        this.partitionPumps.forEach((partitionId, eventHubConsumer) -> {
            try {
                eventHubConsumer.close();
            } catch (Exception ex) {
                logger.warning(Messages.FAILED_CLOSE_CONSUMER_PARTITION, partitionId, ex);
            } finally {
                // Remove even when close() failed, so a later claim can start a fresh pump.
                partitionPumps.remove(partitionId);
            }
        });
    }

    /**
     * Starts a new partition pump for the newly claimed partition. If the partition already has an active partition
     * pump, this will not create a new consumer.
     *
     * @param claimedOwnership The details of partition ownership for which new partition pump is requested to start.
     * @param checkpoint The last known checkpoint for this partition, or {@code null} if none exists. Offset takes
     * precedence over sequence number when deciding the starting position.
     */
    void startPartitionPump(PartitionOwnership claimedOwnership, Checkpoint checkpoint) {
        if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) {
            logger.verbose("Consumer is already running for this partition {}", claimedOwnership.getPartitionId());
            return;
        }

        try {
            final PartitionContext partitionContext = new PartitionContext(
                claimedOwnership.getFullyQualifiedNamespace(), claimedOwnership.getEventHubName(),
                claimedOwnership.getConsumerGroup(), claimedOwnership.getPartitionId());
            final PartitionProcessor partitionProcessor = this.partitionProcessorFactory.get();

            InitializationContext initializationContext = new InitializationContext(partitionContext);
            partitionProcessor.initialize(initializationContext);

            // Starting position precedence: checkpoint offset > checkpoint sequence number
            // > user-supplied initial position > latest.
            final EventPosition startFromEventPosition;
            if (checkpoint != null && checkpoint.getOffset() != null) {
                startFromEventPosition = EventPosition.fromOffset(checkpoint.getOffset());
            } else if (checkpoint != null && checkpoint.getSequenceNumber() != null) {
                startFromEventPosition = EventPosition.fromSequenceNumber(checkpoint.getSequenceNumber());
            } else if (initialPartitionEventPosition.containsKey(claimedOwnership.getPartitionId())) {
                startFromEventPosition = initialPartitionEventPosition.get(claimedOwnership.getPartitionId());
            } else {
                startFromEventPosition = EventPosition.latest();
            }
            logger.info("Starting event processing from {} for partition {}", startFromEventPosition,
                claimedOwnership.getPartitionId());

            ReceiveOptions receiveOptions = new ReceiveOptions().setOwnerLevel(0L)
                .setTrackLastEnqueuedEventProperties(trackLastEnqueuedEventProperties);

            EventHubConsumerAsyncClient eventHubConsumer = eventHubClientBuilder.buildAsyncClient()
                .createConsumer(claimedOwnership.getConsumerGroup(), EventHubClientBuilder.DEFAULT_PREFETCH_COUNT);

            // Register before subscribing so stopAllPartitionPumps/cleanup can find this consumer.
            partitionPumps.put(claimedOwnership.getPartitionId(), eventHubConsumer);

            Flux<Flux<PartitionEvent>> partitionEventFlux;
            Flux<PartitionEvent> receiver = eventHubConsumer
                .receiveFromPartition(claimedOwnership.getPartitionId(), startFromEventPosition, receiveOptions)
                .doOnNext(partitionEvent -> {
                    if (logger.canLogAtLevel(LogLevel.VERBOSE)) {
                        logger.verbose("On next {}, {}, {}", partitionContext.getEventHubName(),
                            partitionContext.getPartitionId(), partitionEvent.getData().getSequenceNumber());
                    }
                });

            // windowTimeout emits a (possibly short) batch when either maxBatchSize is reached
            // or maxWaitTime elapses; plain window only when maxBatchSize is reached.
            if (maxWaitTime != null) {
                partitionEventFlux = receiver.windowTimeout(maxBatchSize, maxWaitTime);
            } else {
                partitionEventFlux = receiver.window(maxBatchSize);
            }
            partitionEventFlux
                .concatMap(Flux::collectList)
                .publishOn(Schedulers.boundedElastic())
                .subscribe(partitionEventBatch -> {
                    processEvents(partitionContext, partitionProcessor, eventHubConsumer, partitionEventBatch);
                },
                    /* EventHubConsumer receive() returned an error */
                    ex -> handleError(claimedOwnership, eventHubConsumer, partitionProcessor, ex, partitionContext),
                    () -> {
                        partitionProcessor.close(new CloseContext(partitionContext,
                            CloseReason.EVENT_PROCESSOR_SHUTDOWN));
                        cleanup(claimedOwnership, eventHubConsumer);
                    });
        } catch (Exception ex) {
            if (partitionPumps.containsKey(claimedOwnership.getPartitionId())) {
                cleanup(claimedOwnership, partitionPumps.get(claimedOwnership.getPartitionId()));
            }
            throw logger.logExceptionAsError(
                new PartitionProcessorException(
                    "Error occurred while starting partition pump for partition "
                        + claimedOwnership.getPartitionId(), ex));
        }
    }

    /**
     * Dispatches a single event (possibly with {@code null} data, e.g. an empty timeout window) to the user's
     * {@link PartitionProcessor#processEvent(EventContext)} callback, wrapped in a process tracing span.
     */
    private void processEvent(PartitionContext partitionContext, PartitionProcessor partitionProcessor,
        EventHubConsumerAsyncClient eventHubConsumer, EventContext eventContext) {

        Context processSpanContext = null;
        EventData eventData = eventContext.getEventData();
        if (eventData != null) {
            processSpanContext = startProcessTracingSpan(eventData, eventHubConsumer.getEventHubName(),
                eventHubConsumer.getFullyQualifiedNamespace());
            if (processSpanContext.getData(SPAN_CONTEXT_KEY).isPresent()) {
                eventData.addContext(SPAN_CONTEXT_KEY, processSpanContext);
            }
        }
        try {
            if (logger.canLogAtLevel(LogLevel.VERBOSE)) {
                logger.verbose("Processing event {}, {}",
                    partitionContext.getEventHubName(), partitionContext.getPartitionId());
            }
            partitionProcessor.processEvent(new EventContext(partitionContext, eventData, checkpointStore,
                eventContext.getLastEnqueuedEventProperties()));
            if (logger.canLogAtLevel(LogLevel.VERBOSE)) {
                logger.verbose("Completed processing event {}, {}",
                    partitionContext.getEventHubName(), partitionContext.getPartitionId());
            }
            endProcessTracingSpan(processSpanContext, Signal.complete());
        } catch (Throwable throwable) {
            /* user code for event processing threw an exception - log and bubble up */
            endProcessTracingSpan(processSpanContext, Signal.error(throwable));
            throw logger.logExceptionAsError(new PartitionProcessorException("Error in event processing callback",
                throwable));
        }
    }

    /**
     * Dispatches a collected window of events either as one {@link EventBatchContext} (batch mode) or as a single
     * {@link EventContext} (single-event mode; an empty window yields a {@code null} event).
     */
    private void processEvents(PartitionContext partitionContext, PartitionProcessor partitionProcessor,
        EventHubConsumerAsyncClient eventHubConsumer, List<PartitionEvent> partitionEventBatch) {
        try {
            if (batchReceiveMode) {
                // Single-element array lets the lambda record the latest last-enqueued properties.
                LastEnqueuedEventProperties[] lastEnqueuedEventProperties = new LastEnqueuedEventProperties[1];
                List<EventData> eventDataList = partitionEventBatch.stream()
                    .map(partitionEvent -> {
                        lastEnqueuedEventProperties[0] = partitionEvent.getLastEnqueuedEventProperties();
                        return partitionEvent.getData();
                    })
                    .collect(Collectors.toList());
                EventBatchContext eventBatchContext = new EventBatchContext(partitionContext, eventDataList,
                    checkpointStore, lastEnqueuedEventProperties[0]);
                if (logger.canLogAtLevel(LogLevel.VERBOSE)) {
                    logger.verbose("Processing event batch {}, {}",
                        partitionContext.getEventHubName(), partitionContext.getPartitionId());
                }
                partitionProcessor.processEventBatch(eventBatchContext);
                if (logger.canLogAtLevel(LogLevel.VERBOSE)) {
                    // FIX: added missing space in log format ("batch{}" -> "batch {}").
                    logger.verbose("Completed processing event batch {}, {}",
                        partitionContext.getEventHubName(), partitionContext.getPartitionId());
                }
            } else {
                EventData eventData = (partitionEventBatch.size() == 1
                    ? partitionEventBatch.get(0).getData() : null);
                LastEnqueuedEventProperties lastEnqueuedEventProperties = (partitionEventBatch.size() == 1
                    ? partitionEventBatch.get(0).getLastEnqueuedEventProperties() : null);
                EventContext eventContext = new EventContext(partitionContext, eventData, checkpointStore,
                    lastEnqueuedEventProperties);
                processEvent(partitionContext, partitionProcessor, eventHubConsumer, eventContext);
            }
        } catch (Throwable throwable) {
            /* user code for event processing threw an exception - log and bubble up */
            throw logger.logExceptionAsError(new PartitionProcessorException("Error in event processing callback",
                throwable));
        }
    }

    /** Returns the live map of partition id to consumer; exposed for tests and the load balancer. */
    Map<String, EventHubConsumerAsyncClient> getPartitionPumps() {
        return this.partitionPumps;
    }

    /**
     * Handles a terminal error from the receive pipeline: notifies the user processor (unless the error already came
     * from user code), closes the partition, and rethrows user-code errors.
     */
    private void handleError(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer,
        PartitionProcessor partitionProcessor, Throwable throwable, PartitionContext partitionContext) {
        boolean shouldRethrow = true;
        if (!(throwable instanceof PartitionProcessorException)) {
            // Transport-level failure: surface via processError instead of rethrowing.
            shouldRethrow = false;
            logger.warning("Error receiving events from partition {}", partitionContext.getPartitionId(), throwable);
            partitionProcessor.processError(new ErrorContext(partitionContext, throwable));
        }

        CloseReason closeReason = CloseReason.LOST_PARTITION_OWNERSHIP;
        partitionProcessor.close(new CloseContext(partitionContext, closeReason));
        cleanup(claimedOwnership, eventHubConsumer);

        if (shouldRethrow) {
            PartitionProcessorException exception = (PartitionProcessorException) throwable;
            throw logger.logExceptionAsError(exception);
        }
    }

    /** Closes the consumer and, even if closing fails, removes the partition from the pump map. */
    private void cleanup(PartitionOwnership claimedOwnership, EventHubConsumerAsyncClient eventHubConsumer) {
        try {
            logger.info("Closing consumer for partition id {}", claimedOwnership.getPartitionId());
            eventHubConsumer.close();
        } finally {
            logger.info("Removing partition id {} from list of processing partitions",
                claimedOwnership.getPartitionId());
            partitionPumps.remove(claimedOwnership.getPartitionId());
        }
    }

    /*
     * Starts a new process tracing span and attaches the returned context to the EventData object for users.
     */
    private Context startProcessTracingSpan(EventData eventData, String eventHubName, String fullyQualifiedNamespace) {
        Object diagnosticId = eventData.getProperties().get(DIAGNOSTIC_ID_KEY);
        if (diagnosticId == null || !tracerProvider.isEnabled()) {
            return Context.NONE;
        }
        Context spanContext = tracerProvider.extractContext(diagnosticId.toString(), Context.NONE)
            .addData(ENTITY_PATH_KEY, eventHubName)
            .addData(HOST_NAME_KEY, fullyQualifiedNamespace)
            .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE);
        spanContext = eventData.getEnqueuedTime() == null
            ? spanContext
            : spanContext.addData(MESSAGE_ENQUEUED_TIME, eventData.getEnqueuedTime().getEpochSecond());
        return tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, spanContext, ProcessKind.PROCESS);
    }

    /*
     * Ends the process tracing span and the scope of that span.
     */
    private void endProcessTracingSpan(Context processSpanContext, Signal<Void> signal) {
        if (processSpanContext == null) {
            return;
        }

        Optional<Object> spanScope = processSpanContext.getData(SCOPE_KEY);
        if (!spanScope.isPresent() || !tracerProvider.isEnabled()) {
            return;
        }
        if (spanScope.get() instanceof Closeable) {
            // Reuse the value already fetched instead of re-querying SCOPE_KEY.
            Closeable close = (Closeable) spanScope.get();
            try {
                close.close();
                tracerProvider.endSpan(processSpanContext, signal);
            } catch (IOException ioException) {
                logger.error(Messages.EVENT_PROCESSOR_RUN_END, ioException);
            }
        } else {
            // FIX: log the runtime type of the scope object. The original logged
            // spanScope.getClass() (always java.util.Optional) and guarded with
            // spanScope.get() != null, which is provably true after the isPresent() check.
            logger.warning(String.format(Locale.US, Messages.PROCESS_SPAN_SCOPE_TYPE_ERROR,
                spanScope.get().getClass()));
        }
    }
}
Can you extract this into a constant (e.g. DEFAULT_POLL_INTERVAL) and reuse it in all the other long-running operations as well? I believe the only other operation right now is beginCreateSearch, but you will also be adding beginReleasePhoneNumber, and the same default can be shared by all of them.
PhoneNumberSearch> beginPurchaseSearch(String searchId, Duration pollInterval) { Objects.requireNonNull(searchId, "'searchId' can not be null."); if (pollInterval == null) { pollInterval = Duration.ofSeconds(5); } return new PollerFlux<PhoneNumberSearch, PhoneNumberSearch>(pollInterval, purchaseSearchActivationOperation(searchId), purchaseSearchPollOperation(searchId), (activationResponse, pollingContext) -> Mono.error(new RuntimeException("Cancellation is not supported")), purchaseSearchFetchResultOperation()); }
pollInterval = Duration.ofSeconds(5);
new RuntimeException("Cancellation is not supported")), purchaseSearchFetchResultOperation()); } private Function<PollingContext<Void>, Mono<Void>> purchaseSearchActivationOperation(String searchId) { return (pollingContext) -> { return purchaseSearch(searchId); }; }
class PhoneNumberAsyncClient { private final ClientLogger logger = new ClientLogger(PhoneNumberAsyncClient.class); private final PhoneNumberAdministrationsImpl phoneNumberAdministrations; PhoneNumberAsyncClient(PhoneNumberAdminClientImpl phoneNumberAdminClient) { this.phoneNumberAdministrations = phoneNumberAdminClient.getPhoneNumberAdministrations(); } /** * Gets the list of the acquired phone numbers. * * @param locale A language-locale pairing which will be used to localise the names of countries. * @return A {@link PagedFlux} of {@link AcquiredPhoneNumber} instances representing acquired telephone numbers. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<AcquiredPhoneNumber> listAllPhoneNumbers(String locale) { return listAllPhoneNumbers(locale, null); } PagedFlux<AcquiredPhoneNumber> listAllPhoneNumbers(String locale, Context context) { try { if (context == null) { return phoneNumberAdministrations.getAllPhoneNumbersAsync(locale, null, null); } else { return phoneNumberAdministrations.getAllPhoneNumbersAsync(locale, null, null, context); } } catch (RuntimeException ex) { return pagedFluxError(logger, ex); } } /** * Gets a list of the supported area codes. * * @param locationType The type of location information required by the plan. * @param countryCode The ISO 3166-2 country code. * @param phonePlanId The plan id from which to search area codes. * @param locationOptions A {@link List} of {@link LocationOptionsQuery} for querying the area codes. * @return A {@link Mono} containing a {@link AreaCodes} representing area codes. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<AreaCodes> getAllAreaCodes( String locationType, String countryCode, String phonePlanId, List<LocationOptionsQuery> locationOptions) { return getAllAreaCodesWithResponse(locationType, countryCode, phonePlanId, locationOptions) .flatMap(FluxUtil::toMono); } /** * Gets a list of the supported area codes. 
* * @param locationType The type of location information required by the plan. * @param countryCode The ISO 3166-2 country code. * @param phonePlanId The plan id from which to search area codes. * @param locationOptions A {@link List} of {@link LocationOptionsQuery} for querying the area codes. * @return A {@link Mono} containing a {@link Response} whose {@link Response * a {@link AreaCodes} representing area codes. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<AreaCodes>> getAllAreaCodesWithResponse( String locationType, String countryCode, String phonePlanId, List<LocationOptionsQuery> locationOptions) { return getAllAreaCodesWithResponse(locationType, countryCode, phonePlanId, locationOptions, null); } Mono<Response<AreaCodes>> getAllAreaCodesWithResponse( String locationType, String countryCode, String phonePlanId, List<LocationOptionsQuery> locationOptions, Context context) { Objects.requireNonNull(locationType, "'locationType' cannot be null."); Objects.requireNonNull(countryCode, "'countryCode' cannot be null."); Objects.requireNonNull(phonePlanId, "'phonePlanId' cannot be null."); LocationOptionsQueries locationOptionsQueries = new LocationOptionsQueries(); locationOptionsQueries.setLocationOptions(locationOptions); try { if (context == null) { return phoneNumberAdministrations.getAllAreaCodesWithResponseAsync( locationType, countryCode, phonePlanId, locationOptionsQueries); } else { return phoneNumberAdministrations.getAllAreaCodesWithResponseAsync( locationType, countryCode, phonePlanId, locationOptionsQueries, context); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the information for a phone number capabilities update * * @param capabilitiesId ID of the capabilities update. * @return A {@link Mono} containing * a {@link UpdatePhoneNumberCapabilitiesResponse} representing the capabilities update. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UpdatePhoneNumberCapabilitiesResponse> getCapabilitiesUpdate(String capabilitiesId) { return getCapabilitiesUpdateWithResponse(capabilitiesId).flatMap(FluxUtil::toMono); } /** * Gets the information for a phone number capabilities update * * @param capabilitiesId ID of the capabilities update. * @return A {@link Mono} containing a {@link Response} whose {@link Response * a {@link UpdatePhoneNumberCapabilitiesResponse} representing the capabilities update. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UpdatePhoneNumberCapabilitiesResponse>> getCapabilitiesUpdateWithResponse( String capabilitiesId) { return getCapabilitiesUpdateWithResponse(capabilitiesId, null); } Mono<Response<UpdatePhoneNumberCapabilitiesResponse>> getCapabilitiesUpdateWithResponse( String capabilitiesId, Context context) { Objects.requireNonNull(capabilitiesId, "'capabilitiesId' cannot be null."); try { if (context == null) { return phoneNumberAdministrations.getCapabilitiesUpdateWithResponseAsync(capabilitiesId); } else { return phoneNumberAdministrations.getCapabilitiesUpdateWithResponseAsync(capabilitiesId, context); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Adds or removes phone number capabilities. * * @param phoneNumberCapabilitiesUpdate {@link Map} with the updates to perform * @return A {@link Mono} containing * a {@link UpdatePhoneNumberCapabilitiesResponse} representing the capabilities update. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<UpdateNumberCapabilitiesResponse> updateCapabilities( Map<PhoneNumber, NumberUpdateCapabilities> phoneNumberCapabilitiesUpdate) { return updateCapabilitiesWithResponse(phoneNumberCapabilitiesUpdate).flatMap(FluxUtil::toMono); } /** * Adds or removes phone number capabilities. 
* * @param phoneNumberCapabilitiesUpdate {@link Map} with the updates to perform * @return A {@link Mono} containing a {@link Response} whose {@link Response * a {@link UpdatePhoneNumberCapabilitiesResponse} representing the capabilities update. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<UpdateNumberCapabilitiesResponse>> updateCapabilitiesWithResponse( Map<PhoneNumber, NumberUpdateCapabilities> phoneNumberCapabilitiesUpdate) { return updateCapabilitiesWithResponse(phoneNumberCapabilitiesUpdate, null); } Mono<Response<UpdateNumberCapabilitiesResponse>> updateCapabilitiesWithResponse( Map<PhoneNumber, NumberUpdateCapabilities> phoneNumberCapabilitiesUpdate, Context context) { Objects.requireNonNull(phoneNumberCapabilitiesUpdate, "'phoneNumberCapabilitiesUpdate' cannot be null."); Map<String, NumberUpdateCapabilities> capabilitiesMap = new HashMap<>(); for (Map.Entry<PhoneNumber, NumberUpdateCapabilities> entry : phoneNumberCapabilitiesUpdate.entrySet()) { capabilitiesMap.put(entry.getKey().getValue(), entry.getValue()); } UpdateNumberCapabilitiesRequest updateNumberCapabilitiesRequest = new UpdateNumberCapabilitiesRequest(); updateNumberCapabilitiesRequest.setPhoneNumberCapabilitiesUpdate(capabilitiesMap); try { if (context == null) { return phoneNumberAdministrations.updateCapabilitiesWithResponseAsync( updateNumberCapabilitiesRequest); } else { return phoneNumberAdministrations.updateCapabilitiesWithResponseAsync( updateNumberCapabilitiesRequest, context); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets a list of supported countries. * * @param locale A language-locale pairing which will be used to localise the names of countries. * @return A {@link PagedFlux} of {@link PhoneNumberCountry} instances representing supported countries. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<PhoneNumberCountry> listAllSupportedCountries(String locale) { return listAllSupportedCountries(locale, null); } PagedFlux<PhoneNumberCountry> listAllSupportedCountries(String locale, Context context) { try { if (context == null) { return phoneNumberAdministrations.getAllSupportedCountriesAsync(locale, null, null); } else { return phoneNumberAdministrations.getAllSupportedCountriesAsync(locale, null, null, context); } } catch (RuntimeException ex) { return pagedFluxError(logger, ex); } } /** * Gets the configuration of a given phone number. * * @param phoneNumber A {@link PhoneNumber} representing the phone number. * @return A {@link Mono} containing a {@link NumberConfigurationResponse} representing the configuration. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<NumberConfigurationResponse> getNumberConfiguration(PhoneNumber phoneNumber) { return getNumberConfigurationWithResponse(phoneNumber).flatMap(FluxUtil::toMono); } /** * Gets the configuration of a given phone number. * * @param phoneNumber A {@link PhoneNumber} representing the phone number. * @return A {@link Mono} containing a {@link Response} whose {@link Response * a {@link NumberConfigurationResponse} representing the configuration. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<NumberConfigurationResponse>> getNumberConfigurationWithResponse(PhoneNumber phoneNumber) { return getNumberConfigurationWithResponse(phoneNumber, null); } Mono<Response<NumberConfigurationResponse>> getNumberConfigurationWithResponse( PhoneNumber phoneNumber, Context context) { Objects.requireNonNull(phoneNumber, "'phoneNumber' cannot be null."); NumberConfigurationPhoneNumber configurationPhoneNumber = new NumberConfigurationPhoneNumber(); configurationPhoneNumber.setPhoneNumber(phoneNumber.getValue()); try { if (context == null) { return phoneNumberAdministrations.getNumberConfigurationWithResponseAsync( configurationPhoneNumber); } else { return phoneNumberAdministrations.getNumberConfigurationWithResponseAsync( configurationPhoneNumber, context); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Associates a phone number with a PSTN Configuration. * * @param phoneNumber A {@link PhoneNumber} representing the phone number. * @param pstnConfiguration A {@link PstnConfiguration} containing the pstn number configuration options. * @return A {@link Mono} for the asynchronous return */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> configureNumber(PhoneNumber phoneNumber, PstnConfiguration pstnConfiguration) { return configureNumberWithResponse(phoneNumber, pstnConfiguration).flatMap(FluxUtil::toMono); } /** * Associates a phone number with a PSTN Configuration. * * @param phoneNumber A {@link PhoneNumber} representing the phone number. * @param pstnConfiguration A {@link PstnConfiguration} containing the pstn number configuration options. * @return A {@link Mono} containing a {@link Response} for the operation. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> configureNumberWithResponse( PhoneNumber phoneNumber, PstnConfiguration pstnConfiguration) { return configureNumberWithResponse(phoneNumber, pstnConfiguration, null); } Mono<Response<Void>> configureNumberWithResponse( PhoneNumber phoneNumber, PstnConfiguration pstnConfiguration, Context context) { Objects.requireNonNull(phoneNumber, "'phoneNumber' cannot be null."); Objects.requireNonNull(pstnConfiguration, "'pstnConfiguration' cannot be null."); NumberConfiguration numberConfiguration = new NumberConfiguration(); numberConfiguration.setPhoneNumber(phoneNumber.getValue()).setPstnConfiguration(pstnConfiguration); try { if (context == null) { return phoneNumberAdministrations.configureNumberWithResponseAsync(numberConfiguration); } else { return phoneNumberAdministrations.configureNumberWithResponseAsync(numberConfiguration, context); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Removes the PSTN Configuration from a phone number. * * @param phoneNumber A {@link PhoneNumber} representing the phone number. * @return A {@link Mono} for the asynchronous return */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> unconfigureNumber(PhoneNumber phoneNumber) { return unconfigureNumberWithResponse(phoneNumber).flatMap(FluxUtil::toMono); } /** * Removes the PSTN Configuration from a phone number. * * @param phoneNumber A {@link PhoneNumber} representing the phone number. * @return A {@link Mono} containing a {@link Response} for the operation. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> unconfigureNumberWithResponse(PhoneNumber phoneNumber) { return unconfigureNumberWithResponse(phoneNumber, null); } Mono<Response<Void>> unconfigureNumberWithResponse(PhoneNumber phoneNumber, Context context) { Objects.requireNonNull(phoneNumber, "'phoneNumber' cannot be null."); NumberConfigurationPhoneNumber configurationPhoneNumber = new NumberConfigurationPhoneNumber(); configurationPhoneNumber.setPhoneNumber(phoneNumber.getValue()); try { if (context == null) { return phoneNumberAdministrations.unconfigureNumberWithResponseAsync(configurationPhoneNumber); } else { return phoneNumberAdministrations.unconfigureNumberWithResponseAsync(configurationPhoneNumber, context); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets a list of phone plan groups for the given country. * * @param countryCode The ISO 3166-2 country code. * @param locale A language-locale pairing which will be used to localise the names of countries. * @param includeRateInformation Flag to indicate if rate information should be returned. 
* @return A {@link PagedFlux} of {@link PhonePlanGroup} instances representing phone plan groups */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<PhonePlanGroup> listPhonePlanGroups( String countryCode, String locale, Boolean includeRateInformation) { return listPhonePlanGroups(countryCode, locale, includeRateInformation, null); } PagedFlux<PhonePlanGroup> listPhonePlanGroups( String countryCode, String locale, Boolean includeRateInformation, Context context) { Objects.requireNonNull(countryCode, "'countryCode' cannot be null."); try { if (context == null) { return phoneNumberAdministrations.getPhonePlanGroupsAsync( countryCode, locale, includeRateInformation, null, null); } else { return phoneNumberAdministrations.getPhonePlanGroupsAsync( countryCode, locale, includeRateInformation, null, null, context); } } catch (RuntimeException ex) { return pagedFluxError(logger, ex); } } /** * Gets a list of phone plans for a phone plan group * * @param countryCode The ISO 3166-2 country code. * @param phonePlanGroupId ID of the Phone Plan Group * @param locale A language-locale pairing which will be used to localise the names of countries. 
* @return A {@link PagedFlux} of {@link PhonePlan} instances representing phone plans
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<PhonePlan> listPhonePlans(String countryCode, String phonePlanGroupId, String locale) {
    return listPhonePlans(countryCode, phonePlanGroupId, locale, null);
}

// Context-threading overload: a null context means "let the generated client use its
// default context"; otherwise the caller-supplied Context is passed to the service call.
PagedFlux<PhonePlan> listPhonePlans(String countryCode, String phonePlanGroupId, String locale, Context context) {
    Objects.requireNonNull(countryCode, "'countryCode' cannot be null.");
    Objects.requireNonNull(phonePlanGroupId, "'phonePlanGroupId' cannot be null.");
    try {
        if (context == null) {
            return phoneNumberAdministrations.getPhonePlansAsync(
                countryCode, phonePlanGroupId, locale, null, null);
        } else {
            return phoneNumberAdministrations.getPhonePlansAsync(
                countryCode, phonePlanGroupId, locale, null, null, context);
        }
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the reactive error channel.
        return pagedFluxError(logger, ex);
    }
}

/**
 * Gets the location options for a phone plan.
 *
 * @param countryCode The ISO 3166-2 country code.
 * @param phonePlanGroupId ID of the Phone Plan Group
 * @param phonePlanId ID of the Phone Plan
 * @param locale A language-locale pairing which will be used to localise the names of countries.
 * @return A {@link Mono} containing a {@link LocationOptionsResponse} representing the location options
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<LocationOptionsResponse> getPhonePlanLocationOptions(
    String countryCode, String phonePlanGroupId, String phonePlanId, String locale) {
    return getPhonePlanLocationOptionsWithResponse(countryCode, phonePlanGroupId, phonePlanId, locale)
        .flatMap(FluxUtil::toMono);
}

/**
 * Gets the location options for a phone plan.
 *
 * @param countryCode The ISO 3166-2 country code.
 * @param phonePlanGroupId ID of the Phone Plan Group
 * @param phonePlanId ID of the Phone Plan
 * @param locale A language-locale pairing which will be used to localise the names of countries.
 * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} is
 * a {@link LocationOptionsResponse} representing the location options
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<LocationOptionsResponse>> getPhonePlanLocationOptionsWithResponse(
    String countryCode, String phonePlanGroupId, String phonePlanId, String locale) {
    return getPhonePlanLocationOptionsWithResponse(
        countryCode, phonePlanGroupId, phonePlanId, locale, null);
}

// Context-threading overload (null context == client default).
Mono<Response<LocationOptionsResponse>> getPhonePlanLocationOptionsWithResponse(
    String countryCode, String phonePlanGroupId, String phonePlanId, String locale, Context context) {
    Objects.requireNonNull(countryCode, "'countryCode' cannot be null.");
    Objects.requireNonNull(phonePlanGroupId, "'phonePlanGroupId' cannot be null.");
    Objects.requireNonNull(phonePlanId, "'phonePlanId' cannot be null.");
    try {
        if (context == null) {
            return phoneNumberAdministrations.getPhonePlanLocationOptionsWithResponseAsync(
                countryCode, phonePlanGroupId, phonePlanId, locale);
        } else {
            return phoneNumberAdministrations.getPhonePlanLocationOptionsWithResponseAsync(
                countryCode, phonePlanGroupId, phonePlanId, locale, context);
        }
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Gets a release by ID.
 *
 * @param releaseId ID of the Release
 * @return A {@link Mono} containing a {@link PhoneNumberRelease} representing the release.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<PhoneNumberRelease> getReleaseById(String releaseId) {
    return getReleaseByIdWithResponse(releaseId).flatMap(FluxUtil::toMono);
}

/**
 * Gets a release by ID.
 *
 * @param releaseId ID of the Release
 * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} is
 * a {@link PhoneNumberRelease} representing the release.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<PhoneNumberRelease>> getReleaseByIdWithResponse(String releaseId) {
    return getReleaseByIdWithResponse(releaseId, null);
}

// Context-threading overload (null context == client default).
Mono<Response<PhoneNumberRelease>> getReleaseByIdWithResponse(String releaseId, Context context) {
    Objects.requireNonNull(releaseId, "'releaseId' cannot be null.");
    try {
        if (context == null) {
            return phoneNumberAdministrations.getReleaseByIdWithResponseAsync(releaseId);
        } else {
            return phoneNumberAdministrations.getReleaseByIdWithResponseAsync(releaseId, context);
        }
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Creates a release for the given phone numbers.
 *
 * @param phoneNumbers {@link List} of {@link PhoneNumber} objects with the phone numbers.
 * @return A {@link Mono} containing a {@link ReleaseResponse} representing the release.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ReleaseResponse> releasePhoneNumbers(List<PhoneNumber> phoneNumbers) {
    return releasePhoneNumbersWithResponse(phoneNumbers).flatMap(FluxUtil::toMono);
}

/**
 * Creates a release for the given phone numbers.
 *
 * @param phoneNumbers {@link List} of {@link PhoneNumber} objects with the phone numbers.
 * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} is
 * a {@link ReleaseResponse} representing the release.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ReleaseResponse>> releasePhoneNumbersWithResponse(List<PhoneNumber> phoneNumbers) {
    return releasePhoneNumbersWithResponse(phoneNumbers, null);
}

// Context-threading overload (null context == client default).
Mono<Response<ReleaseResponse>> releasePhoneNumbersWithResponse(List<PhoneNumber> phoneNumbers, Context context) {
    Objects.requireNonNull(phoneNumbers, "'phoneNumbers' cannot be null.");
    // The request body carries the plain string value extracted from each PhoneNumber.
    List<String> phoneNumberStrings = phoneNumbers.stream().map(PhoneNumber::getValue).collect(Collectors.toList());
    ReleaseRequest releaseRequest = new ReleaseRequest();
    releaseRequest.setPhoneNumbers(phoneNumberStrings);
    try {
        if (context == null) {
            return phoneNumberAdministrations.releasePhoneNumbersWithResponseAsync(releaseRequest);
        } else {
            return phoneNumberAdministrations.releasePhoneNumbersWithResponseAsync(releaseRequest, context);
        }
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Gets the list of all releases
 *
 * @return A {@link PagedFlux} of {@link PhoneNumberEntity} instances representing releases.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<PhoneNumberEntity> listAllReleases() {
    return listAllReleases(null);
}

// Context-threading overload (null context == client default).
PagedFlux<PhoneNumberEntity> listAllReleases(Context context) {
    try {
        if (context == null) {
            return phoneNumberAdministrations.getAllReleasesAsync(null, null);
        } else {
            return phoneNumberAdministrations.getAllReleasesAsync(null, null, context);
        }
    } catch (RuntimeException ex) {
        return pagedFluxError(logger, ex);
    }
}

/**
 * Gets a search by ID.
 *
 * @param searchId ID of the search
 * @return A {@link Mono} containing a {@link PhoneNumberSearch} representing the search.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<PhoneNumberSearch> getSearchById(String searchId) {
    return getSearchByIdWithResponse(searchId).flatMap(FluxUtil::toMono);
}

/**
 * Gets a search by ID.
 *
 * @param searchId ID of the search
 * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} is
 * a {@link PhoneNumberSearch} representing the search.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<PhoneNumberSearch>> getSearchByIdWithResponse(String searchId) {
    return getSearchByIdWithResponse(searchId, null);
}

// Context-threading overload (null context == client default).
Mono<Response<PhoneNumberSearch>> getSearchByIdWithResponse(String searchId, Context context) {
    Objects.requireNonNull(searchId, "'searchId' cannot be null.");
    try {
        if (context == null) {
            return phoneNumberAdministrations.getSearchByIdWithResponseAsync(searchId);
        } else {
            return phoneNumberAdministrations.getSearchByIdWithResponseAsync(searchId, context);
        }
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Create a phone number search.
 *
 * @param searchOptions A {@link CreateSearchOptions} with the search options
 * @return A {@link Mono} containing a {@link CreateSearchResponse} representing the search.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CreateSearchResponse> createSearch(CreateSearchOptions searchOptions) {
    return createSearchWithResponse(searchOptions).flatMap(FluxUtil::toMono);
}

/**
 * Create a phone number search.
 *
 * @param searchOptions A {@link CreateSearchOptions} with the search options
 * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} is
 * a {@link CreateSearchResponse} representing the search.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<CreateSearchResponse>> createSearchWithResponse(CreateSearchOptions searchOptions) {
    return createSearchWithResponse(searchOptions, null);
}

// Context-threading overload (null context == client default).
Mono<Response<CreateSearchResponse>> createSearchWithResponse(CreateSearchOptions searchOptions, Context context) {
    Objects.requireNonNull(searchOptions, "'searchOptions' cannot be null.");
    try {
        if (context == null) {
            return phoneNumberAdministrations.createSearchWithResponseAsync(searchOptions);
        } else {
            return phoneNumberAdministrations.createSearchWithResponseAsync(searchOptions, context);
        }
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Gets the list of all searches
 *
 * @return A {@link PagedFlux} of {@link PhoneNumberEntity} instances representing searches.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<PhoneNumberEntity> listAllSearches() {
    return listAllSearches(null);
}

// Context-threading overload (null context == client default).
PagedFlux<PhoneNumberEntity> listAllSearches(Context context) {
    try {
        if (context == null) {
            return phoneNumberAdministrations.getAllSearchesAsync(null, null);
        } else {
            return phoneNumberAdministrations.getAllSearchesAsync(null, null, context);
        }
    } catch (RuntimeException ex) {
        return pagedFluxError(logger, ex);
    }
}

/**
 * Cancels the search. This means existing numbers in the search will be made available.
 *
 * @param searchId ID of the search
 * @return A {@link Mono} for the asynchronous return
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> cancelSearch(String searchId) {
    return cancelSearchWithResponse(searchId).flatMap(FluxUtil::toMono);
}

/**
 * Cancels the search. This means existing numbers in the search will be made available.
 *
 * @param searchId ID of the search
 * @return A {@link Mono} containing a {@link Response} for the operation
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> cancelSearchWithResponse(String searchId) {
    return cancelSearchWithResponse(searchId, null);
}

// Context-threading overload (null context == client default).
Mono<Response<Void>> cancelSearchWithResponse(String searchId, Context context) {
    Objects.requireNonNull(searchId, "'searchId' cannot be null.");
    try {
        if (context == null) {
            return phoneNumberAdministrations.cancelSearchWithResponseAsync(searchId);
        } else {
            return phoneNumberAdministrations.cancelSearchWithResponseAsync(searchId, context);
        }
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Purchases the phone number search.
 *
 * @param searchId ID of the search
 * @return A {@link Mono} for the asynchronous return
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> purchaseSearch(String searchId) {
    return purchaseSearchWithResponse(searchId).flatMap(FluxUtil::toMono);
}

/**
 * Purchases the phone number search.
 *
 * @param searchId ID of the search
 * @return A {@link Mono} containing a {@link Response} for the operation
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> purchaseSearchWithResponse(String searchId) {
    return purchaseSearchWithResponse(searchId, null);
}

// Context-threading overload (null context == client default).
Mono<Response<Void>> purchaseSearchWithResponse(String searchId, Context context) {
    Objects.requireNonNull(searchId, "'searchId' cannot be null.");
    try {
        if (context == null) {
            return phoneNumberAdministrations.purchaseSearchWithResponseAsync(searchId);
        } else {
            return phoneNumberAdministrations.purchaseSearchWithResponseAsync(searchId, context);
        }
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Initiates a search and returns a {@link PhoneNumberSearch} usable by other functions
 * This function returns a Long Running Operation poller that allows you to
 * wait indefinitely until the operation is complete.
* * @param options A {@link CreateSearchOptions} with the search options * @param pollInterval The time our long running operation will keep on polling * until it gets a result from the server * @return A {@link PollerFlux} object with the search result */ @ServiceMethod(returns = ReturnType.COLLECTION) public PollerFlux<PhoneNumberSearch, PhoneNumberSearch> beginCreateSearch( CreateSearchOptions options, Duration pollInterval) { Objects.requireNonNull(options, "'options' cannot be null."); Objects.requireNonNull(pollInterval, "'pollInterval' cannot be null."); return new PollerFlux<PhoneNumberSearch, PhoneNumberSearch>(pollInterval, createSearchActivationOperation(options), createSearchPollOperation(), cancelSearchOperation(), createSearchFetchResultOperation()); } private Function<PollingContext<PhoneNumberSearch>, Mono<PhoneNumberSearch>> createSearchActivationOperation(CreateSearchOptions options) { return (pollingContext) -> { Mono<PhoneNumberSearch> response = createSearch(options).flatMap(createSearchResponse -> { String searchId = createSearchResponse.getSearchId(); Mono<PhoneNumberSearch> phoneNumberSearch = getSearchById(searchId); return phoneNumberSearch; }); return response; }; } private Function<PollingContext<PhoneNumberSearch>, Mono<PollResponse<PhoneNumberSearch>>> createSearchPollOperation() { return pollingContext -> getSearchById(pollingContext.getLatestResponse().getValue().getSearchId()) .flatMap(getSearchResponse -> { SearchStatus status = getSearchResponse.getStatus(); if (status.equals(SearchStatus.EXPIRED) || status.equals(SearchStatus.CANCELLED) || status.equals(SearchStatus.RESERVED)) { return Mono.just(new PollResponse<>( LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, getSearchResponse)); } if (status.equals(SearchStatus.ERROR)) { return Mono.just(new PollResponse<>( LongRunningOperationStatus.FAILED, getSearchResponse)); } return Mono.just(new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, getSearchResponse)); }); } private 
BiFunction<PollingContext<PhoneNumberSearch>, PollResponse<PhoneNumberSearch>, Mono<PhoneNumberSearch>> cancelSearchOperation() { return (pollingContext, firstResponse) -> { cancelSearch(pollingContext.getLatestResponse().getValue().getSearchId()); return Mono.just(pollingContext.getLatestResponse().getValue()); }; } private Function<PollingContext<PhoneNumberSearch>, Mono<PhoneNumberSearch>> createSearchFetchResultOperation() { return pollingContext -> { return Mono.just(pollingContext.getLatestResponse().getValue()); }; } /** * Initiates a purchase process and polls until a terminal state is reached * This function returns a Long Running Operation poller that allows you to * wait indefinitely until the operation is complete. * * @param searchId ID of the search * @param pollInterval The time our long running operation will keep on polling * until it gets a result from the server * @return A {@link PollerFlux} object. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PollerFlux<PhoneNumberSearch, PhoneNumberSearch> beginPurchaseSearch(String searchId, Duration pollInterval) { Objects.requireNonNull(searchId, "'searchId' can not be null."); if (pollInterval == null) { pollInterval = Duration.ofSeconds(5); } return new PollerFlux<PhoneNumberSearch, PhoneNumberSearch>(pollInterval, purchaseSearchActivationOperation(searchId), purchaseSearchPollOperation(searchId), (activationResponse, pollingContext) -> Mono.error( private Function<PollingContext<PhoneNumberSearch>, Mono<PollResponse<PhoneNumberSearch>>> purchaseSearchPollOperation(String searchId) { return (pollingContext) -> getSearchById(searchId) .flatMap(getSearchResponse -> { SearchStatus statusResponse = getSearchResponse.getStatus(); if (statusResponse.equals(SearchStatus.EXPIRED) || statusResponse.equals(SearchStatus.SUCCESS)) { return Mono.just(new PollResponse<>( LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, getSearchResponse)); } if (statusResponse.equals(SearchStatus.ERROR)) { return 
Mono.just(new PollResponse<>( LongRunningOperationStatus.FAILED, getSearchResponse)); } return Mono.just(new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, getSearchResponse)); }); } private Function<PollingContext<PhoneNumberSearch>, Mono<PhoneNumberSearch>> purchaseSearchFetchResultOperation() { return pollingContext -> { return Mono.just(pollingContext.getLatestResponse().getValue()); }; } }
class PhoneNumberAsyncClient { private final ClientLogger logger = new ClientLogger(PhoneNumberAsyncClient.class); private final PhoneNumberAdministrationsImpl phoneNumberAdministrations; private final Duration defaultPollInterval = Duration.ofSeconds(1); PhoneNumberAsyncClient(PhoneNumberAdminClientImpl phoneNumberAdminClient) { this.phoneNumberAdministrations = phoneNumberAdminClient.getPhoneNumberAdministrations(); } /** * Gets the list of the acquired phone numbers. * * @param locale A language-locale pairing which will be used to localise the names of countries. * @return A {@link PagedFlux} of {@link AcquiredPhoneNumber} instances representing acquired telephone numbers. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<AcquiredPhoneNumber> listAllPhoneNumbers(String locale) { return listAllPhoneNumbers(locale, null); } PagedFlux<AcquiredPhoneNumber> listAllPhoneNumbers(String locale, Context context) { try { if (context == null) { return phoneNumberAdministrations.getAllPhoneNumbersAsync(locale, null, null); } else { return phoneNumberAdministrations.getAllPhoneNumbersAsync(locale, null, null, context); } } catch (RuntimeException ex) { return pagedFluxError(logger, ex); } } /** * Gets a list of the supported area codes. * * @param locationType The type of location information required by the plan. * @param countryCode The ISO 3166-2 country code. * @param phonePlanId The plan id from which to search area codes. * @param locationOptions A {@link List} of {@link LocationOptionsQuery} for querying the area codes. * @return A {@link Mono} containing a {@link AreaCodes} representing area codes. 
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<AreaCodes> getAllAreaCodes(
    String locationType, String countryCode, String phonePlanId, List<LocationOptionsQuery> locationOptions) {
    return getAllAreaCodesWithResponse(locationType, countryCode, phonePlanId, locationOptions)
        .flatMap(FluxUtil::toMono);
}

/**
 * Gets a list of the supported area codes.
 *
 * @param locationType The type of location information required by the plan.
 * @param countryCode The ISO 3166-2 country code.
 * @param phonePlanId The plan id from which to search area codes.
 * @param locationOptions A {@link List} of {@link LocationOptionsQuery} for querying the area codes.
 * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} is
 * a {@link AreaCodes} representing area codes.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<AreaCodes>> getAllAreaCodesWithResponse(
    String locationType, String countryCode, String phonePlanId, List<LocationOptionsQuery> locationOptions) {
    return getAllAreaCodesWithResponse(locationType, countryCode, phonePlanId, locationOptions, null);
}

// Context-threading overload (null context == client default).
Mono<Response<AreaCodes>> getAllAreaCodesWithResponse(
    String locationType, String countryCode, String phonePlanId,
    List<LocationOptionsQuery> locationOptions, Context context) {
    Objects.requireNonNull(locationType, "'locationType' cannot be null.");
    Objects.requireNonNull(countryCode, "'countryCode' cannot be null.");
    Objects.requireNonNull(phonePlanId, "'phonePlanId' cannot be null.");
    // The queries wrapper tolerates a null locationOptions list.
    LocationOptionsQueries locationOptionsQueries = new LocationOptionsQueries();
    locationOptionsQueries.setLocationOptions(locationOptions);
    try {
        if (context == null) {
            return phoneNumberAdministrations.getAllAreaCodesWithResponseAsync(
                locationType, countryCode, phonePlanId, locationOptionsQueries);
        } else {
            return phoneNumberAdministrations.getAllAreaCodesWithResponseAsync(
                locationType, countryCode, phonePlanId, locationOptionsQueries, context);
        }
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Gets the information for a phone number capabilities update
 *
 * @param capabilitiesId ID of the capabilities update.
 * @return A {@link Mono} containing
 * a {@link UpdatePhoneNumberCapabilitiesResponse} representing the capabilities update.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UpdatePhoneNumberCapabilitiesResponse> getCapabilitiesUpdate(String capabilitiesId) {
    return getCapabilitiesUpdateWithResponse(capabilitiesId).flatMap(FluxUtil::toMono);
}

/**
 * Gets the information for a phone number capabilities update
 *
 * @param capabilitiesId ID of the capabilities update.
 * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} is
 * a {@link UpdatePhoneNumberCapabilitiesResponse} representing the capabilities update.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UpdatePhoneNumberCapabilitiesResponse>> getCapabilitiesUpdateWithResponse(
    String capabilitiesId) {
    return getCapabilitiesUpdateWithResponse(capabilitiesId, null);
}

// Context-threading overload (null context == client default).
Mono<Response<UpdatePhoneNumberCapabilitiesResponse>> getCapabilitiesUpdateWithResponse(
    String capabilitiesId, Context context) {
    Objects.requireNonNull(capabilitiesId, "'capabilitiesId' cannot be null.");
    try {
        if (context == null) {
            return phoneNumberAdministrations.getCapabilitiesUpdateWithResponseAsync(capabilitiesId);
        } else {
            return phoneNumberAdministrations.getCapabilitiesUpdateWithResponseAsync(capabilitiesId, context);
        }
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Adds or removes phone number capabilities.
 *
 * @param phoneNumberCapabilitiesUpdate {@link Map} with the updates to perform
 * @return A {@link Mono} containing
 * a {@link UpdatePhoneNumberCapabilitiesResponse} representing the capabilities update.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UpdateNumberCapabilitiesResponse> updateCapabilities(
    Map<PhoneNumber, NumberUpdateCapabilities> phoneNumberCapabilitiesUpdate) {
    return updateCapabilitiesWithResponse(phoneNumberCapabilitiesUpdate).flatMap(FluxUtil::toMono);
}

/**
 * Adds or removes phone number capabilities.
 *
 * @param phoneNumberCapabilitiesUpdate {@link Map} with the updates to perform
 * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} is
 * a {@link UpdatePhoneNumberCapabilitiesResponse} representing the capabilities update.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<UpdateNumberCapabilitiesResponse>> updateCapabilitiesWithResponse(
    Map<PhoneNumber, NumberUpdateCapabilities> phoneNumberCapabilitiesUpdate) {
    return updateCapabilitiesWithResponse(phoneNumberCapabilitiesUpdate, null);
}

// Context-threading overload (null context == client default).
Mono<Response<UpdateNumberCapabilitiesResponse>> updateCapabilitiesWithResponse(
    Map<PhoneNumber, NumberUpdateCapabilities> phoneNumberCapabilitiesUpdate, Context context) {
    Objects.requireNonNull(phoneNumberCapabilitiesUpdate, "'phoneNumberCapabilitiesUpdate' cannot be null.");
    // Re-key the map by each phone number's string value for the request body.
    Map<String, NumberUpdateCapabilities> capabilitiesMap = new HashMap<>();
    for (Map.Entry<PhoneNumber, NumberUpdateCapabilities> entry : phoneNumberCapabilitiesUpdate.entrySet()) {
        capabilitiesMap.put(entry.getKey().getValue(), entry.getValue());
    }
    UpdateNumberCapabilitiesRequest updateNumberCapabilitiesRequest = new UpdateNumberCapabilitiesRequest();
    updateNumberCapabilitiesRequest.setPhoneNumberCapabilitiesUpdate(capabilitiesMap);
    try {
        if (context == null) {
            return phoneNumberAdministrations.updateCapabilitiesWithResponseAsync(
                updateNumberCapabilitiesRequest);
        } else {
            return phoneNumberAdministrations.updateCapabilitiesWithResponseAsync(
                updateNumberCapabilitiesRequest, context);
        }
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Gets a list of supported countries.
 *
 * @param locale A language-locale pairing which will be used to localise the names of countries.
 * @return A {@link PagedFlux} of {@link PhoneNumberCountry} instances representing supported countries.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<PhoneNumberCountry> listAllSupportedCountries(String locale) {
    return listAllSupportedCountries(locale, null);
}

// Context-threading overload (null context == client default).
PagedFlux<PhoneNumberCountry> listAllSupportedCountries(String locale, Context context) {
    try {
        if (context == null) {
            return phoneNumberAdministrations.getAllSupportedCountriesAsync(locale, null, null);
        } else {
            return phoneNumberAdministrations.getAllSupportedCountriesAsync(locale, null, null, context);
        }
    } catch (RuntimeException ex) {
        return pagedFluxError(logger, ex);
    }
}

/**
 * Gets the configuration of a given phone number.
 *
 * @param phoneNumber A {@link PhoneNumber} representing the phone number.
 * @return A {@link Mono} containing a {@link NumberConfigurationResponse} representing the configuration.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<NumberConfigurationResponse> getNumberConfiguration(PhoneNumber phoneNumber) {
    return getNumberConfigurationWithResponse(phoneNumber).flatMap(FluxUtil::toMono);
}

/**
 * Gets the configuration of a given phone number.
 *
 * @param phoneNumber A {@link PhoneNumber} representing the phone number.
 * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} is
 * a {@link NumberConfigurationResponse} representing the configuration.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<NumberConfigurationResponse>> getNumberConfigurationWithResponse(PhoneNumber phoneNumber) {
    return getNumberConfigurationWithResponse(phoneNumber, null);
}

// Context-threading overload (null context == client default).
Mono<Response<NumberConfigurationResponse>> getNumberConfigurationWithResponse(
    PhoneNumber phoneNumber, Context context) {
    Objects.requireNonNull(phoneNumber, "'phoneNumber' cannot be null.");
    NumberConfigurationPhoneNumber configurationPhoneNumber = new NumberConfigurationPhoneNumber();
    configurationPhoneNumber.setPhoneNumber(phoneNumber.getValue());
    try {
        if (context == null) {
            return phoneNumberAdministrations.getNumberConfigurationWithResponseAsync(
                configurationPhoneNumber);
        } else {
            return phoneNumberAdministrations.getNumberConfigurationWithResponseAsync(
                configurationPhoneNumber, context);
        }
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Associates a phone number with a PSTN Configuration.
 *
 * @param phoneNumber A {@link PhoneNumber} representing the phone number.
 * @param pstnConfiguration A {@link PstnConfiguration} containing the pstn number configuration options.
 * @return A {@link Mono} for the asynchronous return
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> configureNumber(PhoneNumber phoneNumber, PstnConfiguration pstnConfiguration) {
    return configureNumberWithResponse(phoneNumber, pstnConfiguration).flatMap(FluxUtil::toMono);
}

/**
 * Associates a phone number with a PSTN Configuration.
 *
 * @param phoneNumber A {@link PhoneNumber} representing the phone number.
 * @param pstnConfiguration A {@link PstnConfiguration} containing the pstn number configuration options.
 * @return A {@link Mono} containing a {@link Response} for the operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> configureNumberWithResponse(
    PhoneNumber phoneNumber, PstnConfiguration pstnConfiguration) {
    return configureNumberWithResponse(phoneNumber, pstnConfiguration, null);
}

// Context-threading overload (null context == client default).
Mono<Response<Void>> configureNumberWithResponse(
    PhoneNumber phoneNumber, PstnConfiguration pstnConfiguration, Context context) {
    Objects.requireNonNull(phoneNumber, "'phoneNumber' cannot be null.");
    Objects.requireNonNull(pstnConfiguration, "'pstnConfiguration' cannot be null.");
    NumberConfiguration numberConfiguration = new NumberConfiguration();
    numberConfiguration.setPhoneNumber(phoneNumber.getValue()).setPstnConfiguration(pstnConfiguration);
    try {
        if (context == null) {
            return phoneNumberAdministrations.configureNumberWithResponseAsync(numberConfiguration);
        } else {
            return phoneNumberAdministrations.configureNumberWithResponseAsync(numberConfiguration, context);
        }
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Removes the PSTN Configuration from a phone number.
 *
 * @param phoneNumber A {@link PhoneNumber} representing the phone number.
 * @return A {@link Mono} for the asynchronous return
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> unconfigureNumber(PhoneNumber phoneNumber) {
    return unconfigureNumberWithResponse(phoneNumber).flatMap(FluxUtil::toMono);
}

/**
 * Removes the PSTN Configuration from a phone number.
 *
 * @param phoneNumber A {@link PhoneNumber} representing the phone number.
 * @return A {@link Mono} containing a {@link Response} for the operation.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> unconfigureNumberWithResponse(PhoneNumber phoneNumber) {
    return unconfigureNumberWithResponse(phoneNumber, null);
}

// Context-threading overload (null context == client default).
Mono<Response<Void>> unconfigureNumberWithResponse(PhoneNumber phoneNumber, Context context) {
    Objects.requireNonNull(phoneNumber, "'phoneNumber' cannot be null.");
    NumberConfigurationPhoneNumber configurationPhoneNumber = new NumberConfigurationPhoneNumber();
    configurationPhoneNumber.setPhoneNumber(phoneNumber.getValue());
    try {
        if (context == null) {
            return phoneNumberAdministrations.unconfigureNumberWithResponseAsync(configurationPhoneNumber);
        } else {
            return phoneNumberAdministrations.unconfigureNumberWithResponseAsync(configurationPhoneNumber, context);
        }
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Gets a list of phone plan groups for the given country.
 *
 * @param countryCode The ISO 3166-2 country code.
 * @param locale A language-locale pairing which will be used to localise the names of countries.
 * @param includeRateInformation Flag to indicate if rate information should be returned.
* @return A {@link PagedFlux} of {@link PhonePlanGroup} instances representing phone plan groups
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<PhonePlanGroup> listPhonePlanGroups(
    String countryCode, String locale, Boolean includeRateInformation) {
    return listPhonePlanGroups(countryCode, locale, includeRateInformation, null);
}

// Context-threading overload (null context == client default).
PagedFlux<PhonePlanGroup> listPhonePlanGroups(
    String countryCode, String locale, Boolean includeRateInformation, Context context) {
    Objects.requireNonNull(countryCode, "'countryCode' cannot be null.");
    try {
        if (context == null) {
            return phoneNumberAdministrations.getPhonePlanGroupsAsync(
                countryCode, locale, includeRateInformation, null, null);
        } else {
            return phoneNumberAdministrations.getPhonePlanGroupsAsync(
                countryCode, locale, includeRateInformation, null, null, context);
        }
    } catch (RuntimeException ex) {
        return pagedFluxError(logger, ex);
    }
}

/**
 * Gets a list of phone plans for a phone plan group
 *
 * @param countryCode The ISO 3166-2 country code.
 * @param phonePlanGroupId ID of the Phone Plan Group
 * @param locale A language-locale pairing which will be used to localise the names of countries.
 * @return A {@link PagedFlux} of {@link PhonePlan} instances representing phone plans
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<PhonePlan> listPhonePlans(String countryCode, String phonePlanGroupId, String locale) {
    return listPhonePlans(countryCode, phonePlanGroupId, locale, null);
}

// Context-threading overload (null context == client default).
PagedFlux<PhonePlan> listPhonePlans(String countryCode, String phonePlanGroupId, String locale, Context context) {
    Objects.requireNonNull(countryCode, "'countryCode' cannot be null.");
    Objects.requireNonNull(phonePlanGroupId, "'phonePlanGroupId' cannot be null.");
    try {
        if (context == null) {
            return phoneNumberAdministrations.getPhonePlansAsync(
                countryCode, phonePlanGroupId, locale, null, null);
        } else {
            return phoneNumberAdministrations.getPhonePlansAsync(
                countryCode, phonePlanGroupId, locale, null, null, context);
        }
    } catch (RuntimeException ex) {
        return pagedFluxError(logger, ex);
    }
}

/**
 * Gets the location options for a phone plan.
 *
 * @param countryCode The ISO 3166-2 country code.
 * @param phonePlanGroupId ID of the Phone Plan Group
 * @param phonePlanId ID of the Phone Plan
 * @param locale A language-locale pairing which will be used to localise the names of countries.
 * @return A {@link Mono} containing a {@link LocationOptionsResponse} representing the location options
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<LocationOptionsResponse> getPhonePlanLocationOptions(
    String countryCode, String phonePlanGroupId, String phonePlanId, String locale) {
    return getPhonePlanLocationOptionsWithResponse(countryCode, phonePlanGroupId, phonePlanId, locale)
        .flatMap(FluxUtil::toMono);
}

/**
 * Gets the location options for a phone plan.
 *
 * @param countryCode The ISO 3166-2 country code.
 * @param phonePlanGroupId ID of the Phone Plan Group
 * @param phonePlanId ID of the Phone Plan
 * @param locale A language-locale pairing which will be used to localise the names of countries.
* @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} is
* a {@link LocationOptionsResponse} representing the location options
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<LocationOptionsResponse>> getPhonePlanLocationOptionsWithResponse(
    String countryCode, String phonePlanGroupId, String phonePlanId, String locale) {
    return getPhonePlanLocationOptionsWithResponse(
        countryCode, phonePlanGroupId, phonePlanId, locale, null);
}

// Context-threading overload: a null context means "let the generated client use its
// default context"; otherwise the caller-supplied Context is passed to the service call.
Mono<Response<LocationOptionsResponse>> getPhonePlanLocationOptionsWithResponse(
    String countryCode, String phonePlanGroupId, String phonePlanId, String locale, Context context) {
    Objects.requireNonNull(countryCode, "'countryCode' cannot be null.");
    Objects.requireNonNull(phonePlanGroupId, "'phonePlanGroupId' cannot be null.");
    Objects.requireNonNull(phonePlanId, "'phonePlanId' cannot be null.");
    try {
        if (context == null) {
            return phoneNumberAdministrations.getPhonePlanLocationOptionsWithResponseAsync(
                countryCode, phonePlanGroupId, phonePlanId, locale);
        } else {
            return phoneNumberAdministrations.getPhonePlanLocationOptionsWithResponseAsync(
                countryCode, phonePlanGroupId, phonePlanId, locale, context);
        }
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Gets a release by ID.
 *
 * @param releaseId ID of the Release
 * @return A {@link Mono} containing a {@link PhoneNumberRelease} representing the release.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<PhoneNumberRelease> getReleaseById(String releaseId) {
    return getReleaseByIdWithResponse(releaseId).flatMap(FluxUtil::toMono);
}

/**
 * Gets a release by ID.
 *
 * @param releaseId ID of the Release
 * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} is
 * a {@link PhoneNumberRelease} representing the release.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<PhoneNumberRelease>> getReleaseByIdWithResponse(String releaseId) {
    return getReleaseByIdWithResponse(releaseId, null);
}

// Context-threading overload (null context == client default).
Mono<Response<PhoneNumberRelease>> getReleaseByIdWithResponse(String releaseId, Context context) {
    Objects.requireNonNull(releaseId, "'releaseId' cannot be null.");
    try {
        if (context == null) {
            return phoneNumberAdministrations.getReleaseByIdWithResponseAsync(releaseId);
        } else {
            return phoneNumberAdministrations.getReleaseByIdWithResponseAsync(releaseId, context);
        }
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Creates a release for the given phone numbers.
 *
 * @param phoneNumbers {@link List} of {@link PhoneNumber} objects with the phone numbers.
 * @return A {@link Mono} containing a {@link ReleaseResponse} representing the release.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<ReleaseResponse> releasePhoneNumbers(List<PhoneNumber> phoneNumbers) {
    return releasePhoneNumbersWithResponse(phoneNumbers).flatMap(FluxUtil::toMono);
}

/**
 * Creates a release for the given phone numbers.
 *
 * @param phoneNumbers {@link List} of {@link PhoneNumber} objects with the phone numbers.
 * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} is
 * a {@link ReleaseResponse} representing the release.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<ReleaseResponse>> releasePhoneNumbersWithResponse(List<PhoneNumber> phoneNumbers) {
    return releasePhoneNumbersWithResponse(phoneNumbers, null);
}

// Context-threading overload (null context == client default).
Mono<Response<ReleaseResponse>> releasePhoneNumbersWithResponse(List<PhoneNumber> phoneNumbers, Context context) {
    Objects.requireNonNull(phoneNumbers, "'phoneNumbers' cannot be null.");
    // The request body carries the plain string value extracted from each PhoneNumber.
    List<String> phoneNumberStrings = phoneNumbers.stream().map(PhoneNumber::getValue).collect(Collectors.toList());
    ReleaseRequest releaseRequest = new ReleaseRequest();
    releaseRequest.setPhoneNumbers(phoneNumberStrings);
    try {
        if (context == null) {
            return phoneNumberAdministrations.releasePhoneNumbersWithResponseAsync(releaseRequest);
        } else {
            return phoneNumberAdministrations.releasePhoneNumbersWithResponseAsync(releaseRequest, context);
        }
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Gets the list of all releases
 *
 * @return A {@link PagedFlux} of {@link PhoneNumberEntity} instances representing releases.
 */
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<PhoneNumberEntity> listAllReleases() {
    return listAllReleases(null);
}

// Context-threading overload (null context == client default).
PagedFlux<PhoneNumberEntity> listAllReleases(Context context) {
    try {
        if (context == null) {
            return phoneNumberAdministrations.getAllReleasesAsync(null, null);
        } else {
            return phoneNumberAdministrations.getAllReleasesAsync(null, null, context);
        }
    } catch (RuntimeException ex) {
        return pagedFluxError(logger, ex);
    }
}

/**
 * Gets a search by ID.
 *
 * @param searchId ID of the search
 * @return A {@link Mono} containing a {@link PhoneNumberSearch} representing the search.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<PhoneNumberSearch> getSearchById(String searchId) {
    return getSearchByIdWithResponse(searchId).flatMap(FluxUtil::toMono);
}

/**
 * Gets a search by ID.
 *
 * @param searchId ID of the search
 * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} is
 * a {@link PhoneNumberSearch} representing the search.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<PhoneNumberSearch>> getSearchByIdWithResponse(String searchId) {
    return getSearchByIdWithResponse(searchId, null);
}

// Context-threading overload (null context == client default).
Mono<Response<PhoneNumberSearch>> getSearchByIdWithResponse(String searchId, Context context) {
    Objects.requireNonNull(searchId, "'searchId' cannot be null.");
    try {
        if (context == null) {
            return phoneNumberAdministrations.getSearchByIdWithResponseAsync(searchId);
        } else {
            return phoneNumberAdministrations.getSearchByIdWithResponseAsync(searchId, context);
        }
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Create a phone number search.
 *
 * @param searchOptions A {@link CreateSearchOptions} with the search options
 * @return A {@link Mono} containing a {@link CreateSearchResponse} representing the search.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<CreateSearchResponse> createSearch(CreateSearchOptions searchOptions) {
    return createSearchWithResponse(searchOptions).flatMap(FluxUtil::toMono);
}

/**
 * Create a phone number search.
 *
 * @param searchOptions A {@link CreateSearchOptions} with the search options
 * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value} is
 * a {@link CreateSearchResponse} representing the search.
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<CreateSearchResponse>> createSearchWithResponse(CreateSearchOptions searchOptions) { return createSearchWithResponse(searchOptions, null); } Mono<Response<CreateSearchResponse>> createSearchWithResponse(CreateSearchOptions searchOptions, Context context) { Objects.requireNonNull(searchOptions, "'searchOptions' cannot be null."); try { if (context == null) { return phoneNumberAdministrations.createSearchWithResponseAsync(searchOptions); } else { return phoneNumberAdministrations.createSearchWithResponseAsync(searchOptions, context); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets the list of all searches * * @return A {@link PagedFlux} of {@link PhoneNumberEntity} instances representing searches. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedFlux<PhoneNumberEntity> listAllSearches() { return listAllSearches(null); } PagedFlux<PhoneNumberEntity> listAllSearches(Context context) { try { if (context == null) { return phoneNumberAdministrations.getAllSearchesAsync(null, null); } else { return phoneNumberAdministrations.getAllSearchesAsync(null, null, context); } } catch (RuntimeException ex) { return pagedFluxError(logger, ex); } } /** * Cancels the search. This means existing numbers in the search will be made available. * * @param searchId ID of the search * @return A {@link Mono} for the asynchronous return */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> cancelSearch(String searchId) { return cancelSearchWithResponse(searchId).flatMap(FluxUtil::toMono); } /** * Cancels the search. This means existing numbers in the search will be made available. 
* * @param searchId ID of the search * @return A {@link Mono} containing a {@link Response} for the operation */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> cancelSearchWithResponse(String searchId) { return cancelSearchWithResponse(searchId, null); } Mono<Response<Void>> cancelSearchWithResponse(String searchId, Context context) { Objects.requireNonNull(searchId, "'searchId' cannot be null."); try { if (context == null) { return phoneNumberAdministrations.cancelSearchWithResponseAsync(searchId); } else { return phoneNumberAdministrations.cancelSearchWithResponseAsync(searchId, context); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Purchases the phone number search. * * @param searchId ID of the search * @return A {@link Mono} for the asynchronous return */ private Mono<Void> purchaseSearch(String searchId) { return purchaseSearchWithResponse(searchId).flatMap(FluxUtil::toMono); } /** * Purchases the phone number search. * * @param searchId ID of the search * @return A {@link Mono} containing a {@link Response} for the operation */ private Mono<Response<Void>> purchaseSearchWithResponse(String searchId) { return purchaseSearchWithResponse(searchId, null); } private Mono<Response<Void>> purchaseSearchWithResponse(String searchId, Context context) { Objects.requireNonNull(searchId, "'searchId' cannot be null."); try { if (context == null) { return phoneNumberAdministrations.purchaseSearchWithResponseAsync(searchId); } else { return phoneNumberAdministrations.purchaseSearchWithResponseAsync(searchId, context); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Initiates a search and returns a {@link PhoneNumberSearch} usable by other functions * This function returns a Long Running Operation poller that allows you to * wait indefinitely until the operation is complete. 
* * @param options A {@link CreateSearchOptions} with the search options * @param pollInterval The time our long running operation will keep on polling * until it gets a result from the server * @return A {@link PollerFlux} object with the search result */ @ServiceMethod(returns = ReturnType.COLLECTION) public PollerFlux<PhoneNumberSearch, PhoneNumberSearch> beginCreateSearch( CreateSearchOptions options, Duration pollInterval) { Objects.requireNonNull(options, "'options' cannot be null."); if (pollInterval == null) { pollInterval = defaultPollInterval; } return new PollerFlux<PhoneNumberSearch, PhoneNumberSearch>(pollInterval, createSearchActivationOperation(options), createSearchPollOperation(), cancelSearchOperation(), createSearchFetchResultOperation()); } private Function<PollingContext<PhoneNumberSearch>, Mono<PhoneNumberSearch>> createSearchActivationOperation(CreateSearchOptions options) { return (pollingContext) -> { Mono<PhoneNumberSearch> response = createSearch(options).flatMap(createSearchResponse -> { String searchId = createSearchResponse.getSearchId(); Mono<PhoneNumberSearch> phoneNumberSearch = getSearchById(searchId); return phoneNumberSearch; }); return response; }; } private Function<PollingContext<PhoneNumberSearch>, Mono<PollResponse<PhoneNumberSearch>>> createSearchPollOperation() { return pollingContext -> getSearchById(pollingContext.getLatestResponse().getValue().getSearchId()) .flatMap(getSearchResponse -> { SearchStatus status = getSearchResponse.getStatus(); if (status.equals(SearchStatus.EXPIRED) || status.equals(SearchStatus.CANCELLED) || status.equals(SearchStatus.RESERVED)) { return Mono.just(new PollResponse<>( LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, getSearchResponse)); } if (status.equals(SearchStatus.ERROR)) { return Mono.just(new PollResponse<>( LongRunningOperationStatus.FAILED, getSearchResponse)); } return Mono.just(new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, getSearchResponse)); }); } private 
BiFunction<PollingContext<PhoneNumberSearch>, PollResponse<PhoneNumberSearch>, Mono<PhoneNumberSearch>> cancelSearchOperation() { return (pollingContext, firstResponse) -> { cancelSearch(pollingContext.getLatestResponse().getValue().getSearchId()); return Mono.just(pollingContext.getLatestResponse().getValue()); }; } private Function<PollingContext<PhoneNumberSearch>, Mono<PhoneNumberSearch>> createSearchFetchResultOperation() { return pollingContext -> { return Mono.just(pollingContext.getLatestResponse().getValue()); }; } /** * Initiates a purchase process and polls until a terminal state is reached * This function returns a Long Running Operation poller that allows you to * wait indefinitely until the operation is complete. * * @param searchId ID of the search * @param pollInterval The time our long running operation will keep on polling * until it gets a result from the server * @return A {@link PollerFlux} object. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PollerFlux<Void, Void> beginPurchaseSearch(String searchId, Duration pollInterval) { Objects.requireNonNull(searchId, "'searchId' can not be null."); if (pollInterval == null) { pollInterval = defaultPollInterval; } return new PollerFlux<Void, Void>(pollInterval, purchaseSearchActivationOperation(searchId), purchaseSearchPollOperation(searchId), (activationResponse, pollingContext) -> Mono.error( private Function<PollingContext<Void>, Mono<PollResponse<Void>>> purchaseSearchPollOperation(String searchId) { return (pollingContext) -> getSearchById(searchId) .flatMap(getSearchResponse -> { SearchStatus statusResponse = getSearchResponse.getStatus(); if (statusResponse.equals(SearchStatus.SUCCESS)) { return Mono.just(new PollResponse<>( LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, null)); } if (statusResponse.equals(SearchStatus.ERROR) || statusResponse.equals(SearchStatus.EXPIRED)) { return Mono.just(new PollResponse<>( LongRunningOperationStatus.FAILED, null)); } return Mono.just(new 
PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, null)); }); } private Function<PollingContext<Void>, Mono<Void>> purchaseSearchFetchResultOperation() { return pollingContext -> { return Mono.empty(); }; } }
I think we should also check that there isn't an existing etag condition before setting one. If the caller passes in an older etag that they specifically want to target, and we overwrite it with the etag we just fetched (possibly after intervening updates), we might not be reading the version they requested.
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param options {@link BlobInputStreamOptions} controlling the range, block size, and request conditions;
 * may be {@code null}, in which case defaults are used (full blob, 4 MB blocks, no extra conditions).
 * @return A {@link BlobInputStream} object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public BlobInputStream openInputStream(BlobInputStreamOptions options) {
    options = options == null ? new BlobInputStreamOptions() : options;

    BlobProperties properties = getProperties();
    BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
    int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
    BlobRequestConditions requestConditions = options.getRequestConditions() == null
        ? new BlobRequestConditions() : options.getRequestConditions();

    // Only pin the stream to the etag we just fetched when the caller has not supplied their own
    // If-Match condition. Unconditionally overwriting it would discard a caller's request to read a
    // specific blob version and could silently return newer content than they asked for.
    if (requestConditions.getIfMatch() == null) {
        requestConditions.setIfMatch(properties.getETag());
    }

    return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, requestConditions,
        properties);
}
requestConditions.setIfMatch(properties.getETag());
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param options {@link BlobInputStreamOptions} controlling the range, block size, and request conditions;
 * may be {@code null}, in which case defaults are used (full blob, 4 MB blocks, no extra conditions).
 * @return A {@link BlobInputStream} object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public BlobInputStream openInputStream(BlobInputStreamOptions options) {
    BlobInputStreamOptions finalOptions = (options != null) ? options : new BlobInputStreamOptions();

    BlobProperties properties = getProperties();

    BlobRange range = finalOptions.getRange();
    if (range == null) {
        range = new BlobRange(0);
    }

    Integer blockSize = finalOptions.getBlockSize();
    int chunkSize = (blockSize != null) ? blockSize : 4 * Constants.MB;

    BlobRequestConditions requestConditions = finalOptions.getRequestConditions();
    if (requestConditions == null) {
        requestConditions = new BlobRequestConditions();
    }
    // Respect a caller-supplied If-Match; otherwise lock the stream to the current etag so the
    // blob cannot change out from under the reader mid-download.
    if (requestConditions.getIfMatch() == null) {
        requestConditions.setIfMatch(properties.getETag());
    }

    return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, requestConditions,
        properties);
}
class BlobClientBase { private final ClientLogger logger = new ClientLogger(BlobClientBase.class); private final BlobAsyncClientBase client; /** * Constructor used by {@link SpecializedBlobClientBuilder}. * * @param client the async blob client */ protected BlobClientBase(BlobAsyncClientBase client) { this.client = client; } /** * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource. * * @param snapshot the identifier for a specific snapshot of this blob * @return a {@link BlobClientBase} used to interact with the specific snapshot. */ public BlobClientBase getSnapshotClient(String snapshot) { return new BlobClientBase(client.getSnapshotClient(snapshot)); } /** * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource. * * @param versionId the identifier for a specific version of this blob, * pass {@code null} to interact with the latest blob version. * @return a {@link BlobClientBase} used to interact with the specific version. */ public BlobClientBase getVersionClient(String versionId) { return new BlobClientBase(client.getVersionClient(versionId)); } /** * Gets the URL of the blob represented by this client. * * @return the URL. */ public String getBlobUrl() { return client.getBlobUrl(); } /** * Get associated account name. * * @return account name associated with this storage resource. */ public String getAccountName() { return client.getAccountName(); } /** * Get the container name. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName} * * @return The name of the container. */ public final String getContainerName() { return client.getContainerName(); } /** * Decodes and gets the blob name. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName} * * @return The decoded name of the blob. 
*/ public final String getBlobName() { return client.getBlobName(); } /** * Gets the {@link HttpPipeline} powering this client. * * @return The pipeline. */ public HttpPipeline getHttpPipeline() { return client.getHttpPipeline(); } /** * Gets the {@link CpkInfo} used to encrypt this blob's content on the server. * * @return the customer provided key used for encryption. */ public CpkInfo getCustomerProvidedKey() { return client.getCustomerProvidedKey(); } /** * Gets the {@code encryption scope} used to encrypt this blob's content on the server. * * @return the encryption scope used for encryption. */ String getEncryptionScope() { return client.getEncryptionScope(); } /** * Gets the service version the client is using. * * @return the service version the client is using. */ public BlobServiceVersion getServiceVersion() { return client.getServiceVersion(); } /** * Gets the snapshotId for a blob resource * * @return A string that represents the snapshotId of the snapshot blob */ public String getSnapshotId() { return client.getSnapshotId(); } /** * Gets the versionId for a blob resource * * @return A string that represents the versionId of the snapshot blob */ public String getVersionId() { return client.getVersionId(); } /** * Determines if a blob is a snapshot * * @return A boolean that indicates if a blob is a snapshot */ public boolean isSnapshot() { return client.isSnapshot(); } /** * Opens a blob input stream to download the blob. * <p> * * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob. * @throws BlobStorageException If a storage service error occurred. */ public final BlobInputStream openInputStream() { return openInputStream(null, null); } /** * Opens a blob input stream to download the specified range of the blob. * <p> * * @param range {@link BlobRange} * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the * blob. 
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob. * @throws BlobStorageException If a storage service error occurred. */ public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) { return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions)); } /** * Opens a blob input stream to download the specified range of the blob. * * @param options {@link BlobInputStreamOptions} * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob. * @throws BlobStorageException If a storage service error occurred. */ /** * Gets if the blob this client represents exists in the cloud. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists} * * @return true if the blob exists, false if it doesn't */ public Boolean exists() { return existsWithResponse(null, Context.NONE).getValue(); } /** * Gets if the blob this client represents exists in the cloud. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return true if the blob exists, false if it doesn't */ public Response<Boolean> existsWithResponse(Duration timeout, Context context) { Mono<Response<Boolean>> response = client.existsWithResponse(context); return blockWithOptionalTimeout(response, timeout); } /** * Copies the data at the source URL to a blob. * <p> * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If * the source is in another account, the source must either be public or authenticated with a SAS token. 
If the * source is in the same account, the Shared Key authorization on the destination will also be applied to the * source. The source URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second * is used. * @return A {@link SyncPoller} to poll the progress of blob copy operation. */ public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) { return beginCopy(sourceUrl, null, null, null, null, null, pollInterval); } /** * Copies the data at the source URL to a blob. * <p> * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If * the source is in another account, the source must either be public or authenticated with a SAS token. If the * source is in the same account, the Shared Key authorization on the destination will also be applied to the * source. The source URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata Metadata to associate with the destination blob. * @param tier {@link AccessTier} for the destination blob. * @param priority {@link RehydratePriority} for rehydrating the blob. * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access * conditions related to the modification of data. 
ETag and LastModifiedTime are used to construct conditions * related to when the blob was changed relative to the given request. The request will fail if the specified * condition is not satisfied. * @param destRequestConditions {@link BlobRequestConditions} against the destination. * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second * is used. * @return A {@link SyncPoller} to poll the progress of blob copy operation. */ public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier, RehydratePriority priority, RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions, Duration pollInterval) { return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier) .setRehydratePriority(priority).setSourceRequestConditions( ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions)) .setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval)); } /** * Copies the data at the source URL to a blob. * <p> * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If * the source is in another account, the source must either be public or authenticated with a SAS token. If the * source is in the same account, the Shared Key authorization on the destination will also be applied to the * source. The source URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy * * <p>For more information, see the * <a href="https: * * @param options {@link BlobBeginCopyOptions} * @return A {@link SyncPoller} to poll the progress of blob copy operation. 
*/ public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) { return client.beginCopy(options).getSyncPoller(); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl * * <p>For more information, see the * <a href="https: * * @param copyId The id of the copy operation to abort. */ public void abortCopyFromUrl(String copyId) { abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId The id of the copy operation to abort. * @param leaseId The lease ID the active lease on the blob must match. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout, Context context) { return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * <p> * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token * attached. The URL must be URL encoded. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. * @return The copy ID for the long running operation. * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}. */ public String copyFromUrl(String copySource) { return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue(); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * <p> * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token * attached. The URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata Metadata to associate with the destination blob. * @param tier {@link AccessTier} for the destination blob. * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions * related to when the blob was changed relative to the given request. The request will fail if the specified * condition is not satisfied. * @param destRequestConditions {@link BlobRequestConditions} against the destination. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The copy ID for the long running operation. * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}. 
*/ public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier, RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions, Duration timeout, Context context) { return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata) .setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions) .setDestinationRequestConditions(destRequestConditions), timeout, context); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * <p> * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token * attached. The URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobCopyFromUrlOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The copy ID for the long running operation. * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}. */ public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout, Context context) { Mono<Response<String>> response = client .copyFromUrlWithResponse(options, context); return blockWithOptionalTimeout(response, timeout); } /** * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient}, * {@link PageBlobClient}, or {@link AppendBlobClient}. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null */ public void download(OutputStream stream) { downloadWithResponse(stream, null, null, null, false, null, Context.NONE); } /** * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range {@link BlobRange} * @param options {@link DownloadRetryOptions} * @param requestConditions {@link BlobRequestConditions} * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. * @throws UncheckedIOException If an I/O error occurs. 
* @throws NullPointerException if {@code stream} is null */ public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range, DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) { StorageImplUtils.assertNotNull("stream", stream); Mono<BlobDownloadResponse> download = client .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context) .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> { try { outputStream.write(FluxUtil.byteBufferToArray(buffer)); return outputStream; } catch (IOException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex))); } }).thenReturn(new BlobDownloadResponse(response))); return blockWithOptionalTimeout(download, timeout); } /** * Downloads the entire blob into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @return The blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs */ public BlobProperties downloadToFile(String filePath) { return downloadToFile(filePath, false); } /** * Downloads the entire blob into a file specified by the path. 
* * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a * {@link FileAlreadyExistsException} will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @param overwrite Whether or not to overwrite the file, should the file exist. * @return The blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs */ public BlobProperties downloadToFile(String filePath, boolean overwrite) { Set<OpenOption> openOptions = null; if (overwrite) { openOptions = new HashSet<>(); openOptions.add(StandardOpenOption.CREATE); openOptions.add(StandardOpenOption.TRUNCATE_EXISTING); openOptions.add(StandardOpenOption.READ); openOptions.add(StandardOpenOption.WRITE); } return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE) .getValue(); } /** * Downloads the entire blob into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @param range {@link BlobRange} * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel * transfers parameter is ignored. * @param downloadRetryOptions {@link DownloadRetryOptions} * @param requestConditions {@link BlobRequestConditions} * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned. 
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs. */ public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) { return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions, requestConditions, rangeGetContentMd5, null, timeout, context); } /** * Downloads the entire blob into a file specified by the path. * * <p>By default the file will be created and must not exist, if the file already exists a * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate * {@link OpenOption OpenOptions} </p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @param range {@link BlobRange} * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel * transfers parameter is ignored. * @param downloadRetryOptions {@link DownloadRetryOptions} * @param requestConditions {@link BlobRequestConditions} * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned. * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. 
* @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs. */ public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions, Duration timeout, Context context) { final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions = ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions)); return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range) .setParallelTransferOptions(finalParallelTransferOptions) .setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions) .setRangeGetContentMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context); } /** * Downloads the entire blob into a file specified by the path. * * <p>By default the file will be created and must not exist, if the file already exists a * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate * {@link OpenOption OpenOptions} </p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobDownloadToFileOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs. 
*/ public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout, Context context) { Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context); return blockWithOptionalTimeout(download, timeout); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete} * * <p>For more information, see the * <a href="https: */ public void delete() { deleteWithResponse(null, null, null, Context.NONE); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse * * <p>For more information, see the * <a href="https: * * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include} * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being * deleted, you must pass null. * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<Void>> response = client .deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context); return blockWithOptionalTimeout(response, timeout); } /** * Returns the blob's metadata and properties. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties} * * <p>For more information, see the * <a href="https: * * @return The blob properties and metadata. */ public BlobProperties getProperties() { return getPropertiesWithResponse(null, null, Context.NONE).getValue(); } /** * Returns the blob's metadata and properties. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The blob properties and metadata. */ public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context); return blockWithOptionalTimeout(response, timeout); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In * order to preserve existing values, they must be passed alongside the header being changed. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param headers {@link BlobHttpHeaders} */ public void setHttpHeaders(BlobHttpHeaders headers) { setHttpHeadersWithResponse(headers, null, null, Context.NONE); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In * order to preserve existing values, they must be passed alongside the header being changed. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse * * <p>For more information, see the * <a href="https: * * @param headers {@link BlobHttpHeaders} * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<Void>> response = client .setHttpHeadersWithResponse(headers, requestConditions, context); return blockWithOptionalTimeout(response, timeout); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values * must be preserved, they must be downloaded and included in the call to this method. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob. */ public void setMetadata(Map<String, String> metadata) { setMetadataWithResponse(metadata, null, null, Context.NONE); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values * must be preserved, they must be downloaded and included in the call to this method. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob. 
* @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context); return blockWithOptionalTimeout(response, timeout); } /** * Returns the blob's tags. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags} * * <p>For more information, see the * <a href="https: * * @return The blob's tags. */ public Map<String, String> getTags() { return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue(); } /** * Returns the blob's tags. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobGetTagsOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The blob's tags. */ public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout, Context context) { Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context); return blockWithOptionalTimeout(response, timeout); } /** * Sets user defined tags. The specified tags in this method will replace existing tags. If old values * must be preserved, they must be downloaded and included in the call to this method. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags * * <p>For more information, see the * <a href="https: * * @param tags Tags to associate with the blob. */ public void setTags(Map<String, String> tags) { this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE); } /** * Sets user defined tags. The specified tags in this method will replace existing tags. If old values * must be preserved, they must be downloaded and included in the call to this method. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobSetTagsOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) { Mono<Response<Void>> response = client.setTagsWithResponse(options, context); return blockWithOptionalTimeout(response, timeout); } /** * Creates a read-only snapshot of the blob. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot} * * <p>For more information, see the * <a href="https: * * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use * {@link BlobClientBase */ public BlobClientBase createSnapshot() { return createSnapshotWithResponse(null, null, null, Context.NONE).getValue(); } /** * Creates a read-only snapshot of the blob. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob snapshot. * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use * {@link BlobClientBase */ public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<BlobClientBase>> response = client .createSnapshotWithResponse(metadata, requestConditions, context) .map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue()))); return blockWithOptionalTimeout(response, timeout); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's * etag. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier * * <p>For more information, see the * <a href="https: * * @param tier The new tier for the blob. */ public void setAccessTier(AccessTier tier) { setAccessTierWithResponse(tier, null, null, null, Context.NONE); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. 
A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's * etag. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse * * <p>For more information, see the * <a href="https: * * @param tier The new tier for the blob. * @param priority Optional priority to set for re-hydrating blobs. * @param leaseId The lease ID the active lease on the blob must match. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId, Duration timeout, Context context) { return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId), timeout, context); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's * etag. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobSetAccessTierOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. 
*/ public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options, Duration timeout, Context context) { return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete} * * <p>For more information, see the * <a href="https: */ public void undelete() { undeleteWithResponse(null, Context.NONE); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> undeleteWithResponse(Duration timeout, Context context) { Mono<Response<Void>> response = client.undeleteWithResponse(context); return blockWithOptionalTimeout(response, timeout); } /** * Returns the sku name and account kind for the account. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo} * * <p>For more information, see the * <a href="https: * * @return The sku name and account kind. */ public StorageAccountInfo getAccountInfo() { return getAccountInfoWithResponse(null, Context.NONE).getValue(); } /** * Returns the sku name and account kind for the account. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The sku name and account kind. */ public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) { Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context); return blockWithOptionalTimeout(response, timeout); } /** * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}. * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. * @see BlobServiceClient * user delegation key. * @return A {@code String} representing all SAS query parameters. 
*/ public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, UserDelegationKey userDelegationKey) { return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey); } /** * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues} * Note : The client must be authenticated via {@link StorageSharedKeyCredential} * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * * @return A {@code String} representing all SAS query parameters. */ public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) { return this.client.generateSas(blobServiceSasSignatureValues); } /** * Opens a blob input stream to query the blob. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream * * @param expression The query expression. * @return An <code>InputStream</code> object that represents the stream to use for reading the query response. */ public InputStream openQueryInputStream(String expression) { return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue(); } /** * Opens a blob input stream to query the blob. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream * * @param queryOptions {@link BlobQueryOptions The query options}. * @return A response containing status code and HTTP headers including an <code>InputStream</code> object * that represents the stream to use for reading the query response. 
*/ public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) { BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block(); if (response == null) { throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null")); } return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), new FluxInputStream(response.getValue()), response.getDeserializedHeaders()); } /** * Queries an entire blob into an output stream. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @param expression The query expression. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null. */ public void query(OutputStream stream, String expression) { queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE); } /** * Queries an entire blob into an output stream. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse * * @param queryOptions {@link BlobQueryOptions The query options}. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null. 
*/ public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) { StorageImplUtils.assertNotNull("options", queryOptions); StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream()); Mono<BlobQueryResponse> download = client .queryWithResponse(queryOptions, context) .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> { try { outputStream.write(FluxUtil.byteBufferToArray(buffer)); return outputStream; } catch (IOException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex))); } }).thenReturn(new BlobQueryResponse(response))); return blockWithOptionalTimeout(download, timeout); } }
class BlobClientBase { private final ClientLogger logger = new ClientLogger(BlobClientBase.class); private final BlobAsyncClientBase client; /** * Constructor used by {@link SpecializedBlobClientBuilder}. * * @param client the async blob client */ protected BlobClientBase(BlobAsyncClientBase client) { this.client = client; } /** * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource. * * @param snapshot the identifier for a specific snapshot of this blob * @return a {@link BlobClientBase} used to interact with the specific snapshot. */ public BlobClientBase getSnapshotClient(String snapshot) { return new BlobClientBase(client.getSnapshotClient(snapshot)); } /** * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource. * * @param versionId the identifier for a specific version of this blob, * pass {@code null} to interact with the latest blob version. * @return a {@link BlobClientBase} used to interact with the specific version. */ public BlobClientBase getVersionClient(String versionId) { return new BlobClientBase(client.getVersionClient(versionId)); } /** * Gets the URL of the blob represented by this client. * * @return the URL. */ public String getBlobUrl() { return client.getBlobUrl(); } /** * Get associated account name. * * @return account name associated with this storage resource. */ public String getAccountName() { return client.getAccountName(); } /** * Get the container name. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName} * * @return The name of the container. */ public final String getContainerName() { return client.getContainerName(); } /** * Decodes and gets the blob name. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName} * * @return The decoded name of the blob. 
*/ public final String getBlobName() { return client.getBlobName(); } /** * Gets the {@link HttpPipeline} powering this client. * * @return The pipeline. */ public HttpPipeline getHttpPipeline() { return client.getHttpPipeline(); } /** * Gets the {@link CpkInfo} used to encrypt this blob's content on the server. * * @return the customer provided key used for encryption. */ public CpkInfo getCustomerProvidedKey() { return client.getCustomerProvidedKey(); } /** * Gets the {@code encryption scope} used to encrypt this blob's content on the server. * * @return the encryption scope used for encryption. */ String getEncryptionScope() { return client.getEncryptionScope(); } /** * Gets the service version the client is using. * * @return the service version the client is using. */ public BlobServiceVersion getServiceVersion() { return client.getServiceVersion(); } /** * Gets the snapshotId for a blob resource * * @return A string that represents the snapshotId of the snapshot blob */ public String getSnapshotId() { return client.getSnapshotId(); } /** * Gets the versionId for a blob resource * * @return A string that represents the versionId of the snapshot blob */ public String getVersionId() { return client.getVersionId(); } /** * Determines if a blob is a snapshot * * @return A boolean that indicates if a blob is a snapshot */ public boolean isSnapshot() { return client.isSnapshot(); } /** * Opens a blob input stream to download the blob. * <p> * * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob. * @throws BlobStorageException If a storage service error occurred. */ public final BlobInputStream openInputStream() { return openInputStream(null, null); } /** * Opens a blob input stream to download the specified range of the blob. * <p> * * @param range {@link BlobRange} * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the * blob. 
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob. * @throws BlobStorageException If a storage service error occurred. */ public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) { return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions)); } /** * Opens a blob input stream to download the specified range of the blob. * * @param options {@link BlobInputStreamOptions} * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob. * @throws BlobStorageException If a storage service error occurred. */ /** * Gets if the blob this client represents exists in the cloud. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists} * * @return true if the blob exists, false if it doesn't */ public Boolean exists() { return existsWithResponse(null, Context.NONE).getValue(); } /** * Gets if the blob this client represents exists in the cloud. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return true if the blob exists, false if it doesn't */ public Response<Boolean> existsWithResponse(Duration timeout, Context context) { Mono<Response<Boolean>> response = client.existsWithResponse(context); return blockWithOptionalTimeout(response, timeout); } /** * Copies the data at the source URL to a blob. * <p> * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If * the source is in another account, the source must either be public or authenticated with a SAS token. 
If the * source is in the same account, the Shared Key authorization on the destination will also be applied to the * source. The source URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second * is used. * @return A {@link SyncPoller} to poll the progress of blob copy operation. */ public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) { return beginCopy(sourceUrl, null, null, null, null, null, pollInterval); } /** * Copies the data at the source URL to a blob. * <p> * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If * the source is in another account, the source must either be public or authenticated with a SAS token. If the * source is in the same account, the Shared Key authorization on the destination will also be applied to the * source. The source URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata Metadata to associate with the destination blob. * @param tier {@link AccessTier} for the destination blob. * @param priority {@link RehydratePriority} for rehydrating the blob. * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access * conditions related to the modification of data. 
ETag and LastModifiedTime are used to construct conditions * related to when the blob was changed relative to the given request. The request will fail if the specified * condition is not satisfied. * @param destRequestConditions {@link BlobRequestConditions} against the destination. * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second * is used. * @return A {@link SyncPoller} to poll the progress of blob copy operation. */ public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier, RehydratePriority priority, RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions, Duration pollInterval) { return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier) .setRehydratePriority(priority).setSourceRequestConditions( ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions)) .setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval)); } /** * Copies the data at the source URL to a blob. * <p> * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If * the source is in another account, the source must either be public or authenticated with a SAS token. If the * source is in the same account, the Shared Key authorization on the destination will also be applied to the * source. The source URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy * * <p>For more information, see the * <a href="https: * * @param options {@link BlobBeginCopyOptions} * @return A {@link SyncPoller} to poll the progress of blob copy operation. 
*/ public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) { return client.beginCopy(options).getSyncPoller(); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl * * <p>For more information, see the * <a href="https: * * @param copyId The id of the copy operation to abort. */ public void abortCopyFromUrl(String copyId) { abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId The id of the copy operation to abort. * @param leaseId The lease ID the active lease on the blob must match. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout, Context context) { return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * <p> * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token * attached. The URL must be URL encoded. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. * @return The copy ID for the long running operation. * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}. */ public String copyFromUrl(String copySource) { return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue(); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * <p> * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token * attached. The URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata Metadata to associate with the destination blob. * @param tier {@link AccessTier} for the destination blob. * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions * related to when the blob was changed relative to the given request. The request will fail if the specified * condition is not satisfied. * @param destRequestConditions {@link BlobRequestConditions} against the destination. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The copy ID for the long running operation. * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}. 
*/ public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier, RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions, Duration timeout, Context context) { return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata) .setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions) .setDestinationRequestConditions(destRequestConditions), timeout, context); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * <p> * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token * attached. The URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobCopyFromUrlOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The copy ID for the long running operation. * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}. */ public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout, Context context) { Mono<Response<String>> response = client .copyFromUrlWithResponse(options, context); return blockWithOptionalTimeout(response, timeout); } /** * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient}, * {@link PageBlobClient}, or {@link AppendBlobClient}. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null */ public void download(OutputStream stream) { downloadWithResponse(stream, null, null, null, false, null, Context.NONE); } /** * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range {@link BlobRange} * @param options {@link DownloadRetryOptions} * @param requestConditions {@link BlobRequestConditions} * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. * @throws UncheckedIOException If an I/O error occurs. 
* @throws NullPointerException if {@code stream} is null */ public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range, DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) { StorageImplUtils.assertNotNull("stream", stream); Mono<BlobDownloadResponse> download = client .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context) .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> { try { outputStream.write(FluxUtil.byteBufferToArray(buffer)); return outputStream; } catch (IOException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex))); } }).thenReturn(new BlobDownloadResponse(response))); return blockWithOptionalTimeout(download, timeout); } /** * Downloads the entire blob into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @return The blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs */ public BlobProperties downloadToFile(String filePath) { return downloadToFile(filePath, false); } /** * Downloads the entire blob into a file specified by the path. 
* * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a * {@link FileAlreadyExistsException} will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @param overwrite Whether or not to overwrite the file, should the file exist. * @return The blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs */ public BlobProperties downloadToFile(String filePath, boolean overwrite) { Set<OpenOption> openOptions = null; if (overwrite) { openOptions = new HashSet<>(); openOptions.add(StandardOpenOption.CREATE); openOptions.add(StandardOpenOption.TRUNCATE_EXISTING); openOptions.add(StandardOpenOption.READ); openOptions.add(StandardOpenOption.WRITE); } return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE) .getValue(); } /** * Downloads the entire blob into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @param range {@link BlobRange} * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel * transfers parameter is ignored. * @param downloadRetryOptions {@link DownloadRetryOptions} * @param requestConditions {@link BlobRequestConditions} * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned. 
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs. */ public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) { return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions, requestConditions, rangeGetContentMd5, null, timeout, context); } /** * Downloads the entire blob into a file specified by the path. * * <p>By default the file will be created and must not exist, if the file already exists a * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate * {@link OpenOption OpenOptions} </p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @param range {@link BlobRange} * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel * transfers parameter is ignored. * @param downloadRetryOptions {@link DownloadRetryOptions} * @param requestConditions {@link BlobRequestConditions} * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned. * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. 
* @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs. */ public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions, Duration timeout, Context context) { final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions = ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions)); return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range) .setParallelTransferOptions(finalParallelTransferOptions) .setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions) .setRangeGetContentMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context); } /** * Downloads the entire blob into a file specified by the path. * * <p>By default the file will be created and must not exist, if the file already exists a * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate * {@link OpenOption OpenOptions} </p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobDownloadToFileOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs. 
*/ public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout, Context context) { Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context); return blockWithOptionalTimeout(download, timeout); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete} * * <p>For more information, see the * <a href="https: */ public void delete() { deleteWithResponse(null, null, null, Context.NONE); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse * * <p>For more information, see the * <a href="https: * * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include} * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being * deleted, you must pass null. * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<Void>> response = client .deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context); return blockWithOptionalTimeout(response, timeout); } /** * Returns the blob's metadata and properties. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties} * * <p>For more information, see the * <a href="https: * * @return The blob properties and metadata. */ public BlobProperties getProperties() { return getPropertiesWithResponse(null, null, Context.NONE).getValue(); } /** * Returns the blob's metadata and properties. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The blob properties and metadata. */ public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context); return blockWithOptionalTimeout(response, timeout); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In * order to preserve existing values, they must be passed alongside the header being changed. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param headers {@link BlobHttpHeaders} */ public void setHttpHeaders(BlobHttpHeaders headers) { setHttpHeadersWithResponse(headers, null, null, Context.NONE); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In * order to preserve existing values, they must be passed alongside the header being changed. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse * * <p>For more information, see the * <a href="https: * * @param headers {@link BlobHttpHeaders} * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<Void>> response = client .setHttpHeadersWithResponse(headers, requestConditions, context); return blockWithOptionalTimeout(response, timeout); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values * must be preserved, they must be downloaded and included in the call to this method. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob. */ public void setMetadata(Map<String, String> metadata) { setMetadataWithResponse(metadata, null, null, Context.NONE); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values * must be preserved, they must be downloaded and included in the call to this method. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob. 
* @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context); return blockWithOptionalTimeout(response, timeout); } /** * Returns the blob's tags. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags} * * <p>For more information, see the * <a href="https: * * @return The blob's tags. */ public Map<String, String> getTags() { return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue(); } /** * Returns the blob's tags. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobGetTagsOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The blob's tags. */ public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout, Context context) { Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context); return blockWithOptionalTimeout(response, timeout); } /** * Sets user defined tags. The specified tags in this method will replace existing tags. If old values * must be preserved, they must be downloaded and included in the call to this method. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags * * <p>For more information, see the * <a href="https: * * @param tags Tags to associate with the blob. */ public void setTags(Map<String, String> tags) { this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE); } /** * Sets user defined tags. The specified tags in this method will replace existing tags. If old values * must be preserved, they must be downloaded and included in the call to this method. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobSetTagsOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) { Mono<Response<Void>> response = client.setTagsWithResponse(options, context); return blockWithOptionalTimeout(response, timeout); } /** * Creates a read-only snapshot of the blob. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot} * * <p>For more information, see the * <a href="https: * * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use * {@link BlobClientBase */ public BlobClientBase createSnapshot() { return createSnapshotWithResponse(null, null, null, Context.NONE).getValue(); } /** * Creates a read-only snapshot of the blob. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob snapshot. * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use * {@link BlobClientBase */ public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<BlobClientBase>> response = client .createSnapshotWithResponse(metadata, requestConditions, context) .map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue()))); return blockWithOptionalTimeout(response, timeout); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's * etag. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier * * <p>For more information, see the * <a href="https: * * @param tier The new tier for the blob. */ public void setAccessTier(AccessTier tier) { setAccessTierWithResponse(tier, null, null, null, Context.NONE); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. 
A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's * etag. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse * * <p>For more information, see the * <a href="https: * * @param tier The new tier for the blob. * @param priority Optional priority to set for re-hydrating blobs. * @param leaseId The lease ID the active lease on the blob must match. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId, Duration timeout, Context context) { return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId), timeout, context); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's * etag. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobSetAccessTierOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. 
*/ public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options, Duration timeout, Context context) { return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete} * * <p>For more information, see the * <a href="https: */ public void undelete() { undeleteWithResponse(null, Context.NONE); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> undeleteWithResponse(Duration timeout, Context context) { Mono<Response<Void>> response = client.undeleteWithResponse(context); return blockWithOptionalTimeout(response, timeout); } /** * Returns the sku name and account kind for the account. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo} * * <p>For more information, see the * <a href="https: * * @return The sku name and account kind. */ public StorageAccountInfo getAccountInfo() { return getAccountInfoWithResponse(null, Context.NONE).getValue(); } /** * Returns the sku name and account kind for the account. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The sku name and account kind. */ public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) { Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context); return blockWithOptionalTimeout(response, timeout); } /** * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}. * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. * @see BlobServiceClient * user delegation key. * @return A {@code String} representing all SAS query parameters. 
*/ public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, UserDelegationKey userDelegationKey) { return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey); } /** * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues} * Note : The client must be authenticated via {@link StorageSharedKeyCredential} * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * * @return A {@code String} representing all SAS query parameters. */ public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) { return this.client.generateSas(blobServiceSasSignatureValues); } /** * Opens a blob input stream to query the blob. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream * * @param expression The query expression. * @return An <code>InputStream</code> object that represents the stream to use for reading the query response. */ public InputStream openQueryInputStream(String expression) { return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue(); } /** * Opens a blob input stream to query the blob. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream * * @param queryOptions {@link BlobQueryOptions The query options}. * @return A response containing status code and HTTP headers including an <code>InputStream</code> object * that represents the stream to use for reading the query response. 
*/ public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) { BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block(); if (response == null) { throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null")); } return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), new FluxInputStream(response.getValue()), response.getDeserializedHeaders()); } /** * Queries an entire blob into an output stream. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @param expression The query expression. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null. */ public void query(OutputStream stream, String expression) { queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE); } /** * Queries an entire blob into an output stream. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse * * @param queryOptions {@link BlobQueryOptions The query options}. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null. 
*/ public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) { StorageImplUtils.assertNotNull("options", queryOptions); StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream()); Mono<BlobQueryResponse> download = client .queryWithResponse(queryOptions, context) .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> { try { outputStream.write(FluxUtil.byteBufferToArray(buffer)); return outputStream; } catch (IOException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex))); } }).thenReturn(new BlobQueryResponse(response))); return blockWithOptionalTimeout(download, timeout); } }
// Reviewer note: agreed -- only pin the If-Match ETag when the caller did not supply request conditions.
public BlobInputStream openInputStream(BlobInputStreamOptions options) { options = options == null ? new BlobInputStreamOptions() : options; BlobProperties properties = getProperties(); BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange(); int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize(); BlobRequestConditions requestConditions = options.getRequestConditions() == null ? new BlobRequestConditions() : options.getRequestConditions(); requestConditions.setIfMatch(properties.getETag()); return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, requestConditions, properties); }
requestConditions.setIfMatch(properties.getETag());
public BlobInputStream openInputStream(BlobInputStreamOptions options) { options = options == null ? new BlobInputStreamOptions() : options; BlobProperties properties = getProperties(); BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange(); int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize(); BlobRequestConditions requestConditions = options.getRequestConditions() == null ? new BlobRequestConditions() : options.getRequestConditions(); if (requestConditions.getIfMatch() == null) { requestConditions.setIfMatch(properties.getETag()); } return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, requestConditions, properties); }
class BlobClientBase { private final ClientLogger logger = new ClientLogger(BlobClientBase.class); private final BlobAsyncClientBase client; /** * Constructor used by {@link SpecializedBlobClientBuilder}. * * @param client the async blob client */ protected BlobClientBase(BlobAsyncClientBase client) { this.client = client; } /** * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource. * * @param snapshot the identifier for a specific snapshot of this blob * @return a {@link BlobClientBase} used to interact with the specific snapshot. */ public BlobClientBase getSnapshotClient(String snapshot) { return new BlobClientBase(client.getSnapshotClient(snapshot)); } /** * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource. * * @param versionId the identifier for a specific version of this blob, * pass {@code null} to interact with the latest blob version. * @return a {@link BlobClientBase} used to interact with the specific version. */ public BlobClientBase getVersionClient(String versionId) { return new BlobClientBase(client.getVersionClient(versionId)); } /** * Gets the URL of the blob represented by this client. * * @return the URL. */ public String getBlobUrl() { return client.getBlobUrl(); } /** * Get associated account name. * * @return account name associated with this storage resource. */ public String getAccountName() { return client.getAccountName(); } /** * Get the container name. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName} * * @return The name of the container. */ public final String getContainerName() { return client.getContainerName(); } /** * Decodes and gets the blob name. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName} * * @return The decoded name of the blob. 
*/ public final String getBlobName() { return client.getBlobName(); } /** * Gets the {@link HttpPipeline} powering this client. * * @return The pipeline. */ public HttpPipeline getHttpPipeline() { return client.getHttpPipeline(); } /** * Gets the {@link CpkInfo} used to encrypt this blob's content on the server. * * @return the customer provided key used for encryption. */ public CpkInfo getCustomerProvidedKey() { return client.getCustomerProvidedKey(); } /** * Gets the {@code encryption scope} used to encrypt this blob's content on the server. * * @return the encryption scope used for encryption. */ String getEncryptionScope() { return client.getEncryptionScope(); } /** * Gets the service version the client is using. * * @return the service version the client is using. */ public BlobServiceVersion getServiceVersion() { return client.getServiceVersion(); } /** * Gets the snapshotId for a blob resource * * @return A string that represents the snapshotId of the snapshot blob */ public String getSnapshotId() { return client.getSnapshotId(); } /** * Gets the versionId for a blob resource * * @return A string that represents the versionId of the snapshot blob */ public String getVersionId() { return client.getVersionId(); } /** * Determines if a blob is a snapshot * * @return A boolean that indicates if a blob is a snapshot */ public boolean isSnapshot() { return client.isSnapshot(); } /** * Opens a blob input stream to download the blob. * <p> * * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob. * @throws BlobStorageException If a storage service error occurred. */ public final BlobInputStream openInputStream() { return openInputStream(null, null); } /** * Opens a blob input stream to download the specified range of the blob. * <p> * * @param range {@link BlobRange} * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the * blob. 
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob. * @throws BlobStorageException If a storage service error occurred. */ public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) { return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions)); } /** * Opens a blob input stream to download the specified range of the blob. * * @param options {@link BlobInputStreamOptions} * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob. * @throws BlobStorageException If a storage service error occurred. */ /** * Gets if the blob this client represents exists in the cloud. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists} * * @return true if the blob exists, false if it doesn't */ public Boolean exists() { return existsWithResponse(null, Context.NONE).getValue(); } /** * Gets if the blob this client represents exists in the cloud. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return true if the blob exists, false if it doesn't */ public Response<Boolean> existsWithResponse(Duration timeout, Context context) { Mono<Response<Boolean>> response = client.existsWithResponse(context); return blockWithOptionalTimeout(response, timeout); } /** * Copies the data at the source URL to a blob. * <p> * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If * the source is in another account, the source must either be public or authenticated with a SAS token. 
If the * source is in the same account, the Shared Key authorization on the destination will also be applied to the * source. The source URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second * is used. * @return A {@link SyncPoller} to poll the progress of blob copy operation. */ public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) { return beginCopy(sourceUrl, null, null, null, null, null, pollInterval); } /** * Copies the data at the source URL to a blob. * <p> * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If * the source is in another account, the source must either be public or authenticated with a SAS token. If the * source is in the same account, the Shared Key authorization on the destination will also be applied to the * source. The source URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata Metadata to associate with the destination blob. * @param tier {@link AccessTier} for the destination blob. * @param priority {@link RehydratePriority} for rehydrating the blob. * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access * conditions related to the modification of data. 
ETag and LastModifiedTime are used to construct conditions * related to when the blob was changed relative to the given request. The request will fail if the specified * condition is not satisfied. * @param destRequestConditions {@link BlobRequestConditions} against the destination. * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second * is used. * @return A {@link SyncPoller} to poll the progress of blob copy operation. */ public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier, RehydratePriority priority, RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions, Duration pollInterval) { return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier) .setRehydratePriority(priority).setSourceRequestConditions( ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions)) .setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval)); } /** * Copies the data at the source URL to a blob. * <p> * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If * the source is in another account, the source must either be public or authenticated with a SAS token. If the * source is in the same account, the Shared Key authorization on the destination will also be applied to the * source. The source URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy * * <p>For more information, see the * <a href="https: * * @param options {@link BlobBeginCopyOptions} * @return A {@link SyncPoller} to poll the progress of blob copy operation. 
*/ public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) { return client.beginCopy(options).getSyncPoller(); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl * * <p>For more information, see the * <a href="https: * * @param copyId The id of the copy operation to abort. */ public void abortCopyFromUrl(String copyId) { abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId The id of the copy operation to abort. * @param leaseId The lease ID the active lease on the blob must match. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout, Context context) { return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * <p> * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token * attached. The URL must be URL encoded. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. * @return The copy ID for the long running operation. * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}. */ public String copyFromUrl(String copySource) { return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue(); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * <p> * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token * attached. The URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata Metadata to associate with the destination blob. * @param tier {@link AccessTier} for the destination blob. * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions * related to when the blob was changed relative to the given request. The request will fail if the specified * condition is not satisfied. * @param destRequestConditions {@link BlobRequestConditions} against the destination. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The copy ID for the long running operation. * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}. 
*/ public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier, RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions, Duration timeout, Context context) { return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata) .setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions) .setDestinationRequestConditions(destRequestConditions), timeout, context); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * <p> * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token * attached. The URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobCopyFromUrlOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The copy ID for the long running operation. * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}. */ public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout, Context context) { Mono<Response<String>> response = client .copyFromUrlWithResponse(options, context); return blockWithOptionalTimeout(response, timeout); } /** * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient}, * {@link PageBlobClient}, or {@link AppendBlobClient}. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null */ public void download(OutputStream stream) { downloadWithResponse(stream, null, null, null, false, null, Context.NONE); } /** * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range {@link BlobRange} * @param options {@link DownloadRetryOptions} * @param requestConditions {@link BlobRequestConditions} * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. * @throws UncheckedIOException If an I/O error occurs. 
* @throws NullPointerException if {@code stream} is null */ public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range, DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) { StorageImplUtils.assertNotNull("stream", stream); Mono<BlobDownloadResponse> download = client .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context) .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> { try { outputStream.write(FluxUtil.byteBufferToArray(buffer)); return outputStream; } catch (IOException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex))); } }).thenReturn(new BlobDownloadResponse(response))); return blockWithOptionalTimeout(download, timeout); } /** * Downloads the entire blob into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @return The blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs */ public BlobProperties downloadToFile(String filePath) { return downloadToFile(filePath, false); } /** * Downloads the entire blob into a file specified by the path. 
     * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
     * {@link FileAlreadyExistsException} will be thrown.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
     *
     * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
     * @param overwrite Whether or not to overwrite the file, should the file exist.
     * @return The blob properties and metadata.
     * @throws UncheckedIOException If an I/O error occurs
     */
    public BlobProperties downloadToFile(String filePath, boolean overwrite) {
        Set<OpenOption> openOptions = null;
        if (overwrite) {
            // Overwrite mode: open the file so it is created if absent and truncated if present.
            openOptions = new HashSet<>();
            openOptions.add(StandardOpenOption.CREATE);
            openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
            openOptions.add(StandardOpenOption.READ);
            openOptions.add(StandardOpenOption.WRITE);
        }
        // A null openOptions set lets the downstream overload apply its default (fail if file exists).
        return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
            .getValue();
    }

    /**
     * Downloads the entire blob into a file specified by the path.
     *
     * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException}
     * will be thrown.</p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
     *
     * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
     * @param range {@link BlobRange}
     * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
     * transfers parameter is ignored.
     * @param downloadRetryOptions {@link DownloadRetryOptions}
     * @param requestConditions {@link BlobRequestConditions}
     * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing the blob properties and metadata.
     * @throws UncheckedIOException If an I/O error occurs.
     */
    public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
        ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
        BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
        // Delegate to the Set<OpenOption> overload with null openOptions (default: file must not exist).
        return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
            requestConditions, rangeGetContentMd5, null, timeout, context);
    }

    /**
     * Downloads the entire blob into a file specified by the path.
     *
     * <p>By default the file will be created and must not exist, if the file already exists a
     * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
     * {@link OpenOption OpenOptions} </p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
     *
     * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
     * @param range {@link BlobRange}
     * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
     * transfers parameter is ignored.
     * @param downloadRetryOptions {@link DownloadRetryOptions}
     * @param requestConditions {@link BlobRequestConditions}
     * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
     * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing the blob properties and metadata.
     * @throws UncheckedIOException If an I/O error occurs.
     */
    public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
        ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
        BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
        Duration timeout, Context context) {
        // Wrap the blob-specific transfer options into the common type expected by the options bag.
        final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
            ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
        return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
            .setParallelTransferOptions(finalParallelTransferOptions)
            .setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
            .setRangeGetContentMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
    }

    /**
     * Downloads the entire blob into a file specified by the path.
     *
     * <p>By default the file will be created and must not exist, if the file already exists a
     * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
     * {@link OpenOption OpenOptions} </p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob">Azure Docs</a></p>
     *
     * @param options {@link BlobDownloadToFileOptions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing the blob properties and metadata.
     * @throws UncheckedIOException If an I/O error occurs.
     */
    public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
        Context context) {
        // Bridge the async download onto this synchronous surface, honoring the optional timeout.
        Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
        return blockWithOptionalTimeout(download, timeout);
    }

    /**
     * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob">Azure Docs</a></p>
     */
    public void delete() {
        deleteWithResponse(null, null, null, Context.NONE);
    }

    /**
     * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob">Azure Docs</a></p>
     *
     * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
     * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
     * deleted, you must pass null.
     * @param requestConditions {@link BlobRequestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
        BlobRequestConditions requestConditions, Duration timeout, Context context) {
        Mono<Response<Void>> response = client
            .deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
        return blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Returns the blob's metadata and properties.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
     *
     * @return The blob properties and metadata.
     */
    public BlobProperties getProperties() {
        return getPropertiesWithResponse(null, null, Context.NONE).getValue();
    }

    /**
     * Returns the blob's metadata and properties.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties">Azure Docs</a></p>
     *
     * @param requestConditions {@link BlobRequestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return The blob properties and metadata.
     */
    public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
        Context context) {
        Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
        return blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
     * order to preserve existing values, they must be passed alongside the header being changed.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties">Azure Docs</a></p>
     *
     * @param headers {@link BlobHttpHeaders}
     */
    public void setHttpHeaders(BlobHttpHeaders headers) {
        setHttpHeadersWithResponse(headers, null, null, Context.NONE);
    }

    /**
     * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
     * order to preserve existing values, they must be passed alongside the header being changed.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-properties">Azure Docs</a></p>
     *
     * @param headers {@link BlobHttpHeaders}
     * @param requestConditions {@link BlobRequestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
        Duration timeout, Context context) {
        Mono<Response<Void>> response = client
            .setHttpHeadersWithResponse(headers, requestConditions, context);
        return blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
     * must be preserved, they must be downloaded and included in the call to this method.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata">Azure Docs</a></p>
     *
     * @param metadata Metadata to associate with the blob.
     */
    public void setMetadata(Map<String, String> metadata) {
        setMetadataWithResponse(metadata, null, null, Context.NONE);
    }

    /**
     * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
     * must be preserved, they must be downloaded and included in the call to this method.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-metadata">Azure Docs</a></p>
     *
     * @param metadata Metadata to associate with the blob.
     * @param requestConditions {@link BlobRequestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
        Duration timeout, Context context) {
        Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
        return blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Returns the blob's tags.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags">Azure Docs</a></p>
     *
     * @return The blob's tags.
     */
    public Map<String, String> getTags() {
        return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
    }

    /**
     * Returns the blob's tags.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags">Azure Docs</a></p>
     *
     * @param options {@link BlobGetTagsOptions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return The blob's tags.
     */
    public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
        Context context) {
        Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
        return blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
     * must be preserved, they must be downloaded and included in the call to this method.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags">Azure Docs</a></p>
     *
     * @param tags Tags to associate with the blob.
     */
    public void setTags(Map<String, String> tags) {
        this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
    }

    /**
     * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
     * must be preserved, they must be downloaded and included in the call to this method.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags">Azure Docs</a></p>
     *
     * @param options {@link BlobSetTagsOptions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
        Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
        return blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Creates a read-only snapshot of the blob.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-blob">Azure Docs</a></p>
     *
     * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
     * {@link BlobClientBase#getSnapshotId()} to get the identifier for the snapshot.
     */
    public BlobClientBase createSnapshot() {
        return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
    }

    /**
     * Creates a read-only snapshot of the blob.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/snapshot-blob">Azure Docs</a></p>
     *
     * @param metadata Metadata to associate with the blob snapshot.
     * @param requestConditions {@link BlobRequestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use
     * {@link BlobClientBase#getSnapshotId()} to get the identifier for the snapshot.
     */
    public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
        BlobRequestConditions requestConditions, Duration timeout, Context context) {
        // Wrap the async snapshot response so the returned client targets the new snapshot.
        Mono<Response<BlobClientBase>> response = client
            .createSnapshotWithResponse(metadata, requestConditions, context)
            .map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
        return blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
     * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
     * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
     * etag.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
     *
     * @param tier The new tier for the blob.
     */
    public void setAccessTier(AccessTier tier) {
        setAccessTierWithResponse(tier, null, null, null, Context.NONE);
    }

    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
     * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
     * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
     * etag.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
     *
     * @param tier The new tier for the blob.
     * @param priority Optional priority to set for re-hydrating blobs.
     * @param leaseId The lease ID the active lease on the blob must match.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
        Duration timeout, Context context) {
        // Delegate to the options-based overload.
        return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId),
            timeout, context);
    }

    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in
     * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
     * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
     * etag.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tier">Azure Docs</a></p>
     *
     * @param options {@link BlobSetAccessTierOptions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options, Duration timeout,
        Context context) {
        return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
    }

    /**
     * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
     */
    public void undelete() {
        undeleteWithResponse(null, Context.NONE);
    }

    /**
     * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
     *
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
        Mono<Response<Void>> response = client.undeleteWithResponse(context);
        return blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Returns the sku name and account kind for the account.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/get-account-information">Azure Docs</a></p>
     *
     * @return The sku name and account kind.
     */
    public StorageAccountInfo getAccountInfo() {
        return getAccountInfoWithResponse(null, Context.NONE).getValue();
    }

    /**
     * Returns the sku name and account kind for the account.
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The sku name and account kind. */ public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) { Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context); return blockWithOptionalTimeout(response, timeout); } /** * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}. * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. * @see BlobServiceClient * user delegation key. * @return A {@code String} representing all SAS query parameters. 
*/ public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, UserDelegationKey userDelegationKey) { return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey); } /** * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues} * Note : The client must be authenticated via {@link StorageSharedKeyCredential} * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * * @return A {@code String} representing all SAS query parameters. */ public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) { return this.client.generateSas(blobServiceSasSignatureValues); } /** * Opens a blob input stream to query the blob. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream * * @param expression The query expression. * @return An <code>InputStream</code> object that represents the stream to use for reading the query response. */ public InputStream openQueryInputStream(String expression) { return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue(); } /** * Opens a blob input stream to query the blob. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream * * @param queryOptions {@link BlobQueryOptions The query options}. * @return A response containing status code and HTTP headers including an <code>InputStream</code> object * that represents the stream to use for reading the query response. 
     */
    public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) {
        // Blocking here is intentional: this is the synchronous facade over the async client.
        BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block();

        if (response == null) {
            throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null"));
        }

        // Wrap the reactive body in a stream adapter so callers can consume it with blocking reads.
        return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(),
            new FluxInputStream(response.getValue()), response.getDeserializedHeaders());
    }

    /**
     * Queries an entire blob into an output stream.
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query}
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @param expression The query expression.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null.
     */
    public void query(OutputStream stream, String expression) {
        queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE);
    }

    /**
     * Queries an entire blob into an output stream.
     *
     * <p>For more information, see the
     * <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/query-blob-contents">Azure Docs</a></p>
     *
     * <p><strong>Code Samples</strong></p>
     *
     * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse}
     *
     * @param queryOptions {@link BlobQueryOptions The query options}.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null.
     */
    public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) {
        StorageImplUtils.assertNotNull("options", queryOptions);
        StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream());
        // Reduce the reactive byte-buffer stream into the caller-supplied OutputStream; IOExceptions
        // are rethrown as UncheckedIOException through the reactive pipeline.
        Mono<BlobQueryResponse> download = client
            .queryWithResponse(queryOptions, context)
            .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(),
                (outputStream, buffer) -> {
                    try {
                        outputStream.write(FluxUtil.byteBufferToArray(buffer));
                        return outputStream;
                    } catch (IOException ex) {
                        throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                    }
                }).thenReturn(new BlobQueryResponse(response)));

        return blockWithOptionalTimeout(download, timeout);
    }
}
class BlobClientBase { private final ClientLogger logger = new ClientLogger(BlobClientBase.class); private final BlobAsyncClientBase client; /** * Constructor used by {@link SpecializedBlobClientBuilder}. * * @param client the async blob client */ protected BlobClientBase(BlobAsyncClientBase client) { this.client = client; } /** * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource. * * @param snapshot the identifier for a specific snapshot of this blob * @return a {@link BlobClientBase} used to interact with the specific snapshot. */ public BlobClientBase getSnapshotClient(String snapshot) { return new BlobClientBase(client.getSnapshotClient(snapshot)); } /** * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource. * * @param versionId the identifier for a specific version of this blob, * pass {@code null} to interact with the latest blob version. * @return a {@link BlobClientBase} used to interact with the specific version. */ public BlobClientBase getVersionClient(String versionId) { return new BlobClientBase(client.getVersionClient(versionId)); } /** * Gets the URL of the blob represented by this client. * * @return the URL. */ public String getBlobUrl() { return client.getBlobUrl(); } /** * Get associated account name. * * @return account name associated with this storage resource. */ public String getAccountName() { return client.getAccountName(); } /** * Get the container name. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName} * * @return The name of the container. */ public final String getContainerName() { return client.getContainerName(); } /** * Decodes and gets the blob name. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName} * * @return The decoded name of the blob. 
*/ public final String getBlobName() { return client.getBlobName(); } /** * Gets the {@link HttpPipeline} powering this client. * * @return The pipeline. */ public HttpPipeline getHttpPipeline() { return client.getHttpPipeline(); } /** * Gets the {@link CpkInfo} used to encrypt this blob's content on the server. * * @return the customer provided key used for encryption. */ public CpkInfo getCustomerProvidedKey() { return client.getCustomerProvidedKey(); } /** * Gets the {@code encryption scope} used to encrypt this blob's content on the server. * * @return the encryption scope used for encryption. */ String getEncryptionScope() { return client.getEncryptionScope(); } /** * Gets the service version the client is using. * * @return the service version the client is using. */ public BlobServiceVersion getServiceVersion() { return client.getServiceVersion(); } /** * Gets the snapshotId for a blob resource * * @return A string that represents the snapshotId of the snapshot blob */ public String getSnapshotId() { return client.getSnapshotId(); } /** * Gets the versionId for a blob resource * * @return A string that represents the versionId of the snapshot blob */ public String getVersionId() { return client.getVersionId(); } /** * Determines if a blob is a snapshot * * @return A boolean that indicates if a blob is a snapshot */ public boolean isSnapshot() { return client.isSnapshot(); } /** * Opens a blob input stream to download the blob. * <p> * * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob. * @throws BlobStorageException If a storage service error occurred. */ public final BlobInputStream openInputStream() { return openInputStream(null, null); } /** * Opens a blob input stream to download the specified range of the blob. * <p> * * @param range {@link BlobRange} * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the * blob. 
* @return An <code>InputStream</code> object that represents the stream to use for reading from the blob. * @throws BlobStorageException If a storage service error occurred. */ public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) { return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions)); } /** * Opens a blob input stream to download the specified range of the blob. * * @param options {@link BlobInputStreamOptions} * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob. * @throws BlobStorageException If a storage service error occurred. */ /** * Gets if the blob this client represents exists in the cloud. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists} * * @return true if the blob exists, false if it doesn't */ public Boolean exists() { return existsWithResponse(null, Context.NONE).getValue(); } /** * Gets if the blob this client represents exists in the cloud. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return true if the blob exists, false if it doesn't */ public Response<Boolean> existsWithResponse(Duration timeout, Context context) { Mono<Response<Boolean>> response = client.existsWithResponse(context); return blockWithOptionalTimeout(response, timeout); } /** * Copies the data at the source URL to a blob. * <p> * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If * the source is in another account, the source must either be public or authenticated with a SAS token. 
If the * source is in the same account, the Shared Key authorization on the destination will also be applied to the * source. The source URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second * is used. * @return A {@link SyncPoller} to poll the progress of blob copy operation. */ public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) { return beginCopy(sourceUrl, null, null, null, null, null, pollInterval); } /** * Copies the data at the source URL to a blob. * <p> * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If * the source is in another account, the source must either be public or authenticated with a SAS token. If the * source is in the same account, the Shared Key authorization on the destination will also be applied to the * source. The source URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata Metadata to associate with the destination blob. * @param tier {@link AccessTier} for the destination blob. * @param priority {@link RehydratePriority} for rehydrating the blob. * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access * conditions related to the modification of data. 
ETag and LastModifiedTime are used to construct conditions * related to when the blob was changed relative to the given request. The request will fail if the specified * condition is not satisfied. * @param destRequestConditions {@link BlobRequestConditions} against the destination. * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second * is used. * @return A {@link SyncPoller} to poll the progress of blob copy operation. */ public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier, RehydratePriority priority, RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions, Duration pollInterval) { return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier) .setRehydratePriority(priority).setSourceRequestConditions( ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions)) .setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval)); } /** * Copies the data at the source URL to a blob. * <p> * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If * the source is in another account, the source must either be public or authenticated with a SAS token. If the * source is in the same account, the Shared Key authorization on the destination will also be applied to the * source. The source URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy * * <p>For more information, see the * <a href="https: * * @param options {@link BlobBeginCopyOptions} * @return A {@link SyncPoller} to poll the progress of blob copy operation. 
*/ public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) { return client.beginCopy(options).getSyncPoller(); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl * * <p>For more information, see the * <a href="https: * * @param copyId The id of the copy operation to abort. */ public void abortCopyFromUrl(String copyId) { abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId The id of the copy operation to abort. * @param leaseId The lease ID the active lease on the blob must match. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout, Context context) { return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * <p> * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token * attached. The URL must be URL encoded. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. * @return The copy ID for the long running operation. * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}. */ public String copyFromUrl(String copySource) { return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue(); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * <p> * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token * attached. The URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata Metadata to associate with the destination blob. * @param tier {@link AccessTier} for the destination blob. * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions * related to when the blob was changed relative to the given request. The request will fail if the specified * condition is not satisfied. * @param destRequestConditions {@link BlobRequestConditions} against the destination. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The copy ID for the long running operation. * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}. 
*/ public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier, RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions, Duration timeout, Context context) { return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata) .setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions) .setDestinationRequestConditions(destRequestConditions), timeout, context); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * <p> * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token * attached. The URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobCopyFromUrlOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The copy ID for the long running operation. * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}. */ public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout, Context context) { Mono<Response<String>> response = client .copyFromUrlWithResponse(options, context); return blockWithOptionalTimeout(response, timeout); } /** * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient}, * {@link PageBlobClient}, or {@link AppendBlobClient}. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null */ public void download(OutputStream stream) { downloadWithResponse(stream, null, null, null, false, null, Context.NONE); } /** * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range {@link BlobRange} * @param options {@link DownloadRetryOptions} * @param requestConditions {@link BlobRequestConditions} * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. * @throws UncheckedIOException If an I/O error occurs. 
* @throws NullPointerException if {@code stream} is null */ public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range, DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) { StorageImplUtils.assertNotNull("stream", stream); Mono<BlobDownloadResponse> download = client .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context) .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> { try { outputStream.write(FluxUtil.byteBufferToArray(buffer)); return outputStream; } catch (IOException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex))); } }).thenReturn(new BlobDownloadResponse(response))); return blockWithOptionalTimeout(download, timeout); } /** * Downloads the entire blob into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @return The blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs */ public BlobProperties downloadToFile(String filePath) { return downloadToFile(filePath, false); } /** * Downloads the entire blob into a file specified by the path. 
* * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a * {@link FileAlreadyExistsException} will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @param overwrite Whether or not to overwrite the file, should the file exist. * @return The blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs */ public BlobProperties downloadToFile(String filePath, boolean overwrite) { Set<OpenOption> openOptions = null; if (overwrite) { openOptions = new HashSet<>(); openOptions.add(StandardOpenOption.CREATE); openOptions.add(StandardOpenOption.TRUNCATE_EXISTING); openOptions.add(StandardOpenOption.READ); openOptions.add(StandardOpenOption.WRITE); } return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE) .getValue(); } /** * Downloads the entire blob into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @param range {@link BlobRange} * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel * transfers parameter is ignored. * @param downloadRetryOptions {@link DownloadRetryOptions} * @param requestConditions {@link BlobRequestConditions} * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned. 
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs. */ public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) { return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions, requestConditions, rangeGetContentMd5, null, timeout, context); } /** * Downloads the entire blob into a file specified by the path. * * <p>By default the file will be created and must not exist, if the file already exists a * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate * {@link OpenOption OpenOptions} </p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @param range {@link BlobRange} * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel * transfers parameter is ignored. * @param downloadRetryOptions {@link DownloadRetryOptions} * @param requestConditions {@link BlobRequestConditions} * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned. * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. 
* @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs. */ public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions, Duration timeout, Context context) { final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions = ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions)); return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range) .setParallelTransferOptions(finalParallelTransferOptions) .setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions) .setRangeGetContentMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context); } /** * Downloads the entire blob into a file specified by the path. * * <p>By default the file will be created and must not exist, if the file already exists a * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate * {@link OpenOption OpenOptions} </p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobDownloadToFileOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs. 
*/ public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout, Context context) { Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context); return blockWithOptionalTimeout(download, timeout); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete} * * <p>For more information, see the * <a href="https: */ public void delete() { deleteWithResponse(null, null, null, Context.NONE); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse * * <p>For more information, see the * <a href="https: * * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include} * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being * deleted, you must pass null. * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<Void>> response = client .deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context); return blockWithOptionalTimeout(response, timeout); } /** * Returns the blob's metadata and properties. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties} * * <p>For more information, see the * <a href="https: * * @return The blob properties and metadata. */ public BlobProperties getProperties() { return getPropertiesWithResponse(null, null, Context.NONE).getValue(); } /** * Returns the blob's metadata and properties. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The blob properties and metadata. */ public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context); return blockWithOptionalTimeout(response, timeout); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In * order to preserve existing values, they must be passed alongside the header being changed. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param headers {@link BlobHttpHeaders} */ public void setHttpHeaders(BlobHttpHeaders headers) { setHttpHeadersWithResponse(headers, null, null, Context.NONE); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In * order to preserve existing values, they must be passed alongside the header being changed. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse * * <p>For more information, see the * <a href="https: * * @param headers {@link BlobHttpHeaders} * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<Void>> response = client .setHttpHeadersWithResponse(headers, requestConditions, context); return blockWithOptionalTimeout(response, timeout); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values * must be preserved, they must be downloaded and included in the call to this method. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob. */ public void setMetadata(Map<String, String> metadata) { setMetadataWithResponse(metadata, null, null, Context.NONE); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values * must be preserved, they must be downloaded and included in the call to this method. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob. 
* @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context); return blockWithOptionalTimeout(response, timeout); } /** * Returns the blob's tags. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags} * * <p>For more information, see the * <a href="https: * * @return The blob's tags. */ public Map<String, String> getTags() { return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue(); } /** * Returns the blob's tags. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobGetTagsOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The blob's tags. */ public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout, Context context) { Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context); return blockWithOptionalTimeout(response, timeout); } /** * Sets user defined tags. The specified tags in this method will replace existing tags. If old values * must be preserved, they must be downloaded and included in the call to this method. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags * * <p>For more information, see the * <a href="https: * * @param tags Tags to associate with the blob. */ public void setTags(Map<String, String> tags) { this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE); } /** * Sets user defined tags. The specified tags in this method will replace existing tags. If old values * must be preserved, they must be downloaded and included in the call to this method. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobSetTagsOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) { Mono<Response<Void>> response = client.setTagsWithResponse(options, context); return blockWithOptionalTimeout(response, timeout); } /** * Creates a read-only snapshot of the blob. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot} * * <p>For more information, see the * <a href="https: * * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use * {@link BlobClientBase */ public BlobClientBase createSnapshot() { return createSnapshotWithResponse(null, null, null, Context.NONE).getValue(); } /** * Creates a read-only snapshot of the blob. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob snapshot. * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use * {@link BlobClientBase */ public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<BlobClientBase>> response = client .createSnapshotWithResponse(metadata, requestConditions, context) .map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue()))); return blockWithOptionalTimeout(response, timeout); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's * etag. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier * * <p>For more information, see the * <a href="https: * * @param tier The new tier for the blob. */ public void setAccessTier(AccessTier tier) { setAccessTierWithResponse(tier, null, null, null, Context.NONE); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. 
A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's * etag. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse * * <p>For more information, see the * <a href="https: * * @param tier The new tier for the blob. * @param priority Optional priority to set for re-hydrating blobs. * @param leaseId The lease ID the active lease on the blob must match. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId, Duration timeout, Context context) { return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId), timeout, context); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's * etag. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobSetAccessTierOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. 
 */
public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options, Duration timeout,
    Context context) {
    // Delegate to the async client and block, raising if the optional timeout elapses first.
    return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout);
}

/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted
 * snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
 */
public void undelete() {
    // Convenience overload: no timeout, empty context, response discarded.
    undeleteWithResponse(null, Context.NONE);
}

/**
 * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted
 * snapshots.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/undelete-blob">Azure Docs</a></p>
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers.
 */
public Response<Void> undeleteWithResponse(Duration timeout, Context context) {
    Mono<Response<Void>> response = client.undeleteWithResponse(context);
    return blockWithOptionalTimeout(response, timeout);
}

/**
 * Returns the sku name and account kind for the account.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo}
 *
 * <p>For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/get-account-information">Azure Docs</a></p>
 *
 * @return The sku name and account kind.
 */
public StorageAccountInfo getAccountInfo() {
    return getAccountInfoWithResponse(null, Context.NONE).getValue();
}

/**
 * Returns the sku name and account kind for the account.
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The sku name and account kind. */ public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) { Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context); return blockWithOptionalTimeout(response, timeout); } /** * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}. * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. * @see BlobServiceClient * user delegation key. * @return A {@code String} representing all SAS query parameters. 
*/ public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, UserDelegationKey userDelegationKey) { return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey); } /** * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues} * Note : The client must be authenticated via {@link StorageSharedKeyCredential} * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * * @return A {@code String} representing all SAS query parameters. */ public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) { return this.client.generateSas(blobServiceSasSignatureValues); } /** * Opens a blob input stream to query the blob. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream * * @param expression The query expression. * @return An <code>InputStream</code> object that represents the stream to use for reading the query response. */ public InputStream openQueryInputStream(String expression) { return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue(); } /** * Opens a blob input stream to query the blob. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream * * @param queryOptions {@link BlobQueryOptions The query options}. * @return A response containing status code and HTTP headers including an <code>InputStream</code> object * that represents the stream to use for reading the query response. 
*/ public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) { BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block(); if (response == null) { throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null")); } return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), new FluxInputStream(response.getValue()), response.getDeserializedHeaders()); } /** * Queries an entire blob into an output stream. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @param expression The query expression. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null. */ public void query(OutputStream stream, String expression) { queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE); } /** * Queries an entire blob into an output stream. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse * * @param queryOptions {@link BlobQueryOptions The query options}. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null. 
*/ public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) { StorageImplUtils.assertNotNull("options", queryOptions); StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream()); Mono<BlobQueryResponse> download = client .queryWithResponse(queryOptions, context) .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> { try { outputStream.write(FluxUtil.byteBufferToArray(buffer)); return outputStream; } catch (IOException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex))); } }).thenReturn(new BlobQueryResponse(response))); return blockWithOptionalTimeout(download, timeout); } }
Fixed: the If-Match request condition is now seeded from the blob's current ETag only when the caller has not already supplied one, so user-provided request conditions are no longer silently overwritten.
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param options {@link BlobInputStreamOptions}, or {@code null} to use defaults (entire blob, default
 * block size, no extra request conditions).
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public BlobInputStream openInputStream(BlobInputStreamOptions options) {
    // Normalize a null options bag to defaults.
    options = options == null ? new BlobInputStreamOptions() : options;
    // Fetch current properties up front; the ETag is used below as an If-Match condition.
    BlobProperties properties = getProperties();
    // Default to the whole blob and a 4 MB block size when unspecified.
    BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
    int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
    BlobRequestConditions requestConditions = options.getRequestConditions() == null
        ? new BlobRequestConditions() : options.getRequestConditions();
    // Bug fix: only seed the If-Match condition from the current ETag when the caller has not
    // already supplied one. The previous unconditional setIfMatch(...) discarded any
    // caller-provided If-Match value on the passed-in request conditions.
    if (requestConditions.getIfMatch() == null) {
        requestConditions.setIfMatch(properties.getETag());
    }
    return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, requestConditions,
        properties);
}
requestConditions.setIfMatch(properties.getETag());
/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param options {@link BlobInputStreamOptions}, or {@code null} to use defaults (entire blob, default
 * block size, no extra request conditions).
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public BlobInputStream openInputStream(BlobInputStreamOptions options) {
    // Normalize a null options bag to defaults.
    options = options == null ? new BlobInputStreamOptions() : options;
    // Fetch current properties up front; the ETag is used below as an If-Match condition.
    BlobProperties properties = getProperties();
    // Default to the whole blob and a 4 MB block size when unspecified.
    BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
    int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
    BlobRequestConditions requestConditions = options.getRequestConditions() == null
        ? new BlobRequestConditions() : options.getRequestConditions();
    // Seed If-Match from the current ETag only when the caller did not supply one, so
    // caller-provided request conditions are preserved.
    if (requestConditions.getIfMatch() == null) {
        requestConditions.setIfMatch(properties.getETag());
    }
    return new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, requestConditions,
        properties);
}
class BlobClientBase { private final ClientLogger logger = new ClientLogger(BlobClientBase.class); private final BlobAsyncClientBase client; /** * Constructor used by {@link SpecializedBlobClientBuilder}. * * @param client the async blob client */ protected BlobClientBase(BlobAsyncClientBase client) { this.client = client; } /** * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource. * * @param snapshot the identifier for a specific snapshot of this blob * @return a {@link BlobClientBase} used to interact with the specific snapshot. */ public BlobClientBase getSnapshotClient(String snapshot) { return new BlobClientBase(client.getSnapshotClient(snapshot)); } /** * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource. * * @param versionId the identifier for a specific version of this blob, * pass {@code null} to interact with the latest blob version. * @return a {@link BlobClientBase} used to interact with the specific version. */ public BlobClientBase getVersionClient(String versionId) { return new BlobClientBase(client.getVersionClient(versionId)); } /** * Gets the URL of the blob represented by this client. * * @return the URL. */ public String getBlobUrl() { return client.getBlobUrl(); } /** * Get associated account name. * * @return account name associated with this storage resource. */ public String getAccountName() { return client.getAccountName(); } /** * Get the container name. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getContainerName} * * @return The name of the container. */ public final String getContainerName() { return client.getContainerName(); } /** * Decodes and gets the blob name. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getBlobName} * * @return The decoded name of the blob. 
*/ public final String getBlobName() { return client.getBlobName(); } /** * Gets the {@link HttpPipeline} powering this client. * * @return The pipeline. */ public HttpPipeline getHttpPipeline() { return client.getHttpPipeline(); } /** * Gets the {@link CpkInfo} used to encrypt this blob's content on the server. * * @return the customer provided key used for encryption. */ public CpkInfo getCustomerProvidedKey() { return client.getCustomerProvidedKey(); } /** * Gets the {@code encryption scope} used to encrypt this blob's content on the server. * * @return the encryption scope used for encryption. */ String getEncryptionScope() { return client.getEncryptionScope(); } /** * Gets the service version the client is using. * * @return the service version the client is using. */ public BlobServiceVersion getServiceVersion() { return client.getServiceVersion(); } /** * Gets the snapshotId for a blob resource * * @return A string that represents the snapshotId of the snapshot blob */ public String getSnapshotId() { return client.getSnapshotId(); } /** * Gets the versionId for a blob resource * * @return A string that represents the versionId of the snapshot blob */ public String getVersionId() { return client.getVersionId(); } /** * Determines if a blob is a snapshot * * @return A boolean that indicates if a blob is a snapshot */ public boolean isSnapshot() { return client.isSnapshot(); } /** * Opens a blob input stream to download the blob. * <p> * * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob. * @throws BlobStorageException If a storage service error occurred. */ public final BlobInputStream openInputStream() { return openInputStream(null, null); } /** * Opens a blob input stream to download the specified range of the blob. * <p> * * @param range {@link BlobRange} * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the * blob. 
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
    // Convenience overload: wrap the legacy (range, conditions) arguments in the options bag.
    return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}

/**
 * Opens a blob input stream to download the specified range of the blob.
 *
 * @param options {@link BlobInputStreamOptions}
 * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
 * @throws BlobStorageException If a storage service error occurred.
 */
// NOTE(review): the body of the openInputStream(BlobInputStreamOptions) overload documented above is
// not present at this point in the chunk — confirm its placement against the full file.

/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.exists}
 *
 * @return true if the blob exists, false if it doesn't
 */
public Boolean exists() {
    return existsWithResponse(null, Context.NONE).getValue();
}

/**
 * Gets if the blob this client represents exists in the cloud.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.existsWithResponse}
 *
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return true if the blob exists, false if it doesn't
 */
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
    Mono<Response<Boolean>> response = client.existsWithResponse(context);
    return blockWithOptionalTimeout(response, timeout);
}

/**
 * Copies the data at the source URL to a blob.
 * <p>
 * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If
 * the source is in another account, the source must either be public or authenticated with a SAS token.
If the * source is in the same account, the Shared Key authorization on the destination will also be applied to the * source. The source URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second * is used. * @return A {@link SyncPoller} to poll the progress of blob copy operation. */ public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) { return beginCopy(sourceUrl, null, null, null, null, null, pollInterval); } /** * Copies the data at the source URL to a blob. * <p> * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If * the source is in another account, the source must either be public or authenticated with a SAS token. If the * source is in the same account, the Shared Key authorization on the destination will also be applied to the * source. The source URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata Metadata to associate with the destination blob. * @param tier {@link AccessTier} for the destination blob. * @param priority {@link RehydratePriority} for rehydrating the blob. * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access * conditions related to the modification of data. 
ETag and LastModifiedTime are used to construct conditions * related to when the blob was changed relative to the given request. The request will fail if the specified * condition is not satisfied. * @param destRequestConditions {@link BlobRequestConditions} against the destination. * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second * is used. * @return A {@link SyncPoller} to poll the progress of blob copy operation. */ public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier, RehydratePriority priority, RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions, Duration pollInterval) { return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier) .setRehydratePriority(priority).setSourceRequestConditions( ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions)) .setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval)); } /** * Copies the data at the source URL to a blob. * <p> * This method triggers a long-running, asynchronous operations. The source may be another blob or an Azure File. If * the source is in another account, the source must either be public or authenticated with a SAS token. If the * source is in the same account, the Shared Key authorization on the destination will also be applied to the * source. The source URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.beginCopy * * <p>For more information, see the * <a href="https: * * @param options {@link BlobBeginCopyOptions} * @return A {@link SyncPoller} to poll the progress of blob copy operation. 
*/ public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) { return client.beginCopy(options).getSyncPoller(); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrl * * <p>For more information, see the * <a href="https: * * @param copyId The id of the copy operation to abort. */ public void abortCopyFromUrl(String copyId) { abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.abortCopyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId The id of the copy operation to abort. * @param leaseId The lease ID the active lease on the blob must match. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout, Context context) { return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * <p> * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token * attached. The URL must be URL encoded. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrl * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. * @return The copy ID for the long running operation. * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}. */ public String copyFromUrl(String copySource) { return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue(); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * <p> * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token * attached. The URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata Metadata to associate with the destination blob. * @param tier {@link AccessTier} for the destination blob. * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions * related to when the blob was changed relative to the given request. The request will fail if the specified * condition is not satisfied. * @param destRequestConditions {@link BlobRequestConditions} against the destination. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The copy ID for the long running operation. * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}. 
*/ public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier, RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions, Duration timeout, Context context) { return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata) .setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions) .setDestinationRequestConditions(destRequestConditions), timeout, context); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * <p> * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token * attached. The URL must be URL encoded. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.copyFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobCopyFromUrlOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The copy ID for the long running operation. * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}. */ public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout, Context context) { Mono<Response<String>> response = client .copyFromUrlWithResponse(options, context); return blockWithOptionalTimeout(response, timeout); } /** * Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient}, * {@link PageBlobClient}, or {@link AppendBlobClient}. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.download * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null */ public void download(OutputStream stream) { downloadWithResponse(stream, null, null, null, false, null, Context.NONE); } /** * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadWithResponse * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range {@link BlobRange} * @param options {@link DownloadRetryOptions} * @param requestConditions {@link BlobRequestConditions} * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. * @throws UncheckedIOException If an I/O error occurs. 
* @throws NullPointerException if {@code stream} is null */ public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range, DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5, Duration timeout, Context context) { StorageImplUtils.assertNotNull("stream", stream); Mono<BlobDownloadResponse> download = client .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context) .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> { try { outputStream.write(FluxUtil.byteBufferToArray(buffer)); return outputStream; } catch (IOException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex))); } }).thenReturn(new BlobDownloadResponse(response))); return blockWithOptionalTimeout(download, timeout); } /** * Downloads the entire blob into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @return The blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs */ public BlobProperties downloadToFile(String filePath) { return downloadToFile(filePath, false); } /** * Downloads the entire blob into a file specified by the path. 
* * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a * {@link FileAlreadyExistsException} will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFile * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @param overwrite Whether or not to overwrite the file, should the file exist. * @return The blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs */ public BlobProperties downloadToFile(String filePath, boolean overwrite) { Set<OpenOption> openOptions = null; if (overwrite) { openOptions = new HashSet<>(); openOptions.add(StandardOpenOption.CREATE); openOptions.add(StandardOpenOption.TRUNCATE_EXISTING); openOptions.add(StandardOpenOption.READ); openOptions.add(StandardOpenOption.WRITE); } return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE) .getValue(); } /** * Downloads the entire blob into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @param range {@link BlobRange} * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel * transfers parameter is ignored. * @param downloadRetryOptions {@link DownloadRetryOptions} * @param requestConditions {@link BlobRequestConditions} * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned. 
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs. */ public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) { return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions, requestConditions, rangeGetContentMd5, null, timeout, context); } /** * Downloads the entire blob into a file specified by the path. * * <p>By default the file will be created and must not exist, if the file already exists a * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate * {@link OpenOption OpenOptions} </p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @param range {@link BlobRange} * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel * transfers parameter is ignored. * @param downloadRetryOptions {@link DownloadRetryOptions} * @param requestConditions {@link BlobRequestConditions} * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned. * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. 
* @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs. */ public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions, Duration timeout, Context context) { final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions = ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions)); return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range) .setParallelTransferOptions(finalParallelTransferOptions) .setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions) .setRangeGetContentMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context); } /** * Downloads the entire blob into a file specified by the path. * * <p>By default the file will be created and must not exist, if the file already exists a * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate * {@link OpenOption OpenOptions} </p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.downloadToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobDownloadToFileOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the blob properties and metadata. * @throws UncheckedIOException If an I/O error occurs. 
*/ public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout, Context context) { Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context); return blockWithOptionalTimeout(download, timeout); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.delete} * * <p>For more information, see the * <a href="https: */ public void delete() { deleteWithResponse(null, null, null, Context.NONE); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.deleteWithResponse * * <p>For more information, see the * <a href="https: * * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include} * will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being * deleted, you must pass null. * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<Void>> response = client .deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context); return blockWithOptionalTimeout(response, timeout); } /** * Returns the blob's metadata and properties. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getProperties} * * <p>For more information, see the * <a href="https: * * @return The blob properties and metadata. */ public BlobProperties getProperties() { return getPropertiesWithResponse(null, null, Context.NONE).getValue(); } /** * Returns the blob's metadata and properties. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The blob properties and metadata. */ public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context); return blockWithOptionalTimeout(response, timeout); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In * order to preserve existing values, they must be passed alongside the header being changed. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeaders * * <p>For more information, see the * <a href="https: * * @param headers {@link BlobHttpHeaders} */ public void setHttpHeaders(BlobHttpHeaders headers) { setHttpHeadersWithResponse(headers, null, null, Context.NONE); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the others will all be erased. In * order to preserve existing values, they must be passed alongside the header being changed. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setHttpHeadersWithResponse * * <p>For more information, see the * <a href="https: * * @param headers {@link BlobHttpHeaders} * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<Void>> response = client .setHttpHeadersWithResponse(headers, requestConditions, context); return blockWithOptionalTimeout(response, timeout); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values * must be preserved, they must be downloaded and included in the call to this method. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob. */ public void setMetadata(Map<String, String> metadata) { setMetadataWithResponse(metadata, null, null, Context.NONE); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values * must be preserved, they must be downloaded and included in the call to this method. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob. 
* @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context); return blockWithOptionalTimeout(response, timeout); } /** * Returns the blob's tags. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTags} * * <p>For more information, see the * <a href="https: * * @return The blob's tags. */ public Map<String, String> getTags() { return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue(); } /** * Returns the blob's tags. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getTagsWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobGetTagsOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The blob's tags. */ public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout, Context context) { Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context); return blockWithOptionalTimeout(response, timeout); } /** * Sets user defined tags. The specified tags in this method will replace existing tags. If old values * must be preserved, they must be downloaded and included in the call to this method. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTags * * <p>For more information, see the * <a href="https: * * @param tags Tags to associate with the blob. */ public void setTags(Map<String, String> tags) { this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE); } /** * Sets user defined tags. The specified tags in this method will replace existing tags. If old values * must be preserved, they must be downloaded and included in the call to this method. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setTagsWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobSetTagsOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) { Mono<Response<Void>> response = client.setTagsWithResponse(options, context); return blockWithOptionalTimeout(response, timeout); } /** * Creates a read-only snapshot of the blob. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshot} * * <p>For more information, see the * <a href="https: * * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use * {@link BlobClientBase */ public BlobClientBase createSnapshot() { return createSnapshotWithResponse(null, null, null, Context.NONE).getValue(); } /** * Creates a read-only snapshot of the blob. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.createSnapshotWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Metadata to associate with the blob snapshot. * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot, use * {@link BlobClientBase */ public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions, Duration timeout, Context context) { Mono<Response<BlobClientBase>> response = client .createSnapshotWithResponse(metadata, requestConditions, context) .map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue()))); return blockWithOptionalTimeout(response, timeout); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's * etag. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTier * * <p>For more information, see the * <a href="https: * * @param tier The new tier for the blob. */ public void setAccessTier(AccessTier tier) { setAccessTierWithResponse(tier, null, null, null, Context.NONE); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. 
A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's * etag. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse * * <p>For more information, see the * <a href="https: * * @param tier The new tier for the blob. * @param priority Optional priority to set for re-hydrating blobs. * @param leaseId The lease ID the active lease on the blob must match. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId, Duration timeout, Context context) { return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority).setLeaseId(leaseId), timeout, context); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's * etag. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.setAccessTierWithResponse * * <p>For more information, see the * <a href="https: * * @param options {@link BlobSetAccessTierOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. 
*/ public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options, Duration timeout, Context context) { return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete} * * <p>For more information, see the * <a href="https: */ public void undelete() { undeleteWithResponse(null, Context.NONE); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> undeleteWithResponse(Duration timeout, Context context) { Mono<Response<Void>> response = client.undeleteWithResponse(context); return blockWithOptionalTimeout(response, timeout); } /** * Returns the sku name and account kind for the account. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo} * * <p>For more information, see the * <a href="https: * * @return The sku name and account kind. */ public StorageAccountInfo getAccountInfo() { return getAccountInfoWithResponse(null, Context.NONE).getValue(); } /** * Returns the sku name and account kind for the account. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The sku name and account kind. */ public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) { Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context); return blockWithOptionalTimeout(response, timeout); } /** * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}. * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. * @see BlobServiceClient * user delegation key. * @return A {@code String} representing all SAS query parameters. 
*/ public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, UserDelegationKey userDelegationKey) { return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey); } /** * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues} * Note : The client must be authenticated via {@link StorageSharedKeyCredential} * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * * @return A {@code String} representing all SAS query parameters. */ public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) { return this.client.generateSas(blobServiceSasSignatureValues); } /** * Opens a blob input stream to query the blob. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream * * @param expression The query expression. * @return An <code>InputStream</code> object that represents the stream to use for reading the query response. */ public InputStream openQueryInputStream(String expression) { return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue(); } /** * Opens a blob input stream to query the blob. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream * * @param queryOptions {@link BlobQueryOptions The query options}. * @return A response containing status code and HTTP headers including an <code>InputStream</code> object * that represents the stream to use for reading the query response. 
*/ public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) { BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block(); if (response == null) { throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null")); } return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), new FluxInputStream(response.getValue()), response.getDeserializedHeaders()); } /** * Queries an entire blob into an output stream. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @param expression The query expression. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null. */ public void query(OutputStream stream, String expression) { queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE); } /** * Queries an entire blob into an output stream. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse * * @param queryOptions {@link BlobQueryOptions The query options}. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null. 
*/ public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) { StorageImplUtils.assertNotNull("options", queryOptions); StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream()); Mono<BlobQueryResponse> download = client .queryWithResponse(queryOptions, context) .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> { try { outputStream.write(FluxUtil.byteBufferToArray(buffer)); return outputStream; } catch (IOException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex))); } }).thenReturn(new BlobQueryResponse(response))); return blockWithOptionalTimeout(download, timeout); } }
class BlobClientBase {
    private final ClientLogger logger = new ClientLogger(BlobClientBase.class);
    // Every synchronous operation on this client delegates to this async client and blocks.
    private final BlobAsyncClientBase client;

    /**
     * Constructor used by {@link SpecializedBlobClientBuilder}.
     *
     * @param client the async blob client all operations delegate to
     */
    protected BlobClientBase(BlobAsyncClientBase client) {
        this.client = client;
    }

    /**
     * Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
     *
     * @param snapshot the identifier for a specific snapshot of this blob
     * @return a {@link BlobClientBase} used to interact with the specific snapshot.
     */
    public BlobClientBase getSnapshotClient(String snapshot) {
        return new BlobClientBase(client.getSnapshotClient(snapshot));
    }

    /**
     * Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
     *
     * @param versionId the identifier for a specific version of this blob,
     * pass {@code null} to interact with the latest blob version.
     * @return a {@link BlobClientBase} used to interact with the specific version.
     */
    public BlobClientBase getVersionClient(String versionId) {
        return new BlobClientBase(client.getVersionClient(versionId));
    }

    /**
     * Gets the URL of the blob represented by this client.
     *
     * @return the URL.
     */
    public String getBlobUrl() {
        return client.getBlobUrl();
    }

    /**
     * Gets the associated account name.
     *
     * @return account name associated with this storage resource.
     */
    public String getAccountName() {
        return client.getAccountName();
    }

    /**
     * Gets the container name.
     *
     * @return The name of the container.
     */
    public final String getContainerName() {
        return client.getContainerName();
    }

    /**
     * Decodes and gets the blob name.
     *
     * @return The decoded name of the blob.
     */
    public final String getBlobName() {
        return client.getBlobName();
    }

    /**
     * Gets the {@link HttpPipeline} powering this client.
     *
     * @return The pipeline.
     */
    public HttpPipeline getHttpPipeline() {
        return client.getHttpPipeline();
    }

    /**
     * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
     *
     * @return the customer provided key used for encryption.
     */
    public CpkInfo getCustomerProvidedKey() {
        return client.getCustomerProvidedKey();
    }

    /**
     * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
     * Package-private by design — not part of the public surface.
     *
     * @return the encryption scope used for encryption.
     */
    String getEncryptionScope() {
        return client.getEncryptionScope();
    }

    /**
     * Gets the service version the client is using.
     *
     * @return the service version the client is using.
     */
    public BlobServiceVersion getServiceVersion() {
        return client.getServiceVersion();
    }

    /**
     * Gets the snapshotId for a blob resource.
     *
     * @return A string that represents the snapshotId of the snapshot blob
     */
    public String getSnapshotId() {
        return client.getSnapshotId();
    }

    /**
     * Gets the versionId for a blob resource.
     *
     * @return A string that represents the versionId of the snapshot blob
     */
    public String getVersionId() {
        return client.getVersionId();
    }

    /**
     * Determines if a blob is a snapshot.
     *
     * @return A boolean that indicates if a blob is a snapshot
     */
    public boolean isSnapshot() {
        return client.isSnapshot();
    }

    /**
     * Opens a blob input stream to download the blob (full range, no request conditions).
     *
     * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
     * @throws BlobStorageException If a storage service error occurred.
     */
    public final BlobInputStream openInputStream() {
        return openInputStream(null, null);
    }

    /**
     * Opens a blob input stream to download the specified range of the blob.
     *
     * @param range {@link BlobRange}
     * @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
     * blob.
     * @return An <code>InputStream</code> object that represents the stream to use for reading from the blob.
     * @throws BlobStorageException If a storage service error occurred.
     */
    public final BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
        return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
    }

    // NOTE(review): a javadoc for an openInputStream(BlobInputStreamOptions) overload appears here in the
    // original, but the method body is missing from this excerpt — confirm against the upstream source.

    /**
     * Gets if the blob this client represents exists in the cloud.
     *
     * @return true if the blob exists, false if it doesn't
     */
    public Boolean exists() {
        return existsWithResponse(null, Context.NONE).getValue();
    }

    /**
     * Gets if the blob this client represents exists in the cloud.
     *
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return true if the blob exists, false if it doesn't
     */
    public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
        Mono<Response<Boolean>> response = client.existsWithResponse(context);
        return blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Copies the data at the source URL to a blob.
     * <p>
     * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure
     * File. If the source is in another account, the source must either be public or authenticated with a SAS
     * token.
     * If the source is in the same account, the Shared Key authorization on the destination will also be
     * applied to the source. The source URL must be URL encoded.
     * <p>
     * For more information, see the Azure Docs on "Copy Blob".
     *
     * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
     * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one
     * second is used.
     * @return A {@link SyncPoller} to poll the progress of blob copy operation.
     */
    public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
        return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
    }

    /**
     * Copies the data at the source URL to a blob.
     * <p>
     * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure
     * File. If the source is in another account, the source must either be public or authenticated with a SAS
     * token. If the source is in the same account, the Shared Key authorization on the destination will also be
     * applied to the source. The source URL must be URL encoded.
     *
     * @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
     * @param metadata Metadata to associate with the destination blob.
     * @param tier {@link AccessTier} for the destination blob.
     * @param priority {@link RehydratePriority} for rehydrating the blob.
     * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
     * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
     * related to when the blob was changed relative to the given request. The request will fail if the specified
     * condition is not satisfied.
     * @param destRequestConditions {@link BlobRequestConditions} against the destination.
     * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one
     * second is used.
     * @return A {@link SyncPoller} to poll the progress of blob copy operation.
     */
    public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
        RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
        BlobRequestConditions destRequestConditions, Duration pollInterval) {
        // Funnel all parameters into the options-bag overload; source conditions are adapted
        // from the generic RequestConditions into blob-specific ones by ModelHelper.
        return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
            .setRehydratePriority(priority).setSourceRequestConditions(
                ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
            .setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
    }

    /**
     * Copies the data at the source URL to a blob.
     * <p>
     * This method triggers a long-running, asynchronous operation. See the overloads above for the source
     * authorization requirements; the source URL must be URL encoded.
     *
     * @param options {@link BlobBeginCopyOptions}
     * @return A {@link SyncPoller} to poll the progress of blob copy operation.
     */
    public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
        return client.beginCopy(options).getSyncPoller();
    }

    /**
     * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
     * <p>
     * For more information, see the Azure Docs on "Abort Copy Blob".
     *
     * @param copyId The id of the copy operation to abort.
     */
    public void abortCopyFromUrl(String copyId) {
        abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
    }

    /**
     * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
     *
     * @param copyId The id of the copy operation to abort.
     * @param leaseId The lease ID the active lease on the blob must match.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
        Context context) {
        return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
    }

    /**
     * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
     * <p>
     * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas
     * token attached. The URL must be URL encoded.
     * <p>
     * For more information, see the Azure Docs on "Copy Blob From URL".
     *
     * @param copySource The source URL to copy from.
     * @return The copy ID for the long running operation.
     * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
     */
    public String copyFromUrl(String copySource) {
        return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
    }

    /**
     * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
     * <p>
     * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas
     * token attached. The URL must be URL encoded.
     *
     * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
     * @param metadata Metadata to associate with the destination blob.
     * @param tier {@link AccessTier} for the destination blob.
     * @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
     * conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
     * related to when the blob was changed relative to the given request. The request will fail if the specified
     * condition is not satisfied.
     * @param destRequestConditions {@link BlobRequestConditions} against the destination.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return The copy ID for the long running operation.
     * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
     */
    public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
        RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
        Duration timeout, Context context) {
        return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
            .setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
            .setDestinationRequestConditions(destRequestConditions), timeout, context);
    }

    /**
     * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
     * <p>
     * The source must be a block blob no larger than 256MB. The source must also be either public or have a sas
     * token attached. The URL must be URL encoded.
     *
     * @param options {@link BlobCopyFromUrlOptions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return The copy ID for the long running operation.
     * @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
     */
    public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
        Context context) {
        Mono<Response<String>> response = client
            .copyFromUrlWithResponse(options, context);
        return blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Downloads the entire blob into an output stream. Uploading data must be done from the {@link
     * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
     * <p>
     * For more information, see the Azure Docs on "Get Blob".
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null
     */
    public void download(OutputStream stream) {
        downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
    }

    /**
     * Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
     * BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
     * @param range {@link BlobRange}
     * @param options {@link DownloadRetryOptions}
     * @param requestConditions {@link BlobRequestConditions}
     * @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     * @throws UncheckedIOException If an I/O error occurs.
     * @throws NullPointerException if {@code stream} is null
     */
    public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
        DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
        Duration timeout, Context context) {
        StorageImplUtils.assertNotNull("stream", stream);
        // Reduce the async body into the caller's stream; IOExceptions surface unchecked
        // through the reactive pipeline via Exceptions.propagate.
        Mono<BlobDownloadResponse> download = client
            .downloadWithResponse(range, options, requestConditions, getRangeContentMd5, context)
            .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
                try {
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new BlobDownloadResponse(response)));
        return blockWithOptionalTimeout(download, timeout);
    }

    /**
     * Downloads the entire blob into a file specified by the path.
     *
     * <p>The file will be created and must not exist, if the file already exists a
     * {@link FileAlreadyExistsException} will be thrown.</p>
     *
     * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
     * @return The blob properties and metadata.
     * @throws UncheckedIOException If an I/O error occurs
     */
    public BlobProperties downloadToFile(String filePath) {
        return downloadToFile(filePath, false);
    }

    /**
     * Downloads the entire blob into a file specified by the path.
     * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a
     * {@link FileAlreadyExistsException} will be thrown.</p>
     *
     * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
     * @param overwrite Whether or not to overwrite the file, should the file exist.
     * @return The blob properties and metadata.
     * @throws UncheckedIOException If an I/O error occurs
     */
    public BlobProperties downloadToFile(String filePath, boolean overwrite) {
        // A null openOptions set lets the downstream overload apply its create-new default;
        // overwrite mode explicitly truncates any existing file instead.
        Set<OpenOption> openOptions = null;
        if (overwrite) {
            openOptions = new HashSet<>();
            openOptions.add(StandardOpenOption.CREATE);
            openOptions.add(StandardOpenOption.TRUNCATE_EXISTING);
            openOptions.add(StandardOpenOption.READ);
            openOptions.add(StandardOpenOption.WRITE);
        }
        return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
            .getValue();
    }

    /**
     * Downloads the entire blob into a file specified by the path.
     *
     * <p>The file will be created and must not exist, if the file already exists a
     * {@link FileAlreadyExistsException} will be thrown.</p>
     *
     * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
     * @param range {@link BlobRange}
     * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
     * transfers parameter is ignored.
     * @param downloadRetryOptions {@link DownloadRetryOptions}
     * @param requestConditions {@link BlobRequestConditions}
     * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing the blob properties and metadata.
     * @throws UncheckedIOException If an I/O error occurs.
     */
    public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
        ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
        BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
        return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
            requestConditions, rangeGetContentMd5, null, timeout, context);
    }

    /**
     * Downloads the entire blob into a file specified by the path.
     *
     * <p>By default the file will be created and must not exist, if the file already exists a
     * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
     * {@link OpenOption OpenOptions} </p>
     *
     * @param filePath A {@link String} representing the filePath where the downloaded data will be written.
     * @param range {@link BlobRange}
     * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
     * transfers parameter is ignored.
     * @param downloadRetryOptions {@link DownloadRetryOptions}
     * @param requestConditions {@link BlobRequestConditions}
     * @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
     * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing the blob properties and metadata.
     * @throws UncheckedIOException If an I/O error occurs.
     */
    public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
        ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
        BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
        Duration timeout, Context context) {
        // Normalize the blob-flavored transfer options to the common-storage type before delegating.
        final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
            ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
        return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
            .setParallelTransferOptions(finalParallelTransferOptions)
            .setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
            .setRangeGetContentMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
    }

    /**
     * Downloads the entire blob into a file specified by the path.
     *
     * <p>By default the file will be created and must not exist, if the file already exists a
     * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
     * {@link OpenOption OpenOptions} </p>
     *
     * @param options {@link BlobDownloadToFileOptions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing the blob properties and metadata.
     * @throws UncheckedIOException If an I/O error occurs.
     */
    public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
        Context context) {
        Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
        return blockWithOptionalTimeout(download, timeout);
    }

    /**
     * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
     * <p>
     * For more information, see the Azure Docs on "Delete Blob".
     */
    public void delete() {
        deleteWithResponse(null, null, null, Context.NONE);
    }

    /**
     * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
     *
     * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code
     * Include} will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a
     * snapshot is being deleted, you must pass null.
     * @param requestConditions {@link BlobRequestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
        BlobRequestConditions requestConditions, Duration timeout, Context context) {
        Mono<Response<Void>> response = client
            .deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
        return blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Returns the blob's metadata and properties.
     * <p>
     * For more information, see the Azure Docs on "Get Blob Properties".
     *
     * @return The blob properties and metadata.
     */
    public BlobProperties getProperties() {
        return getPropertiesWithResponse(null, null, Context.NONE).getValue();
    }

    /**
     * Returns the blob's metadata and properties.
     *
     * @param requestConditions {@link BlobRequestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return The blob properties and metadata.
     */
    public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions,
        Duration timeout, Context context) {
        Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
        return blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased.
     * In order to preserve existing values, they must be passed alongside the header being changed.
     * <p>
     * For more information, see the Azure Docs on "Set Blob Properties".
     *
     * @param headers {@link BlobHttpHeaders}
     */
    public void setHttpHeaders(BlobHttpHeaders headers) {
        setHttpHeadersWithResponse(headers, null, null, Context.NONE);
    }

    /**
     * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased.
     * In order to preserve existing values, they must be passed alongside the header being changed.
     *
     * @param headers {@link BlobHttpHeaders}
     * @param requestConditions {@link BlobRequestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers,
        BlobRequestConditions requestConditions, Duration timeout, Context context) {
        Mono<Response<Void>> response = client
            .setHttpHeadersWithResponse(headers, requestConditions, context);
        return blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old
     * values must be preserved, they must be downloaded and included in the call to this method.
     * <p>
     * For more information, see the Azure Docs on "Set Blob Metadata".
     *
     * @param metadata Metadata to associate with the blob.
     */
    public void setMetadata(Map<String, String> metadata) {
        setMetadataWithResponse(metadata, null, null, Context.NONE);
    }

    /**
     * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old
     * values must be preserved, they must be downloaded and included in the call to this method.
     *
     * @param metadata Metadata to associate with the blob.
     * @param requestConditions {@link BlobRequestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> setMetadataWithResponse(Map<String, String> metadata,
        BlobRequestConditions requestConditions, Duration timeout, Context context) {
        Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
        return blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Returns the blob's tags.
     * <p>
     * For more information, see the Azure Docs on "Get Blob Tags".
     *
     * @return The blob's tags.
     */
    public Map<String, String> getTags() {
        return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
    }

    /**
     * Returns the blob's tags.
     *
     * @param options {@link BlobGetTagsOptions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return The blob's tags.
     */
    public Response<Map<String, String>> getTagsWithResponse(BlobGetTagsOptions options, Duration timeout,
        Context context) {
        Mono<Response<Map<String, String>>> response = client.getTagsWithResponse(options, context);
        return blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
     * must be preserved, they must be downloaded and included in the call to this method.
     * <p>
     * For more information, see the Azure Docs on "Set Blob Tags".
     *
     * @param tags Tags to associate with the blob.
     */
    public void setTags(Map<String, String> tags) {
        this.setTagsWithResponse(new BlobSetTagsOptions(tags), null, Context.NONE);
    }

    /**
     * Sets user defined tags. The specified tags in this method will replace existing tags. If old values
     * must be preserved, they must be downloaded and included in the call to this method.
     *
     * @param options {@link BlobSetTagsOptions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> setTagsWithResponse(BlobSetTagsOptions options, Duration timeout, Context context) {
        Mono<Response<Void>> response = client.setTagsWithResponse(options, context);
        return blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Creates a read-only snapshot of the blob.
     * <p>
     * For more information, see the Azure Docs on "Snapshot Blob".
     *
     * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot.
     */
    public BlobClientBase createSnapshot() {
        return createSnapshotWithResponse(null, null, null, Context.NONE).getValue();
    }

    /**
     * Creates a read-only snapshot of the blob.
     *
     * @param metadata Metadata to associate with the blob snapshot.
     * @param requestConditions {@link BlobRequestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing a {@link BlobClientBase} which is used to interact with the created snapshot.
     */
    public Response<BlobClientBase> createSnapshotWithResponse(Map<String, String> metadata,
        BlobRequestConditions requestConditions, Duration timeout, Context context) {
        // Re-wrap the async snapshot client returned by the service into a sync client for the caller.
        Mono<Response<BlobClientBase>> response = client
            .createSnapshotWithResponse(metadata, requestConditions, context)
            .map(rb -> new SimpleResponse<>(rb, new BlobClientBase(rb.getValue())));
        return blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block
     * blob in a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and
     * bandwidth of the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not
     * update the blob's etag.
     * <p>
     * For more information, see the Azure Docs on "Set Blob Tier".
     *
     * @param tier The new tier for the blob.
     */
    public void setAccessTier(AccessTier tier) {
        setAccessTierWithResponse(tier, null, null, null, Context.NONE);
    }

    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block
     * blob in a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and
     * bandwidth of the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not
     * update the blob's etag.
     *
     * @param tier The new tier for the blob.
     * @param priority Optional priority to set for re-hydrating blobs.
     * @param leaseId The lease ID the active lease on the blob must match.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
     */
    public Response<Void> setAccessTierWithResponse(AccessTier tier, RehydratePriority priority, String leaseId,
        Duration timeout, Context context) {
        return setAccessTierWithResponse(new BlobSetAccessTierOptions(tier).setPriority(priority)
            .setLeaseId(leaseId), timeout, context);
    }

    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block
     * blob in a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and
     * bandwidth of the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not
     * update the blob's etag.
     *
     * @param options {@link BlobSetAccessTierOptions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing status code and HTTP headers.
*/ public Response<Void> setAccessTierWithResponse(BlobSetAccessTierOptions options, Duration timeout, Context context) { return blockWithOptionalTimeout(client.setTierWithResponse(options, context), timeout); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undelete} * * <p>For more information, see the * <a href="https: */ public void undelete() { undeleteWithResponse(null, Context.NONE); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.undeleteWithResponse * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. */ public Response<Void> undeleteWithResponse(Duration timeout, Context context) { Mono<Response<Void>> response = client.undeleteWithResponse(context); return blockWithOptionalTimeout(response, timeout); } /** * Returns the sku name and account kind for the account. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfo} * * <p>For more information, see the * <a href="https: * * @return The sku name and account kind. */ public StorageAccountInfo getAccountInfo() { return getAccountInfoWithResponse(null, Context.NONE).getValue(); } /** * Returns the sku name and account kind for the account. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.getAccountInfoWithResponse * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The sku name and account kind. */ public Response<StorageAccountInfo> getAccountInfoWithResponse(Duration timeout, Context context) { Mono<Response<StorageAccountInfo>> response = client.getAccountInfoWithResponse(context); return blockWithOptionalTimeout(response, timeout); } /** * Generates a user delegation SAS for the blob using the specified {@link BlobServiceSasSignatureValues}. * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a user delegation SAS.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateUserDelegationSas * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * @param userDelegationKey A {@link UserDelegationKey} object used to sign the SAS values. * @see BlobServiceClient * user delegation key. * @return A {@code String} representing all SAS query parameters. 
*/ public String generateUserDelegationSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues, UserDelegationKey userDelegationKey) { return this.client.generateUserDelegationSas(blobServiceSasSignatureValues, userDelegationKey); } /** * Generates a service SAS for the blob using the specified {@link BlobServiceSasSignatureValues} * Note : The client must be authenticated via {@link StorageSharedKeyCredential} * <p>See {@link BlobServiceSasSignatureValues} for more information on how to construct a service SAS.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.generateSas * * @param blobServiceSasSignatureValues {@link BlobServiceSasSignatureValues} * * @return A {@code String} representing all SAS query parameters. */ public String generateSas(BlobServiceSasSignatureValues blobServiceSasSignatureValues) { return this.client.generateSas(blobServiceSasSignatureValues); } /** * Opens a blob input stream to query the blob. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream * * @param expression The query expression. * @return An <code>InputStream</code> object that represents the stream to use for reading the query response. */ public InputStream openQueryInputStream(String expression) { return openQueryInputStreamWithResponse(new BlobQueryOptions(expression)).getValue(); } /** * Opens a blob input stream to query the blob. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.openQueryInputStream * * @param queryOptions {@link BlobQueryOptions The query options}. * @return A response containing status code and HTTP headers including an <code>InputStream</code> object * that represents the stream to use for reading the query response. 
*/ public Response<InputStream> openQueryInputStreamWithResponse(BlobQueryOptions queryOptions) { BlobQueryAsyncResponse response = client.queryWithResponse(queryOptions).block(); if (response == null) { throw logger.logExceptionAsError(new IllegalStateException("Query response cannot be null")); } return new ResponseBase<>(response.getRequest(), response.getStatusCode(), response.getHeaders(), new FluxInputStream(response.getValue()), response.getDeserializedHeaders()); } /** * Queries an entire blob into an output stream. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.query * * @param stream A non-null {@link OutputStream} instance where the downloaded data will be written. * @param expression The query expression. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null. */ public void query(OutputStream stream, String expression) { queryWithResponse(new BlobQueryOptions(expression, stream), null, Context.NONE); } /** * Queries an entire blob into an output stream. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.BlobClientBase.queryWithResponse * * @param queryOptions {@link BlobQueryOptions The query options}. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing status code and HTTP headers. * @throws UncheckedIOException If an I/O error occurs. * @throws NullPointerException if {@code stream} is null. 
*/ public BlobQueryResponse queryWithResponse(BlobQueryOptions queryOptions, Duration timeout, Context context) { StorageImplUtils.assertNotNull("options", queryOptions); StorageImplUtils.assertNotNull("outputStream", queryOptions.getOutputStream()); Mono<BlobQueryResponse> download = client .queryWithResponse(queryOptions, context) .flatMap(response -> response.getValue().reduce(queryOptions.getOutputStream(), (outputStream, buffer) -> { try { outputStream.write(FluxUtil.byteBufferToArray(buffer)); return outputStream; } catch (IOException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex))); } }).thenReturn(new BlobQueryResponse(response))); return blockWithOptionalTimeout(download, timeout); } }
This field wasn't used anywhere, so I deleted it
public static void runComponentSample() throws JsonProcessingException { ConsoleLogger.printHeader("COMPONENT SAMPLES"); String componentModelId = UniqueIdHelper.getUniqueModelId(SamplesConstants.TEMPORARY_COMPONENT_MODEL_PREFIX, client, randomIntegerStringGenerator); String modelId = UniqueIdHelper.getUniqueModelId(SamplesConstants.TEMPORARY_MODEL_PREFIX, client, randomIntegerStringGenerator); String basicDigitalTwinId = UniqueIdHelper.getUniqueDigitalTwinId(SamplesConstants.TEMPORARY_TWIN_PREFIX, client, randomIntegerStringGenerator); String newComponentModelPayload = SamplesConstants.TEMPORARY_COMPONENT_MODEL_PAYLOAD .replace(SamplesConstants.COMPONENT_ID, componentModelId); String newModelPayload = SamplesConstants.TEMPORARY_MODEL_WITH_COMPONENT_PAYLOAD .replace(SamplesConstants.MODEL_ID, modelId) .replace(SamplesConstants.COMPONENT_ID, componentModelId); List<String> modelsList = new ArrayList<>(Arrays.asList(newComponentModelPayload, newModelPayload)); ConsoleLogger.printHeader("Create Models"); Iterable<DigitalTwinsModelData> modelList = client.createModels(modelsList); for (DigitalTwinsModelData model : modelList) { ConsoleLogger.print("Created model: " + model.getModelId()); } ConsoleLogger.printHeader("Create digital twin with components"); BasicDigitalTwin basicTwin = new BasicDigitalTwin(basicDigitalTwinId) .setMetadata( new BasicDigitalTwinMetadata() .setModelId(modelId) ) .addToContents("Prop1", "Value1") .addToContents("Prop2", 987) .addToContents( "Component1", new BasicDigitalTwinComponent() .addToContents("ComponentProp1", "Component value 1") .addToContents("ComponentProp2", 123) ); BasicDigitalTwin basicTwinResponse = client.createOrReplaceDigitalTwin(basicDigitalTwinId, basicTwin, BasicDigitalTwin.class); ConsoleLogger.print("Created digital twin " + basicTwinResponse.getId()); Response<String> getStringDigitalTwinResponse = client.getDigitalTwinWithResponse(basicDigitalTwinId, String.class, Context.NONE); ConsoleLogger.print("Successfully 
retrieved digital twin as a json string \n" + getStringDigitalTwinResponse.getValue()); BasicDigitalTwin deserializedDigitalTwin = mapper.readValue(getStringDigitalTwinResponse.getValue(), BasicDigitalTwin.class); ConsoleLogger.print("Deserialized the string response into a BasicDigitalTwin with Id: " + deserializedDigitalTwin.getId()); Response<BasicDigitalTwin> basicDigitalTwinResponse = client.getDigitalTwinWithResponse(basicDigitalTwinId, BasicDigitalTwin.class, Context.NONE); if (basicDigitalTwinResponse.getStatusCode() == HttpsURLConnection.HTTP_OK) { BasicDigitalTwin basicDigitalTwin = basicDigitalTwinResponse.getValue(); String component1RawText = mapper.writeValueAsString(basicDigitalTwin.getContents().get("Component1")); HashMap component1 = mapper.readValue(component1RawText, HashMap.class); ConsoleLogger.print("Retrieved digital twin using generic API to use built in deserialization into a BasicDigitalTwin with Id: " + basicDigitalTwin.getId() + ":\n\t" + "Etag: " + basicDigitalTwin.getEtag() + "\n\t" + "Prop1: " + basicDigitalTwin.getContents().get("Prop1") + "\n\t" + "Prop2: " + basicDigitalTwin.getContents().get("Prop2") + "\n\t" + "ComponentProp1: " + component1.get("ComponentProp1") + "\n\t" + "ComponentProp2: " + component1.get("ComponentProp2") + "\n\t" ); } ConsoleLogger.printHeader("Update Component"); UpdateOperationUtility updateOperationUtility = new UpdateOperationUtility(); updateOperationUtility.appendReplaceOperation("/ComponentProp1", "Some new Value"); client.updateComponent(basicDigitalTwinId, "Component1", updateOperationUtility.getUpdateOperations()); ConsoleLogger.print("Updated component for digital twin: " + basicDigitalTwinId); ConsoleLogger.printHeader("Get Component"); String getComponentResponse = client.getComponent(basicDigitalTwinId, "Component1", String.class); ConsoleLogger.print("Retrieved component for digital twin " + basicDigitalTwinId + " :\n" + getComponentResponse); try { 
client.deleteDigitalTwin(basicDigitalTwinId); } catch (ErrorResponseException ex) { ConsoleLogger.printFatal("Failed to delete digital twin due to" + ex); } try { client.deleteModel(modelId); client.deleteModel(componentModelId); } catch (ErrorResponseException ex) { ConsoleLogger.printFatal("Failed to delete models due to" + ex); } }
BasicDigitalTwin basicTwinResponse = client.createOrReplaceDigitalTwin(basicDigitalTwinId, basicTwin, BasicDigitalTwin.class);
public static void runComponentSample() throws JsonProcessingException { ConsoleLogger.printHeader("COMPONENT SAMPLES"); String componentModelId = UniqueIdHelper.getUniqueModelId(SamplesConstants.TEMPORARY_COMPONENT_MODEL_PREFIX, client, randomIntegerStringGenerator); String modelId = UniqueIdHelper.getUniqueModelId(SamplesConstants.TEMPORARY_MODEL_PREFIX, client, randomIntegerStringGenerator); String basicDigitalTwinId = UniqueIdHelper.getUniqueDigitalTwinId(SamplesConstants.TEMPORARY_TWIN_PREFIX, client, randomIntegerStringGenerator); String newComponentModelPayload = SamplesConstants.TEMPORARY_COMPONENT_MODEL_PAYLOAD .replace(SamplesConstants.COMPONENT_ID, componentModelId); String newModelPayload = SamplesConstants.TEMPORARY_MODEL_WITH_COMPONENT_PAYLOAD .replace(SamplesConstants.MODEL_ID, modelId) .replace(SamplesConstants.COMPONENT_ID, componentModelId); List<String> modelsList = new ArrayList<>(Arrays.asList(newComponentModelPayload, newModelPayload)); ConsoleLogger.printHeader("Create Models"); Iterable<DigitalTwinsModelData> modelList = client.createModels(modelsList); for (DigitalTwinsModelData model : modelList) { ConsoleLogger.print("Created model: " + model.getModelId()); } ConsoleLogger.printHeader("Create digital twin with components"); BasicDigitalTwin basicTwin = new BasicDigitalTwin(basicDigitalTwinId) .setMetadata( new BasicDigitalTwinMetadata() .setModelId(modelId) ) .addToContents("Prop1", "Value1") .addToContents("Prop2", 987) .addToContents( "Component1", new BasicDigitalTwinComponent() .addToContents("ComponentProp1", "Component value 1") .addToContents("ComponentProp2", 123) ); BasicDigitalTwin basicTwinResponse = client.createOrReplaceDigitalTwin(basicDigitalTwinId, basicTwin, BasicDigitalTwin.class); ConsoleLogger.print("Created digital twin " + basicTwinResponse.getId()); Response<String> getStringDigitalTwinResponse = client.getDigitalTwinWithResponse(basicDigitalTwinId, String.class, Context.NONE); ConsoleLogger.print("Successfully 
retrieved digital twin as a json string \n" + getStringDigitalTwinResponse.getValue()); BasicDigitalTwin deserializedDigitalTwin = mapper.readValue(getStringDigitalTwinResponse.getValue(), BasicDigitalTwin.class); ConsoleLogger.print("Deserialized the string response into a BasicDigitalTwin with Id: " + deserializedDigitalTwin.getId()); Response<BasicDigitalTwin> basicDigitalTwinResponse = client.getDigitalTwinWithResponse(basicDigitalTwinId, BasicDigitalTwin.class, Context.NONE); if (basicDigitalTwinResponse.getStatusCode() == HttpsURLConnection.HTTP_OK) { BasicDigitalTwin basicDigitalTwin = basicDigitalTwinResponse.getValue(); String component1RawText = mapper.writeValueAsString(basicDigitalTwin.getContents().get("Component1")); HashMap component1 = mapper.readValue(component1RawText, HashMap.class); ConsoleLogger.print("Retrieved digital twin using generic API to use built in deserialization into a BasicDigitalTwin with Id: " + basicDigitalTwin.getId() + ":\n\t" + "Etag: " + basicDigitalTwin.getEtag() + "\n\t" + "Prop1: " + basicDigitalTwin.getContents().get("Prop1") + "\n\t" + "Prop2: " + basicDigitalTwin.getContents().get("Prop2") + "\n\t" + "ComponentProp1: " + component1.get("ComponentProp1") + "\n\t" + "ComponentProp2: " + component1.get("ComponentProp2") + "\n\t" ); } ConsoleLogger.printHeader("Update Component"); UpdateOperationUtility updateOperationUtility = new UpdateOperationUtility(); updateOperationUtility.appendReplaceOperation("/ComponentProp1", "Some new Value"); client.updateComponent(basicDigitalTwinId, "Component1", updateOperationUtility.getUpdateOperations()); ConsoleLogger.print("Updated component for digital twin: " + basicDigitalTwinId); ConsoleLogger.printHeader("Get Component"); BasicDigitalTwinComponent getComponentResponse = client.getComponent(basicDigitalTwinId, "Component1", BasicDigitalTwinComponent.class); ConsoleLogger.print("Retrieved component for digital twin " + basicDigitalTwinId + " :"); for (String key : 
getComponentResponse.getContents().keySet()) { ConsoleLogger.print("\t" + key + " : " + getComponentResponse.getContents().get(key)); } try { client.deleteDigitalTwin(basicDigitalTwinId); } catch (ErrorResponseException ex) { ConsoleLogger.printFatal("Failed to delete digital twin due to" + ex); } try { client.deleteModel(modelId); client.deleteModel(componentModelId); } catch (ErrorResponseException ex) { ConsoleLogger.printFatal("Failed to delete models due to" + ex); } }
class ComponentSyncSamples { private static DigitalTwinsClient client; private static final ObjectMapper mapper = new ObjectMapper(); public static Function<Integer, String> randomIntegerStringGenerator = (maxLength) -> { int randInt = new Random().nextInt((int)Math.pow(10, 8) - 1) + 1; return String.valueOf(randInt); }; public static void main(String[] args) throws IOException { SamplesArguments parsedArguments = new SamplesArguments(args); client = new DigitalTwinsClientBuilder() .credential( new ClientSecretCredentialBuilder() .tenantId(parsedArguments.getTenantId()) .clientId(parsedArguments.getClientId()) .clientSecret(parsedArguments.getClientSecret()) .build() ) .endpoint(parsedArguments.getDigitalTwinEndpoint()) .httpLogOptions( new HttpLogOptions() .setLogLevel(parsedArguments.getHttpLogDetailLevel())) .buildClient(); runComponentSample(); } @SuppressWarnings("rawtypes") }
class ComponentSyncSamples { private static DigitalTwinsClient client; private static final ObjectMapper mapper = new ObjectMapper(); public static Function<Integer, String> randomIntegerStringGenerator = (maxLength) -> { int randInt = new Random().nextInt((int)Math.pow(10, 8) - 1) + 1; return String.valueOf(randInt); }; public static void main(String[] args) throws IOException { SamplesArguments parsedArguments = new SamplesArguments(args); client = new DigitalTwinsClientBuilder() .credential( new ClientSecretCredentialBuilder() .tenantId(parsedArguments.getTenantId()) .clientId(parsedArguments.getClientId()) .clientSecret(parsedArguments.getClientSecret()) .build() ) .endpoint(parsedArguments.getDigitalTwinEndpoint()) .httpLogOptions( new HttpLogOptions() .setLogLevel(parsedArguments.getHttpLogDetailLevel())) .buildClient(); runComponentSample(); } @SuppressWarnings("rawtypes") }
is this because of service side message change?
public void conflictResolutionPolicyCRUD() { CosmosContainerProperties containerSettings = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); database.createContainer(containerSettings, new CosmosContainerRequestOptions()).block(); CosmosAsyncContainer container = database.getContainer(containerSettings.getId()); containerSettings = container.read().block().getProperties(); assertThat(containerSettings.getConflictResolutionPolicy().getMode()).isEqualTo(ConflictResolutionMode.LAST_WRITER_WINS); containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy()); containerSettings = container.replace(containerSettings, null).block().getProperties(); assertThat(containerSettings.getConflictResolutionPolicy().getMode()).isEqualTo(ConflictResolutionMode.LAST_WRITER_WINS); assertThat(containerSettings.getConflictResolutionPolicy().getConflictResolutionPath()).isEqualTo("/_ts"); testConflictResolutionPolicyRequiringPath(ConflictResolutionMode.LAST_WRITER_WINS, new String[] { "/a", null, "" }, new String[] { "/a", "/_ts", "/_ts" }); containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy("/a/b")); try { containerSettings = container.replace(containerSettings, null).block().getProperties(); fail("Expected exception on invalid getPath."); } catch (Exception e) { CosmosException dce = Utils.as(e, CosmosException.class); if (dce != null && dce.getStatusCode() == 400) { assertThat(dce.getMessage()).contains("Invalid path '\\\\\\/a\\\\\\/b' for last writer wins conflict resolution"); } else { throw e; } } containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy("someText")); try { containerSettings = container.replace(containerSettings, null).block().getProperties(); fail("Expected exception on invalid path."); } catch (Exception e) { CosmosException dce = Utils.as(e, CosmosException.class); if (dce != null && dce.getStatusCode() == 400) 
{ assertThat(dce.getMessage()).contains("Invalid path 'someText' for last writer wins conflict resolution"); } else { throw e; } } testConflictResolutionPolicyRequiringPath(ConflictResolutionMode.CUSTOM, new String[] { "dbs/mydb/colls" + "/mycoll/sprocs/randomSprocName", null, "" }, new String[] { "dbs/mydb/colls/mycoll/sprocs" + "/randomSprocName", "", "" }); }
assertThat(dce.getMessage()).contains("Invalid path '\\\\\\/a\\\\\\/b' for last writer wins conflict resolution");
public void conflictResolutionPolicyCRUD() { CosmosContainerProperties containerSettings = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); database.createContainer(containerSettings, new CosmosContainerRequestOptions()).block(); CosmosAsyncContainer container = database.getContainer(containerSettings.getId()); containerSettings = container.read().block().getProperties(); assertThat(containerSettings.getConflictResolutionPolicy().getMode()).isEqualTo(ConflictResolutionMode.LAST_WRITER_WINS); containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy()); containerSettings = container.replace(containerSettings, null).block().getProperties(); assertThat(containerSettings.getConflictResolutionPolicy().getMode()).isEqualTo(ConflictResolutionMode.LAST_WRITER_WINS); assertThat(containerSettings.getConflictResolutionPolicy().getConflictResolutionPath()).isEqualTo("/_ts"); testConflictResolutionPolicyRequiringPath(ConflictResolutionMode.LAST_WRITER_WINS, new String[] { "/a", null, "" }, new String[] { "/a", "/_ts", "/_ts" }); containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy("/a/b")); try { containerSettings = container.replace(containerSettings, null).block().getProperties(); fail("Expected exception on invalid getPath."); } catch (Exception e) { CosmosException dce = Utils.as(e, CosmosException.class); if (dce != null && dce.getStatusCode() == 400) { assertThat(dce.getMessage()).contains("Invalid path '\\\\\\/a\\\\\\/b' for last writer wins conflict resolution"); } else { throw e; } } containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy("someText")); try { containerSettings = container.replace(containerSettings, null).block().getProperties(); fail("Expected exception on invalid path."); } catch (Exception e) { CosmosException dce = Utils.as(e, CosmosException.class); if (dce != null && dce.getStatusCode() == 400) 
{ assertThat(dce.getMessage()).contains("Invalid path 'someText' for last writer wins conflict resolution"); } else { throw e; } } testConflictResolutionPolicyRequiringPath(ConflictResolutionMode.CUSTOM, new String[] { "dbs/mydb/colls" + "/mycoll/sprocs/randomSprocName", null, "" }, new String[] { "dbs/mydb/colls/mycoll/sprocs" + "/randomSprocName", "", "" }); }
class MultiMasterConflictResolutionTest extends TestSuiteBase { private static final int TIMEOUT = 40000; private final String databaseId = CosmosDatabaseForTest.generateId(); private PartitionKeyDefinition partitionKeyDef; private CosmosAsyncClient client; private CosmosAsyncDatabase database; @Factory(dataProvider = "clientBuilders") public MultiMasterConflictResolutionTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @Test(groups = "multi-master", timeOut = 10 * TIMEOUT) private void testConflictResolutionPolicyRequiringPath(ConflictResolutionMode conflictResolutionMode, String[] paths, String[] expectedPaths) { for (int i = 0; i < paths.length; i++) { CosmosContainerProperties collectionSettings = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); if (conflictResolutionMode == ConflictResolutionMode.LAST_WRITER_WINS) { collectionSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy(paths[i])); } else { collectionSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createCustomPolicy(paths[i])); } collectionSettings = database.createContainer(collectionSettings, new CosmosContainerRequestOptions()).block().getProperties(); assertThat(collectionSettings.getConflictResolutionPolicy().getMode()).isEqualTo(conflictResolutionMode); if (conflictResolutionMode == ConflictResolutionMode.LAST_WRITER_WINS) { assertThat(collectionSettings.getConflictResolutionPolicy().getConflictResolutionPath()).isEqualTo(expectedPaths[i]); } else { assertThat(collectionSettings.getConflictResolutionPolicy().getConflictResolutionProcedure()).isEqualTo(expectedPaths[i]); } } } @Test(groups = "multi-master", timeOut = TIMEOUT) public void invalidConflictResolutionPolicy_LastWriterWinsWithStoredProc() throws Exception { CosmosContainerProperties collection = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); ConflictResolutionPolicy policy = 
ModelBridgeUtils.createConflictResolutionPolicy(); ModelBridgeUtils.setMode(policy, ConflictResolutionMode.LAST_WRITER_WINS); ModelBridgeUtils.setStoredProc(policy,"randomSprocName"); collection.setConflictResolutionPolicy(policy); Mono<CosmosContainerResponse> createObservable = database.createContainer( collection, new CosmosContainerRequestOptions()); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosException.class) .statusCode(400) .errorMessageContains("LastWriterWins conflict resolution mode should not have conflict resolution procedure set.") .build(); validateFailure(createObservable, validator); } @Test(groups = "multi-master", timeOut = TIMEOUT) public void invalidConflictResolutionPolicy_CustomWithPath() throws Exception { CosmosContainerProperties collection = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef); ConflictResolutionPolicy policy = ModelBridgeUtils.createConflictResolutionPolicy(); ModelBridgeUtils.setMode(policy, ConflictResolutionMode.CUSTOM); ModelBridgeUtils.setPath(policy,"/mypath"); collection.setConflictResolutionPolicy(policy); Mono<CosmosContainerResponse> createObservable = database.createContainer( collection, new CosmosContainerRequestOptions()); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosException.class) .statusCode(400) .errorMessageContains("Custom conflict resolution mode should not have conflict resolution path set.") .build(); validateFailure(createObservable, validator); } @BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT) public void before_MultiMasterConflictResolutionTest() { client = getClientBuilder().buildAsyncClient(); database = createDatabase(client, databaseId); partitionKeyDef = new PartitionKeyDefinition(); ArrayList<String> paths = new ArrayList<String>(); paths.add("/mypk"); partitionKeyDef.setPaths(paths); } @AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void 
afterClass() { safeDeleteDatabase(database); safeClose(client); } }
/**
 * Tests conflict resolution policy validation for multi-master Cosmos DB accounts:
 * valid policies must round-trip through the service, and invalid mode/field
 * combinations must be rejected with HTTP 400.
 */
class MultiMasterConflictResolutionTest extends TestSuiteBase {
    private static final int TIMEOUT = 40000;

    private final String databaseId = CosmosDatabaseForTest.generateId();

    private PartitionKeyDefinition partitionKeyDef;
    private CosmosAsyncClient client;
    private CosmosAsyncDatabase database;

    @Factory(dataProvider = "clientBuilders")
    public MultiMasterConflictResolutionTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }

    // Helper, not a test: for each path, creates a container with the given conflict
    // resolution mode and verifies the effective policy returned by the service.
    // FIX: the original carried a stray @Test(groups = "multi-master", timeOut = 10 * TIMEOUT)
    // annotation here. TestNG does not run private, parameterized methods, so the
    // annotation could never take effect and was misleading; it has been removed.
    private void testConflictResolutionPolicyRequiringPath(ConflictResolutionMode conflictResolutionMode,
                                                           String[] paths, String[] expectedPaths) {
        for (int i = 0; i < paths.length; i++) {
            CosmosContainerProperties collectionSettings =
                new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef);

            if (conflictResolutionMode == ConflictResolutionMode.LAST_WRITER_WINS) {
                collectionSettings.setConflictResolutionPolicy(
                    ConflictResolutionPolicy.createLastWriterWinsPolicy(paths[i]));
            } else {
                collectionSettings.setConflictResolutionPolicy(
                    ConflictResolutionPolicy.createCustomPolicy(paths[i]));
            }

            collectionSettings = database
                .createContainer(collectionSettings, new CosmosContainerRequestOptions())
                .block()
                .getProperties();

            assertThat(collectionSettings.getConflictResolutionPolicy().getMode())
                .isEqualTo(conflictResolutionMode);

            if (conflictResolutionMode == ConflictResolutionMode.LAST_WRITER_WINS) {
                assertThat(collectionSettings.getConflictResolutionPolicy().getConflictResolutionPath())
                    .isEqualTo(expectedPaths[i]);
            } else {
                assertThat(collectionSettings.getConflictResolutionPolicy().getConflictResolutionProcedure())
                    .isEqualTo(expectedPaths[i]);
            }
        }
    }

    @Test(groups = "multi-master", timeOut = TIMEOUT)
    public void invalidConflictResolutionPolicy_LastWriterWinsWithStoredProc() throws Exception {
        CosmosContainerProperties collection =
            new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef);

        // LWW mode must not carry a conflict resolution stored procedure.
        ConflictResolutionPolicy policy = ModelBridgeUtils.createConflictResolutionPolicy();
        ModelBridgeUtils.setMode(policy, ConflictResolutionMode.LAST_WRITER_WINS);
        ModelBridgeUtils.setStoredProc(policy, "randomSprocName");
        collection.setConflictResolutionPolicy(policy);

        Mono<CosmosContainerResponse> createObservable = database.createContainer(
            collection,
            new CosmosContainerRequestOptions());

        FailureValidator validator = new FailureValidator.Builder()
            .instanceOf(CosmosException.class)
            .statusCode(400)
            .errorMessageContains("LastWriterWins conflict resolution mode should not have conflict resolution procedure set.")
            .build();
        validateFailure(createObservable, validator);
    }

    @Test(groups = "multi-master", timeOut = TIMEOUT)
    public void invalidConflictResolutionPolicy_CustomWithPath() throws Exception {
        CosmosContainerProperties collection =
            new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef);

        // CUSTOM mode must not carry a conflict resolution path.
        ConflictResolutionPolicy policy = ModelBridgeUtils.createConflictResolutionPolicy();
        ModelBridgeUtils.setMode(policy, ConflictResolutionMode.CUSTOM);
        ModelBridgeUtils.setPath(policy, "/mypath");
        collection.setConflictResolutionPolicy(policy);

        Mono<CosmosContainerResponse> createObservable = database.createContainer(
            collection,
            new CosmosContainerRequestOptions());

        FailureValidator validator = new FailureValidator.Builder()
            .instanceOf(CosmosException.class)
            .statusCode(400)
            .errorMessageContains("Custom conflict resolution mode should not have conflict resolution path set.")
            .build();
        validateFailure(createObservable, validator);
    }

    @BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT)
    public void before_MultiMasterConflictResolutionTest() {
        client = getClientBuilder().buildAsyncClient();
        database = createDatabase(client, databaseId);

        partitionKeyDef = new PartitionKeyDefinition();
        ArrayList<String> paths = new ArrayList<String>();
        paths.add("/mypk");
        partitionKeyDef.setPaths(paths);
    }

    @AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeDeleteDatabase(database);
        safeClose(client);
    }
}
Yes, that is the reason :)
// Exercises create/read/replace of a container's conflict resolution policy:
// service default, explicit last-writer-wins, path validation failures (400),
// and custom (stored-procedure) mode.
// NOTE(review): no @Test annotation is visible on this method in this chunk —
// confirm it is annotated (or invoked) elsewhere.
public void conflictResolutionPolicyCRUD() {
    // A newly created container defaults to LAST_WRITER_WINS.
    CosmosContainerProperties containerSettings = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef);
    database.createContainer(containerSettings, new CosmosContainerRequestOptions()).block();
    CosmosAsyncContainer container = database.getContainer(containerSettings.getId());
    containerSettings = container.read().block().getProperties();
    assertThat(containerSettings.getConflictResolutionPolicy().getMode()).isEqualTo(ConflictResolutionMode.LAST_WRITER_WINS);

    // LWW with no explicit path falls back to the "/_ts" system property.
    containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy());
    containerSettings = container.replace(containerSettings, null).block().getProperties();
    assertThat(containerSettings.getConflictResolutionPolicy().getMode()).isEqualTo(ConflictResolutionMode.LAST_WRITER_WINS);
    assertThat(containerSettings.getConflictResolutionPolicy().getConflictResolutionPath()).isEqualTo("/_ts");

    // Valid top-level path, plus null/empty which normalize to "/_ts".
    testConflictResolutionPolicyRequiringPath(ConflictResolutionMode.LAST_WRITER_WINS,
        new String[] { "/a", null, "" }, new String[] { "/a", "/_ts", "/_ts" });

    // A nested path ("/a/b") is rejected by the service with HTTP 400.
    containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy("/a/b"));
    try {
        containerSettings = container.replace(containerSettings, null).block().getProperties();
        fail("Expected exception on invalid getPath.");
    } catch (Exception e) {
        // Anything other than a 400 CosmosException is unexpected — rethrow.
        CosmosException dce = Utils.as(e, CosmosException.class);
        if (dce != null && dce.getStatusCode() == 400) {
            assertThat(dce.getMessage()).contains("Invalid path '\\\\\\/a\\\\\\/b' for last writer wins conflict resolution");
        } else {
            throw e;
        }
    }

    // A non-path string is likewise rejected with HTTP 400.
    containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy("someText"));
    try {
        containerSettings = container.replace(containerSettings, null).block().getProperties();
        fail("Expected exception on invalid path.");
    } catch (Exception e) {
        CosmosException dce = Utils.as(e, CosmosException.class);
        if (dce != null && dce.getStatusCode() == 400) {
            assertThat(dce.getMessage()).contains("Invalid path 'someText' for last writer wins conflict resolution");
        } else {
            throw e;
        }
    }

    // CUSTOM mode: a sproc link round-trips; null/empty normalize to "".
    testConflictResolutionPolicyRequiringPath(ConflictResolutionMode.CUSTOM,
        new String[] { "dbs/mydb/colls" + "/mycoll/sprocs/randomSprocName", null, "" },
        new String[] { "dbs/mydb/colls/mycoll/sprocs" + "/randomSprocName", "", "" });
}
assertThat(dce.getMessage()).contains("Invalid path '\\\\\\/a\\\\\\/b' for last writer wins conflict resolution");
// Exercises create/read/replace of a container's conflict resolution policy:
// service default, explicit last-writer-wins, path validation failures (400),
// and custom (stored-procedure) mode.
// NOTE(review): no @Test annotation is visible on this method in this chunk —
// confirm it is annotated (or invoked) elsewhere.
public void conflictResolutionPolicyCRUD() {
    // A newly created container defaults to LAST_WRITER_WINS.
    CosmosContainerProperties containerSettings = new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef);
    database.createContainer(containerSettings, new CosmosContainerRequestOptions()).block();
    CosmosAsyncContainer container = database.getContainer(containerSettings.getId());
    containerSettings = container.read().block().getProperties();
    assertThat(containerSettings.getConflictResolutionPolicy().getMode()).isEqualTo(ConflictResolutionMode.LAST_WRITER_WINS);

    // LWW with no explicit path falls back to the "/_ts" system property.
    containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy());
    containerSettings = container.replace(containerSettings, null).block().getProperties();
    assertThat(containerSettings.getConflictResolutionPolicy().getMode()).isEqualTo(ConflictResolutionMode.LAST_WRITER_WINS);
    assertThat(containerSettings.getConflictResolutionPolicy().getConflictResolutionPath()).isEqualTo("/_ts");

    // Valid top-level path, plus null/empty which normalize to "/_ts".
    testConflictResolutionPolicyRequiringPath(ConflictResolutionMode.LAST_WRITER_WINS,
        new String[] { "/a", null, "" }, new String[] { "/a", "/_ts", "/_ts" });

    // A nested path ("/a/b") is rejected by the service with HTTP 400.
    containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy("/a/b"));
    try {
        containerSettings = container.replace(containerSettings, null).block().getProperties();
        fail("Expected exception on invalid getPath.");
    } catch (Exception e) {
        // Anything other than a 400 CosmosException is unexpected — rethrow.
        CosmosException dce = Utils.as(e, CosmosException.class);
        if (dce != null && dce.getStatusCode() == 400) {
            assertThat(dce.getMessage()).contains("Invalid path '\\\\\\/a\\\\\\/b' for last writer wins conflict resolution");
        } else {
            throw e;
        }
    }

    // A non-path string is likewise rejected with HTTP 400.
    containerSettings.setConflictResolutionPolicy(ConflictResolutionPolicy.createLastWriterWinsPolicy("someText"));
    try {
        containerSettings = container.replace(containerSettings, null).block().getProperties();
        fail("Expected exception on invalid path.");
    } catch (Exception e) {
        CosmosException dce = Utils.as(e, CosmosException.class);
        if (dce != null && dce.getStatusCode() == 400) {
            assertThat(dce.getMessage()).contains("Invalid path 'someText' for last writer wins conflict resolution");
        } else {
            throw e;
        }
    }

    // CUSTOM mode: a sproc link round-trips; null/empty normalize to "".
    testConflictResolutionPolicyRequiringPath(ConflictResolutionMode.CUSTOM,
        new String[] { "dbs/mydb/colls" + "/mycoll/sprocs/randomSprocName", null, "" },
        new String[] { "dbs/mydb/colls/mycoll/sprocs" + "/randomSprocName", "", "" });
}
/**
 * Tests conflict resolution policy validation for multi-master Cosmos DB accounts:
 * valid policies must round-trip through the service, and invalid mode/field
 * combinations must be rejected with HTTP 400.
 */
class MultiMasterConflictResolutionTest extends TestSuiteBase {
    private static final int TIMEOUT = 40000;

    private final String databaseId = CosmosDatabaseForTest.generateId();

    private PartitionKeyDefinition partitionKeyDef;
    private CosmosAsyncClient client;
    private CosmosAsyncDatabase database;

    @Factory(dataProvider = "clientBuilders")
    public MultiMasterConflictResolutionTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }

    // Helper, not a test: for each path, creates a container with the given conflict
    // resolution mode and verifies the effective policy returned by the service.
    // FIX: the original carried a stray @Test(groups = "multi-master", timeOut = 10 * TIMEOUT)
    // annotation here. TestNG does not run private, parameterized methods, so the
    // annotation could never take effect and was misleading; it has been removed.
    private void testConflictResolutionPolicyRequiringPath(ConflictResolutionMode conflictResolutionMode,
                                                           String[] paths, String[] expectedPaths) {
        for (int i = 0; i < paths.length; i++) {
            CosmosContainerProperties collectionSettings =
                new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef);

            if (conflictResolutionMode == ConflictResolutionMode.LAST_WRITER_WINS) {
                collectionSettings.setConflictResolutionPolicy(
                    ConflictResolutionPolicy.createLastWriterWinsPolicy(paths[i]));
            } else {
                collectionSettings.setConflictResolutionPolicy(
                    ConflictResolutionPolicy.createCustomPolicy(paths[i]));
            }

            collectionSettings = database
                .createContainer(collectionSettings, new CosmosContainerRequestOptions())
                .block()
                .getProperties();

            assertThat(collectionSettings.getConflictResolutionPolicy().getMode())
                .isEqualTo(conflictResolutionMode);

            if (conflictResolutionMode == ConflictResolutionMode.LAST_WRITER_WINS) {
                assertThat(collectionSettings.getConflictResolutionPolicy().getConflictResolutionPath())
                    .isEqualTo(expectedPaths[i]);
            } else {
                assertThat(collectionSettings.getConflictResolutionPolicy().getConflictResolutionProcedure())
                    .isEqualTo(expectedPaths[i]);
            }
        }
    }

    @Test(groups = "multi-master", timeOut = TIMEOUT)
    public void invalidConflictResolutionPolicy_LastWriterWinsWithStoredProc() throws Exception {
        CosmosContainerProperties collection =
            new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef);

        // LWW mode must not carry a conflict resolution stored procedure.
        ConflictResolutionPolicy policy = ModelBridgeUtils.createConflictResolutionPolicy();
        ModelBridgeUtils.setMode(policy, ConflictResolutionMode.LAST_WRITER_WINS);
        ModelBridgeUtils.setStoredProc(policy, "randomSprocName");
        collection.setConflictResolutionPolicy(policy);

        Mono<CosmosContainerResponse> createObservable = database.createContainer(
            collection,
            new CosmosContainerRequestOptions());

        FailureValidator validator = new FailureValidator.Builder()
            .instanceOf(CosmosException.class)
            .statusCode(400)
            .errorMessageContains("LastWriterWins conflict resolution mode should not have conflict resolution procedure set.")
            .build();
        validateFailure(createObservable, validator);
    }

    @Test(groups = "multi-master", timeOut = TIMEOUT)
    public void invalidConflictResolutionPolicy_CustomWithPath() throws Exception {
        CosmosContainerProperties collection =
            new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef);

        // CUSTOM mode must not carry a conflict resolution path.
        ConflictResolutionPolicy policy = ModelBridgeUtils.createConflictResolutionPolicy();
        ModelBridgeUtils.setMode(policy, ConflictResolutionMode.CUSTOM);
        ModelBridgeUtils.setPath(policy, "/mypath");
        collection.setConflictResolutionPolicy(policy);

        Mono<CosmosContainerResponse> createObservable = database.createContainer(
            collection,
            new CosmosContainerRequestOptions());

        FailureValidator validator = new FailureValidator.Builder()
            .instanceOf(CosmosException.class)
            .statusCode(400)
            .errorMessageContains("Custom conflict resolution mode should not have conflict resolution path set.")
            .build();
        validateFailure(createObservable, validator);
    }

    @BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT)
    public void before_MultiMasterConflictResolutionTest() {
        client = getClientBuilder().buildAsyncClient();
        database = createDatabase(client, databaseId);

        partitionKeyDef = new PartitionKeyDefinition();
        ArrayList<String> paths = new ArrayList<String>();
        paths.add("/mypk");
        partitionKeyDef.setPaths(paths);
    }

    @AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeDeleteDatabase(database);
        safeClose(client);
    }
}
/**
 * Tests conflict resolution policy validation for multi-master Cosmos DB accounts:
 * valid policies must round-trip through the service, and invalid mode/field
 * combinations must be rejected with HTTP 400.
 */
class MultiMasterConflictResolutionTest extends TestSuiteBase {
    private static final int TIMEOUT = 40000;

    private final String databaseId = CosmosDatabaseForTest.generateId();

    private PartitionKeyDefinition partitionKeyDef;
    private CosmosAsyncClient client;
    private CosmosAsyncDatabase database;

    @Factory(dataProvider = "clientBuilders")
    public MultiMasterConflictResolutionTest(CosmosClientBuilder clientBuilder) {
        super(clientBuilder);
    }

    // Helper, not a test: for each path, creates a container with the given conflict
    // resolution mode and verifies the effective policy returned by the service.
    // FIX: the original carried a stray @Test(groups = "multi-master", timeOut = 10 * TIMEOUT)
    // annotation here. TestNG does not run private, parameterized methods, so the
    // annotation could never take effect and was misleading; it has been removed.
    private void testConflictResolutionPolicyRequiringPath(ConflictResolutionMode conflictResolutionMode,
                                                           String[] paths, String[] expectedPaths) {
        for (int i = 0; i < paths.length; i++) {
            CosmosContainerProperties collectionSettings =
                new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef);

            if (conflictResolutionMode == ConflictResolutionMode.LAST_WRITER_WINS) {
                collectionSettings.setConflictResolutionPolicy(
                    ConflictResolutionPolicy.createLastWriterWinsPolicy(paths[i]));
            } else {
                collectionSettings.setConflictResolutionPolicy(
                    ConflictResolutionPolicy.createCustomPolicy(paths[i]));
            }

            collectionSettings = database
                .createContainer(collectionSettings, new CosmosContainerRequestOptions())
                .block()
                .getProperties();

            assertThat(collectionSettings.getConflictResolutionPolicy().getMode())
                .isEqualTo(conflictResolutionMode);

            if (conflictResolutionMode == ConflictResolutionMode.LAST_WRITER_WINS) {
                assertThat(collectionSettings.getConflictResolutionPolicy().getConflictResolutionPath())
                    .isEqualTo(expectedPaths[i]);
            } else {
                assertThat(collectionSettings.getConflictResolutionPolicy().getConflictResolutionProcedure())
                    .isEqualTo(expectedPaths[i]);
            }
        }
    }

    @Test(groups = "multi-master", timeOut = TIMEOUT)
    public void invalidConflictResolutionPolicy_LastWriterWinsWithStoredProc() throws Exception {
        CosmosContainerProperties collection =
            new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef);

        // LWW mode must not carry a conflict resolution stored procedure.
        ConflictResolutionPolicy policy = ModelBridgeUtils.createConflictResolutionPolicy();
        ModelBridgeUtils.setMode(policy, ConflictResolutionMode.LAST_WRITER_WINS);
        ModelBridgeUtils.setStoredProc(policy, "randomSprocName");
        collection.setConflictResolutionPolicy(policy);

        Mono<CosmosContainerResponse> createObservable = database.createContainer(
            collection,
            new CosmosContainerRequestOptions());

        FailureValidator validator = new FailureValidator.Builder()
            .instanceOf(CosmosException.class)
            .statusCode(400)
            .errorMessageContains("LastWriterWins conflict resolution mode should not have conflict resolution procedure set.")
            .build();
        validateFailure(createObservable, validator);
    }

    @Test(groups = "multi-master", timeOut = TIMEOUT)
    public void invalidConflictResolutionPolicy_CustomWithPath() throws Exception {
        CosmosContainerProperties collection =
            new CosmosContainerProperties(UUID.randomUUID().toString(), partitionKeyDef);

        // CUSTOM mode must not carry a conflict resolution path.
        ConflictResolutionPolicy policy = ModelBridgeUtils.createConflictResolutionPolicy();
        ModelBridgeUtils.setMode(policy, ConflictResolutionMode.CUSTOM);
        ModelBridgeUtils.setPath(policy, "/mypath");
        collection.setConflictResolutionPolicy(policy);

        Mono<CosmosContainerResponse> createObservable = database.createContainer(
            collection,
            new CosmosContainerRequestOptions());

        FailureValidator validator = new FailureValidator.Builder()
            .instanceOf(CosmosException.class)
            .statusCode(400)
            .errorMessageContains("Custom conflict resolution mode should not have conflict resolution path set.")
            .build();
        validateFailure(createObservable, validator);
    }

    @BeforeClass(groups = {"multi-master"}, timeOut = SETUP_TIMEOUT)
    public void before_MultiMasterConflictResolutionTest() {
        client = getClientBuilder().buildAsyncClient();
        database = createDatabase(client, databaseId);

        partitionKeyDef = new PartitionKeyDefinition();
        ArrayList<String> paths = new ArrayList<String>();
        paths.add("/mypk");
        partitionKeyDef.setPaths(paths);
    }

    @AfterClass(groups = {"multi-master"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
    public void afterClass() {
        safeDeleteDatabase(database);
        safeClose(client);
    }
}
Do we intend to change this to handle the Uri parsing internally? ([.NET example](https://github.com/Azure/azure-sdk-for-net/pull/15885))
public static void main(String[] args) { KeyVaultBackupClient backupClient = new KeyVaultBackupClientBuilder() .vaultUrl("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); String blobStorageUrl = "<blob-storage-url>"; String sasToken = "<sas-token>"; SyncPoller<KeyVaultBackupOperation, String> backupPoller = backupClient.beginBackup(blobStorageUrl, sasToken); backupPoller.waitForCompletion(); String backupUri = backupPoller.getFinalResult(); String[] segments = backupUri.split("/"); String folderName = segments[segments.length - 1]; SyncPoller<KeyVaultRestoreOperation, Void> restorePoller = backupClient.beginRestore(blobStorageUrl, sasToken, folderName); restorePoller.waitForCompletion(); }
String[] segments = backupUri.split("/");
/**
 * Fully backs up a key vault's keys to blob storage and then restores them from that backup.
 *
 * @param args Unused. Arguments to the program.
 */
public static void main(String[] args) {
    /* Instantiate a KeyVaultBackupClient that will be used to call the service. Notice that the client is using
    default Azure credentials. To make default credentials work, ensure that environment variables
    'AZURE_CLIENT_ID', 'AZURE_CLIENT_KEY' and 'AZURE_TENANT_ID' are set with the service principal credentials.

    To get started, you'll need a URI to an Azure Key Vault. See the README
    (https: -- link truncated in this chunk) for links and instructions. */
    KeyVaultBackupClient backupClient = new KeyVaultBackupClientBuilder()
        .vaultUrl("https:
        .credential(new DefaultAzureCredentialBuilder().build())
        .buildClient();

    /* Using the KeyVaultBackupClient, you can back up your entire collection of keys. The backing store for
    full key backups is a blob storage container using Shared Access Signature authentication.

    For more details on creating a SAS token using the BlobServiceClient, see the Azure Storage Blobs client
    README (https: -- link truncated). Alternatively, it is possible to generate a SAS token in Storage
    Explorer (https: -- link truncated).

    To ensure you have some keys for backup, you may want to first create a key using the KeyClient. To create
    a new KeyClient to create a key, see the 'Azure Key Vault Key client library for Java' README
    (https: -- link truncated).

    In the sample below, you can set blobStorageUrl and sasToken based on environment variables, configuration
    settings, or any way that works for your application. */
    String blobStorageUrl = "<blob-storage-url>";
    String sasToken = "<sas-token>";

    // Start the full backup and block until it completes.
    SyncPoller<KeyVaultBackupOperation, String> backupPoller = backupClient.beginBackup(blobStorageUrl, sasToken);
    backupPoller.waitForCompletion();

    /* Now let's restore the entire collection of keys from the backup. We will need to get the URL of the
    backup location, as well as a Shared Access Signature for accessing it. The backup operation's final
    result is that folder URL. */
    String backupFolderUrl = backupPoller.getFinalResult();

    SyncPoller<KeyVaultRestoreOperation, Void> restorePoller = backupClient.beginRestore(backupFolderUrl, sasToken);
    restorePoller.waitForCompletion();
}
class BackupAndRestoreHelloWorld { /** * Authenticates with the key vault and shows how to fully backup and restore a key vault synchronously. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when an invalid key vault URL is passed. */ }
class BackupAndRestoreHelloWorld { /** * Authenticates with the key vault and shows how to fully backup and restore a key vault synchronously. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when an invalid key vault URL is passed. */ }
A role definition ID can be obtained from `listRoleDefinitions`. They can also get this with the Azure CLI, which you've used before: ```bash az keyvault role definition list --hsm-name <name> ```
public static void main(String[] args) { KeyVaultAccessControlClient accessControlClient = new KeyVaultAccessControlClientBuilder() .vaultUrl("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); String roleDefinitionId = "<role-definition-id>"; String servicePrincipalId = "<service-principal-id>"; KeyVaultRoleAssignmentProperties roleAssignmentProperties = new KeyVaultRoleAssignmentProperties(roleDefinitionId, servicePrincipalId); KeyVaultRoleAssignment roleAssignmentForAllKeys = accessControlClient.createRoleAssignment(KeyVaultRoleAssignmentScope.GLOBAL, roleAssignmentProperties); System.out.printf("Created role assignment with name: %s %n", roleAssignmentForAllKeys.getName()); String keyId = "<key-id>"; KeyVaultRoleAssignment roleAssignmentForSingleKey = accessControlClient.createRoleAssignment(KeyVaultRoleAssignmentScope.fromString(keyId), roleAssignmentProperties); System.out.printf("Created role assignment with name: %s %n", roleAssignmentForSingleKey.getName()); }
/**
 * Creates role assignments for a service principal at the global scope and at the
 * scope of a single key.
 *
 * @param args Unused. Arguments to the program.
 */
public static void main(String[] args) {
    /* Instantiate a KeyVaultAccessControlClient that will be used to call the service. Notice that the client
    is using default Azure credentials. To make default credentials work, ensure that environment variables
    'AZURE_CLIENT_ID', 'AZURE_CLIENT_KEY' and 'AZURE_TENANT_ID' are set with the service principal credentials.

    To get started, you'll need a URI to an Azure Key Vault. See the README
    (https: -- link truncated in this chunk) for links and instructions. */
    KeyVaultAccessControlClient accessControlClient = new KeyVaultAccessControlClientBuilder()
        .vaultUrl("https:
        .credential(new DefaultAzureCredentialBuilder().build())
        .buildClient();

    /* By default role assignments apply to the global scope. It is also possible to be more specific by
    applying an assignment to the all-keys scope or a specific KeyVaultKey.

    Let's assign a role to a service principal so that it applies to all keys. To do this we'll need a
    service principal object ID and a role definition ID. A role definition ID can be obtained from the 'id'
    property of one of the role definitions returned from listRoleDefinitions().

    Alternatively, you can use the following Azure CLI command:

    az keyvault role definition list --hsm-name <name> */
    String roleDefinitionId = "<role-definition-id>";
    String servicePrincipalId = "<service-principal-id>";
    KeyVaultRoleAssignmentProperties roleAssignmentProperties =
        new KeyVaultRoleAssignmentProperties(roleDefinitionId, servicePrincipalId);

    KeyVaultRoleAssignment roleAssignmentForAllKeys =
        accessControlClient.createRoleAssignment(KeyVaultRoleAssignmentScope.GLOBAL, roleAssignmentProperties);

    System.out.printf("Created role assignment with name: %s %n", roleAssignmentForAllKeys.getName());

    /* Now let's assign a role to a service principal so that it applies to a specific KeyVaultKey. To do this
    we'll use the role definition ID and a service principal object ID from the previous sample. We'll also
    need the ID of an existing KeyVaultKey, which can be obtained from the service using a KeyClient. */
    String keyId = "<key-id>";

    KeyVaultRoleAssignment roleAssignmentForSingleKey =
        accessControlClient.createRoleAssignment(KeyVaultRoleAssignmentScope.fromString(keyId), roleAssignmentProperties);

    System.out.printf("Created role assignment with name: %s %n", roleAssignmentForSingleKey.getName());
}
class CreateRoleAssignmentsForDifferentScopes { /** * Authenticates with the key vault and shows how to create role assignments in the key vault for different scopes. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when an invalid key vault URL is passed. */ }
class CreateRoleAssignmentsForDifferentScopes { /** * Authenticates with the key vault and shows how to create role assignments in the key vault for different scopes * synchronously. For examples of how to perform async operations, please refer to * {@link AccessControlHelloWorldAsync the async client samples}. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when an invalid key vault URL is passed. */ }
Shouldn't you wait for it, for consistency with other examples if nothing else?
public static void main(String[] args) { KeyVaultBackupClient backupClient = new KeyVaultBackupClientBuilder() .vaultUrl("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); String keyName = "<key-name>"; String blobStorageUrl = "<blob-storage-url>"; String sasToken = "<sas-token>"; String folderName = "<folder-name>"; SyncPoller<KeyVaultRestoreOperation, Void> restorePoller = backupClient.beginSelectiveRestore(keyName, blobStorageUrl, sasToken, folderName); }
}
public static void main(String[] args) { /* Instantiate an KeyVaultAccessControlClient that will be used to call the service. Notice that the client is using default Azure credentials. To make default credentials work, ensure that environment variables 'AZURE_CLIENT_ID', 'AZURE_CLIENT_KEY' and 'AZURE_TENANT_ID' are set with the service principal credentials. To get started, you'll need a URI to an Azure Key Vault. See the README (https: for links and instructions. */ KeyVaultBackupClient backupClient = new KeyVaultBackupClientBuilder() .vaultUrl("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); /* Using the KeyVaultBackupClient, you can restore a single key from backup by key name. The data source for a selective key restore is a storage blob accessed using Shared Access Signature authentication. */ String keyName = "<key-name>"; String backupFolderUrl = "<backup-folder-url>"; String sasToken = "<sas-token>"; SyncPoller<KeyVaultRestoreOperation, Void> restorePoller = backupClient.beginSelectiveRestore(keyName, backupFolderUrl, sasToken); restorePoller.waitForCompletion(); }
class SelectiveRestore { /** * Authenticates with the key vault and shows how to selectively restore a key from key vault backup. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when an invalid key vault URL is passed. */ }
class SelectiveRestore { /** * Authenticates with the key vault and shows how to selectively restore a key from key vault backup synchronously. * For examples of how to perform async operations, please refer to * {@link BackupAndRestoreHelloWorldAsync the async client samples}. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when an invalid key vault URL is passed. */ }
I think that's probably better for the user experience. I'll set up a separate PR with those changes.
public static void main(String[] args) { KeyVaultBackupClient backupClient = new KeyVaultBackupClientBuilder() .vaultUrl("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); String blobStorageUrl = "<blob-storage-url>"; String sasToken = "<sas-token>"; SyncPoller<KeyVaultBackupOperation, String> backupPoller = backupClient.beginBackup(blobStorageUrl, sasToken); backupPoller.waitForCompletion(); String backupUri = backupPoller.getFinalResult(); String[] segments = backupUri.split("/"); String folderName = segments[segments.length - 1]; SyncPoller<KeyVaultRestoreOperation, Void> restorePoller = backupClient.beginRestore(blobStorageUrl, sasToken, folderName); restorePoller.waitForCompletion(); }
String[] segments = backupUri.split("/");
/**
 * Fully backs up a key vault's keys to blob storage and then restores them from that backup.
 *
 * @param args Unused. Arguments to the program.
 */
public static void main(String[] args) {
    /* Instantiate a KeyVaultBackupClient that will be used to call the service. Notice that the client is using
    default Azure credentials. To make default credentials work, ensure that environment variables
    'AZURE_CLIENT_ID', 'AZURE_CLIENT_KEY' and 'AZURE_TENANT_ID' are set with the service principal credentials.

    To get started, you'll need a URI to an Azure Key Vault. See the README
    (https: -- link truncated in this chunk) for links and instructions. */
    KeyVaultBackupClient backupClient = new KeyVaultBackupClientBuilder()
        .vaultUrl("https:
        .credential(new DefaultAzureCredentialBuilder().build())
        .buildClient();

    /* Using the KeyVaultBackupClient, you can back up your entire collection of keys. The backing store for
    full key backups is a blob storage container using Shared Access Signature authentication.

    For more details on creating a SAS token using the BlobServiceClient, see the Azure Storage Blobs client
    README (https: -- link truncated). Alternatively, it is possible to generate a SAS token in Storage
    Explorer (https: -- link truncated).

    To ensure you have some keys for backup, you may want to first create a key using the KeyClient. To create
    a new KeyClient to create a key, see the 'Azure Key Vault Key client library for Java' README
    (https: -- link truncated).

    In the sample below, you can set blobStorageUrl and sasToken based on environment variables, configuration
    settings, or any way that works for your application. */
    String blobStorageUrl = "<blob-storage-url>";
    String sasToken = "<sas-token>";

    // Start the full backup and block until it completes.
    SyncPoller<KeyVaultBackupOperation, String> backupPoller = backupClient.beginBackup(blobStorageUrl, sasToken);
    backupPoller.waitForCompletion();

    /* Now let's restore the entire collection of keys from the backup. We will need to get the URL of the
    backup location, as well as a Shared Access Signature for accessing it. The backup operation's final
    result is that folder URL. */
    String backupFolderUrl = backupPoller.getFinalResult();

    SyncPoller<KeyVaultRestoreOperation, Void> restorePoller = backupClient.beginRestore(backupFolderUrl, sasToken);
    restorePoller.waitForCompletion();
}
class BackupAndRestoreHelloWorld { /** * Authenticates with the key vault and shows how to fully backup and restore a key vault synchronously. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when an invalid key vault URL is passed. */ }
class BackupAndRestoreHelloWorld { /** * Authenticates with the key vault and shows how to fully backup and restore a key vault synchronously. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when an invalid key vault URL is passed. */ }
You are right, I must have missed it :)
public static void main(String[] args) { KeyVaultBackupClient backupClient = new KeyVaultBackupClientBuilder() .vaultUrl("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); String keyName = "<key-name>"; String blobStorageUrl = "<blob-storage-url>"; String sasToken = "<sas-token>"; String folderName = "<folder-name>"; SyncPoller<KeyVaultRestoreOperation, Void> restorePoller = backupClient.beginSelectiveRestore(keyName, blobStorageUrl, sasToken, folderName); }
}
public static void main(String[] args) { /* Instantiate an KeyVaultAccessControlClient that will be used to call the service. Notice that the client is using default Azure credentials. To make default credentials work, ensure that environment variables 'AZURE_CLIENT_ID', 'AZURE_CLIENT_KEY' and 'AZURE_TENANT_ID' are set with the service principal credentials. To get started, you'll need a URI to an Azure Key Vault. See the README (https: for links and instructions. */ KeyVaultBackupClient backupClient = new KeyVaultBackupClientBuilder() .vaultUrl("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); /* Using the KeyVaultBackupClient, you can restore a single key from backup by key name. The data source for a selective key restore is a storage blob accessed using Shared Access Signature authentication. */ String keyName = "<key-name>"; String backupFolderUrl = "<backup-folder-url>"; String sasToken = "<sas-token>"; SyncPoller<KeyVaultRestoreOperation, Void> restorePoller = backupClient.beginSelectiveRestore(keyName, backupFolderUrl, sasToken); restorePoller.waitForCompletion(); }
class SelectiveRestore { /** * Authenticates with the key vault and shows how to selectively restore a key from key vault backup. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when an invalid key vault URL is passed. */ }
class SelectiveRestore { /** * Authenticates with the key vault and shows how to selectively restore a key from key vault backup synchronously. * For examples of how to perform async operations, please refer to * {@link BackupAndRestoreHelloWorldAsync the async client samples}. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when an invalid key vault URL is passed. */ }
cc @sadasant and @chlowell for consistency.
public static void main(String[] args) { KeyVaultBackupClient backupClient = new KeyVaultBackupClientBuilder() .vaultUrl("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); String blobStorageUrl = "<blob-storage-url>"; String sasToken = "<sas-token>"; SyncPoller<KeyVaultBackupOperation, String> backupPoller = backupClient.beginBackup(blobStorageUrl, sasToken); backupPoller.waitForCompletion(); String backupUri = backupPoller.getFinalResult(); String[] segments = backupUri.split("/"); String folderName = segments[segments.length - 1]; SyncPoller<KeyVaultRestoreOperation, Void> restorePoller = backupClient.beginRestore(blobStorageUrl, sasToken, folderName); restorePoller.waitForCompletion(); }
String[] segments = backupUri.split("/");
public static void main(String[] args) { /* Instantiate a KeyVaultBackupClient that will be used to call the service. Notice that the client is using default Azure credentials. To make default credentials work, ensure that environment variables 'AZURE_CLIENT_ID', 'AZURE_CLIENT_KEY' and 'AZURE_TENANT_ID' are set with the service principal credentials. To get started, you'll need a URI to an Azure Key Vault. See the README (https: for links and instructions. */ KeyVaultBackupClient backupClient = new KeyVaultBackupClientBuilder() .vaultUrl("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); /* Using the KeyVaultBackupClient, you can back up your entire collection of keys. The backing store for full key backups is a blob storage container using Shared Access Signature authentication. For more details on creating a SAS token using the BlobServiceClient, see the Azure Storage Blobs client README (https: Alternatively, it is possible to generate a SAS token in Storage Explorer (https: To ensure you have some keys for backup, you may want to first create a key using the KeyClient. To create a new KeyClient to create a key, see the 'Azure Key Vault Key client library for Java' README (https: In the sample below, you can set blobStorageUrl and sasToken based on environment variables, configuration settings, or any way that works for your application. */ String blobStorageUrl = "<blob-storage-url>"; String sasToken = "<sas-token>"; SyncPoller<KeyVaultBackupOperation, String> backupPoller = backupClient.beginBackup(blobStorageUrl, sasToken); backupPoller.waitForCompletion(); /* Now let's restore the entire collection of keys from the backup. We will need the get the URI for the location the backup, as well as Shared Access Signature for accessing it. 
*/ String backupFolderUrl = backupPoller.getFinalResult(); SyncPoller<KeyVaultRestoreOperation, Void> restorePoller = backupClient.beginRestore(backupFolderUrl, sasToken); restorePoller.waitForCompletion(); }
class BackupAndRestoreHelloWorld { /** * Authenticates with the key vault and shows how to fully backup and restore a key vault synchronously. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when an invalid key vault URL is passed. */ }
class BackupAndRestoreHelloWorld { /** * Authenticates with the key vault and shows how to fully backup and restore a key vault synchronously. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when an invalid key vault URL is passed. */ }
Created said PR: #17040.
public static void main(String[] args) { KeyVaultBackupClient backupClient = new KeyVaultBackupClientBuilder() .vaultUrl("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); String blobStorageUrl = "<blob-storage-url>"; String sasToken = "<sas-token>"; SyncPoller<KeyVaultBackupOperation, String> backupPoller = backupClient.beginBackup(blobStorageUrl, sasToken); backupPoller.waitForCompletion(); String backupUri = backupPoller.getFinalResult(); String[] segments = backupUri.split("/"); String folderName = segments[segments.length - 1]; SyncPoller<KeyVaultRestoreOperation, Void> restorePoller = backupClient.beginRestore(blobStorageUrl, sasToken, folderName); restorePoller.waitForCompletion(); }
String[] segments = backupUri.split("/");
public static void main(String[] args) { /* Instantiate a KeyVaultBackupClient that will be used to call the service. Notice that the client is using default Azure credentials. To make default credentials work, ensure that environment variables 'AZURE_CLIENT_ID', 'AZURE_CLIENT_KEY' and 'AZURE_TENANT_ID' are set with the service principal credentials. To get started, you'll need a URI to an Azure Key Vault. See the README (https: for links and instructions. */ KeyVaultBackupClient backupClient = new KeyVaultBackupClientBuilder() .vaultUrl("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); /* Using the KeyVaultBackupClient, you can back up your entire collection of keys. The backing store for full key backups is a blob storage container using Shared Access Signature authentication. For more details on creating a SAS token using the BlobServiceClient, see the Azure Storage Blobs client README (https: Alternatively, it is possible to generate a SAS token in Storage Explorer (https: To ensure you have some keys for backup, you may want to first create a key using the KeyClient. To create a new KeyClient to create a key, see the 'Azure Key Vault Key client library for Java' README (https: In the sample below, you can set blobStorageUrl and sasToken based on environment variables, configuration settings, or any way that works for your application. */ String blobStorageUrl = "<blob-storage-url>"; String sasToken = "<sas-token>"; SyncPoller<KeyVaultBackupOperation, String> backupPoller = backupClient.beginBackup(blobStorageUrl, sasToken); backupPoller.waitForCompletion(); /* Now let's restore the entire collection of keys from the backup. We will need the get the URI for the location the backup, as well as Shared Access Signature for accessing it. 
*/ String backupFolderUrl = backupPoller.getFinalResult(); SyncPoller<KeyVaultRestoreOperation, Void> restorePoller = backupClient.beginRestore(backupFolderUrl, sasToken); restorePoller.waitForCompletion(); }
class BackupAndRestoreHelloWorld { /** * Authenticates with the key vault and shows how to fully backup and restore a key vault synchronously. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when an invalid key vault URL is passed. */ }
class BackupAndRestoreHelloWorld { /** * Authenticates with the key vault and shows how to fully backup and restore a key vault synchronously. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when an invalid key vault URL is passed. */ }
Merged.
public static void main(String[] args) { KeyVaultBackupClient backupClient = new KeyVaultBackupClientBuilder() .vaultUrl("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); String blobStorageUrl = "<blob-storage-url>"; String sasToken = "<sas-token>"; SyncPoller<KeyVaultBackupOperation, String> backupPoller = backupClient.beginBackup(blobStorageUrl, sasToken); backupPoller.waitForCompletion(); String backupUri = backupPoller.getFinalResult(); String[] segments = backupUri.split("/"); String folderName = segments[segments.length - 1]; SyncPoller<KeyVaultRestoreOperation, Void> restorePoller = backupClient.beginRestore(blobStorageUrl, sasToken, folderName); restorePoller.waitForCompletion(); }
String[] segments = backupUri.split("/");
public static void main(String[] args) { /* Instantiate a KeyVaultBackupClient that will be used to call the service. Notice that the client is using default Azure credentials. To make default credentials work, ensure that environment variables 'AZURE_CLIENT_ID', 'AZURE_CLIENT_KEY' and 'AZURE_TENANT_ID' are set with the service principal credentials. To get started, you'll need a URI to an Azure Key Vault. See the README (https: for links and instructions. */ KeyVaultBackupClient backupClient = new KeyVaultBackupClientBuilder() .vaultUrl("https: .credential(new DefaultAzureCredentialBuilder().build()) .buildClient(); /* Using the KeyVaultBackupClient, you can back up your entire collection of keys. The backing store for full key backups is a blob storage container using Shared Access Signature authentication. For more details on creating a SAS token using the BlobServiceClient, see the Azure Storage Blobs client README (https: Alternatively, it is possible to generate a SAS token in Storage Explorer (https: To ensure you have some keys for backup, you may want to first create a key using the KeyClient. To create a new KeyClient to create a key, see the 'Azure Key Vault Key client library for Java' README (https: In the sample below, you can set blobStorageUrl and sasToken based on environment variables, configuration settings, or any way that works for your application. */ String blobStorageUrl = "<blob-storage-url>"; String sasToken = "<sas-token>"; SyncPoller<KeyVaultBackupOperation, String> backupPoller = backupClient.beginBackup(blobStorageUrl, sasToken); backupPoller.waitForCompletion(); /* Now let's restore the entire collection of keys from the backup. We will need the get the URI for the location the backup, as well as Shared Access Signature for accessing it. 
*/ String backupFolderUrl = backupPoller.getFinalResult(); SyncPoller<KeyVaultRestoreOperation, Void> restorePoller = backupClient.beginRestore(backupFolderUrl, sasToken); restorePoller.waitForCompletion(); }
class BackupAndRestoreHelloWorld { /** * Authenticates with the key vault and shows how to fully backup and restore a key vault synchronously. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when an invalid key vault URL is passed. */ }
class BackupAndRestoreHelloWorld { /** * Authenticates with the key vault and shows how to fully backup and restore a key vault synchronously. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when an invalid key vault URL is passed. */ }
This is the module that was missing from our default mapper in the digital twins client. We get this module for free when we construct the JacksonAdapter and call .serializer() on that jacksonAdapter. We need to explicitly use it here, though
public static void main(String[] args) throws IOException { SamplesArguments parsedArguments = new SamplesArguments(args); client = new DigitalTwinsClientBuilder() .credential( new ClientSecretCredentialBuilder() .tenantId(parsedArguments.getTenantId()) .clientId(parsedArguments.getClientId()) .clientSecret(parsedArguments.getClientSecret()) .build() ) .endpoint(parsedArguments.getDigitalTwinEndpoint()) .httpLogOptions( new HttpLogOptions() .setLogLevel(parsedArguments.getHttpLogDetailLevel())) .buildClient(); mapper.registerModule(new JavaTimeModule()); runComponentSample(); }
mapper.registerModule(new JavaTimeModule());
public static void main(String[] args) throws IOException { SamplesArguments parsedArguments = new SamplesArguments(args); client = new DigitalTwinsClientBuilder() .credential( new ClientSecretCredentialBuilder() .tenantId(parsedArguments.getTenantId()) .clientId(parsedArguments.getClientId()) .clientSecret(parsedArguments.getClientSecret()) .build() ) .endpoint(parsedArguments.getDigitalTwinEndpoint()) .httpLogOptions( new HttpLogOptions() .setLogLevel(parsedArguments.getHttpLogDetailLevel())) .buildClient(); mapper.registerModule(new JavaTimeModule()); runComponentSample(); }
class ComponentSyncSamples { private static DigitalTwinsClient client; private static final ObjectMapper mapper = new ObjectMapper(); public static Function<Integer, String> randomIntegerStringGenerator = (maxLength) -> { int randInt = new Random().nextInt((int)Math.pow(10, 8) - 1) + 1; return String.valueOf(randInt); }; @SuppressWarnings("rawtypes") public static void runComponentSample() throws JsonProcessingException { ConsoleLogger.printHeader("COMPONENT SAMPLES"); String componentModelId = UniqueIdHelper.getUniqueModelId(SamplesConstants.TEMPORARY_COMPONENT_MODEL_PREFIX, client, randomIntegerStringGenerator); String modelId = UniqueIdHelper.getUniqueModelId(SamplesConstants.TEMPORARY_MODEL_PREFIX, client, randomIntegerStringGenerator); String basicDigitalTwinId = UniqueIdHelper.getUniqueDigitalTwinId(SamplesConstants.TEMPORARY_TWIN_PREFIX, client, randomIntegerStringGenerator); String newComponentModelPayload = SamplesConstants.TEMPORARY_COMPONENT_MODEL_PAYLOAD .replace(SamplesConstants.COMPONENT_ID, componentModelId); String newModelPayload = SamplesConstants.TEMPORARY_MODEL_WITH_COMPONENT_PAYLOAD .replace(SamplesConstants.MODEL_ID, modelId) .replace(SamplesConstants.COMPONENT_ID, componentModelId); List<String> modelsList = new ArrayList<>(Arrays.asList(newComponentModelPayload, newModelPayload)); ConsoleLogger.printHeader("Create Models"); Iterable<DigitalTwinsModelData> modelList = client.createModels(modelsList); for (DigitalTwinsModelData model : modelList) { ConsoleLogger.print("Created model: " + model.getModelId()); } ConsoleLogger.printHeader("Create digital twin with components"); BasicDigitalTwin basicTwin = new BasicDigitalTwin(basicDigitalTwinId) .setMetadata( new BasicDigitalTwinMetadata() .setModelId(modelId) ) .addToContents("Prop1", "Value1") .addToContents("Prop2", 987) .addToContents( "Component1", new BasicDigitalTwinComponent() .addToContents("ComponentProp1", "Component value 1") .addToContents("ComponentProp2", 123) ); BasicDigitalTwin 
basicTwinResponse = client.createOrReplaceDigitalTwin(basicDigitalTwinId, basicTwin, BasicDigitalTwin.class); ConsoleLogger.print("Created digital twin " + basicTwinResponse.getId()); Response<String> getStringDigitalTwinResponse = client.getDigitalTwinWithResponse(basicDigitalTwinId, String.class, Context.NONE); ConsoleLogger.print("Successfully retrieved digital twin as a json string \n" + getStringDigitalTwinResponse.getValue()); BasicDigitalTwin deserializedDigitalTwin = mapper.readValue(getStringDigitalTwinResponse.getValue(), BasicDigitalTwin.class); ConsoleLogger.print("Deserialized the string response into a BasicDigitalTwin with Id: " + deserializedDigitalTwin.getId()); Response<BasicDigitalTwin> basicDigitalTwinResponse = client.getDigitalTwinWithResponse(basicDigitalTwinId, BasicDigitalTwin.class, Context.NONE); if (basicDigitalTwinResponse.getStatusCode() == HttpsURLConnection.HTTP_OK) { BasicDigitalTwin basicDigitalTwin = basicDigitalTwinResponse.getValue(); String component1RawText = mapper.writeValueAsString(basicDigitalTwin.getContents().get("Component1")); HashMap component1 = mapper.readValue(component1RawText, HashMap.class); ConsoleLogger.print("Retrieved digital twin using generic API to use built in deserialization into a BasicDigitalTwin with Id: " + basicDigitalTwin.getId() + ":\n\t" + "ETag: " + basicDigitalTwin.getETag() + "\n\t" + "Prop1: " + basicDigitalTwin.getContents().get("Prop1") + "\n\t" + "Prop2: " + basicDigitalTwin.getContents().get("Prop2") + "\n\t" + "ComponentProp1: " + component1.get("ComponentProp1") + "\n\t" + "ComponentProp2: " + component1.get("ComponentProp2") + "\n\t" ); } ConsoleLogger.printHeader("Update Component"); UpdateOperationUtility updateOperationUtility = new UpdateOperationUtility(); updateOperationUtility.appendReplaceOperation("/ComponentProp1", "Some new Value"); client.updateComponent(basicDigitalTwinId, "Component1", updateOperationUtility.getUpdateOperations()); ConsoleLogger.print("Updated component 
for digital twin: " + basicDigitalTwinId); ConsoleLogger.printHeader("Get Component"); BasicDigitalTwinComponent getComponentResponse = client.getComponent(basicDigitalTwinId, "Component1", BasicDigitalTwinComponent.class); ConsoleLogger.print("Retrieved component for digital twin " + basicDigitalTwinId + " :"); for (String key : getComponentResponse.getContents().keySet()) { ConsoleLogger.print("\t" + key + " : " + getComponentResponse.getContents().get(key)); ConsoleLogger.print("\t\tLast updated on: " + getComponentResponse.getMetadata().get(key).getLastUpdatedOn()); } try { client.deleteDigitalTwin(basicDigitalTwinId); } catch (ErrorResponseException ex) { ConsoleLogger.printFatal("Failed to delete digital twin due to" + ex); } try { client.deleteModel(modelId); client.deleteModel(componentModelId); } catch (ErrorResponseException ex) { ConsoleLogger.printFatal("Failed to delete models due to" + ex); } } }
class ComponentSyncSamples { private static DigitalTwinsClient client; private static final ObjectMapper mapper = new ObjectMapper(); public static Function<Integer, String> randomIntegerStringGenerator = (maxLength) -> { int randInt = new Random().nextInt((int)Math.pow(10, 8) - 1) + 1; return String.valueOf(randInt); }; @SuppressWarnings("rawtypes") public static void runComponentSample() throws JsonProcessingException { ConsoleLogger.printHeader("COMPONENT SAMPLES"); String componentModelId = UniqueIdHelper.getUniqueModelId(SamplesConstants.TEMPORARY_COMPONENT_MODEL_PREFIX, client, randomIntegerStringGenerator); String modelId = UniqueIdHelper.getUniqueModelId(SamplesConstants.TEMPORARY_MODEL_PREFIX, client, randomIntegerStringGenerator); String basicDigitalTwinId = UniqueIdHelper.getUniqueDigitalTwinId(SamplesConstants.TEMPORARY_TWIN_PREFIX, client, randomIntegerStringGenerator); String newComponentModelPayload = SamplesConstants.TEMPORARY_COMPONENT_MODEL_PAYLOAD .replace(SamplesConstants.COMPONENT_ID, componentModelId); String newModelPayload = SamplesConstants.TEMPORARY_MODEL_WITH_COMPONENT_PAYLOAD .replace(SamplesConstants.MODEL_ID, modelId) .replace(SamplesConstants.COMPONENT_ID, componentModelId); List<String> modelsList = new ArrayList<>(Arrays.asList(newComponentModelPayload, newModelPayload)); ConsoleLogger.printHeader("Create Models"); Iterable<DigitalTwinsModelData> modelList = client.createModels(modelsList); for (DigitalTwinsModelData model : modelList) { ConsoleLogger.print("Created model: " + model.getModelId()); } ConsoleLogger.printHeader("Create digital twin with components"); BasicDigitalTwin basicTwin = new BasicDigitalTwin(basicDigitalTwinId) .setMetadata( new BasicDigitalTwinMetadata() .setModelId(modelId) ) .addToContents("Prop1", "Value1") .addToContents("Prop2", 987) .addToContents( "Component1", new BasicDigitalTwinComponent() .addToContents("ComponentProp1", "Component value 1") .addToContents("ComponentProp2", 123) ); BasicDigitalTwin 
basicTwinResponse = client.createOrReplaceDigitalTwin(basicDigitalTwinId, basicTwin, BasicDigitalTwin.class); ConsoleLogger.print("Created digital twin " + basicTwinResponse.getId()); Response<String> getStringDigitalTwinResponse = client.getDigitalTwinWithResponse(basicDigitalTwinId, String.class, Context.NONE); ConsoleLogger.print("Successfully retrieved digital twin as a json string \n" + getStringDigitalTwinResponse.getValue()); BasicDigitalTwin deserializedDigitalTwin = mapper.readValue(getStringDigitalTwinResponse.getValue(), BasicDigitalTwin.class); ConsoleLogger.print("Deserialized the string response into a BasicDigitalTwin with Id: " + deserializedDigitalTwin.getId()); Response<BasicDigitalTwin> basicDigitalTwinResponse = client.getDigitalTwinWithResponse(basicDigitalTwinId, BasicDigitalTwin.class, Context.NONE); if (basicDigitalTwinResponse.getStatusCode() == HttpsURLConnection.HTTP_OK) { BasicDigitalTwin basicDigitalTwin = basicDigitalTwinResponse.getValue(); String component1RawText = mapper.writeValueAsString(basicDigitalTwin.getContents().get("Component1")); HashMap component1 = mapper.readValue(component1RawText, HashMap.class); ConsoleLogger.print("Retrieved digital twin using generic API to use built in deserialization into a BasicDigitalTwin with Id: " + basicDigitalTwin.getId() + ":\n\t" + "ETag: " + basicDigitalTwin.getETag() + "\n\t" + "Prop1: " + basicDigitalTwin.getContents().get("Prop1") + "\n\t" + "Prop2: " + basicDigitalTwin.getContents().get("Prop2") + "\n\t" + "ComponentProp1: " + component1.get("ComponentProp1") + "\n\t" + "ComponentProp2: " + component1.get("ComponentProp2") + "\n\t" ); } ConsoleLogger.printHeader("Update Component"); UpdateOperationUtility updateOperationUtility = new UpdateOperationUtility(); updateOperationUtility.appendReplaceOperation("/ComponentProp1", "Some new Value"); client.updateComponent(basicDigitalTwinId, "Component1", updateOperationUtility.getUpdateOperations()); ConsoleLogger.print("Updated component 
for digital twin: " + basicDigitalTwinId); ConsoleLogger.printHeader("Get Component"); BasicDigitalTwinComponent getComponentResponse = client.getComponent(basicDigitalTwinId, "Component1", BasicDigitalTwinComponent.class); ConsoleLogger.print("Retrieved component for digital twin " + basicDigitalTwinId + " :"); for (String key : getComponentResponse.getContents().keySet()) { ConsoleLogger.print("\t" + key + " : " + getComponentResponse.getContents().get(key)); ConsoleLogger.print("\t\tLast updated on: " + getComponentResponse.getMetadata().get(key).getLastUpdatedOn()); } try { client.deleteDigitalTwin(basicDigitalTwinId); } catch (ErrorResponseException ex) { ConsoleLogger.printFatal("Failed to delete digital twin due to" + ex); } try { client.deleteModel(modelId); client.deleteModel(componentModelId); } catch (ErrorResponseException ex) { ConsoleLogger.printFatal("Failed to delete models due to" + ex); } } }
In what case a link name is the same as a tracking id?
public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); final String entityPath = next.getEntityPath(); logger.info("linkName[{}] entityPath[{}]. Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; linkCreditsAdded.set(true); next.addCredits(prefetch); next.setEmptyCreditListener(this::getCreditsToAdd); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("Link {} is now active with {} credits.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("EntityPath[{}]: Link lost signal received for a link " + "that is not current. Ignoring the error. Current link {}, link lost {}", entityPath, linkName, errorContext.getTrackingId()); return; } } } currentLink = null; logger.warning("linkName[{}] entityPath[{}]. 
Error occurred in link.", linkName, entityPath); onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Terminal state reached. Disposing of link processor."); dispose(); } else { logger.info("Receive link endpoint states are closed. Requesting another."); final AmqpReceiveLink existing = currentLink; currentLink = null; if (existing != null) { existing.dispose(); } requestUpstream(); } }), next.receive().subscribe(message -> { messageQueue.add(message); drain(); })); } if (oldChannel != null) { oldChannel.dispose(); } if (oldSubscription != null) { oldSubscription.dispose(); } }
&& !currentLink.getLinkName().equals(errorContext.getTrackingId())) {
public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); final String entityPath = next.getEntityPath(); logger.info("linkName[{}] entityPath[{}]. Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; linkCreditsAdded.set(true); next.addCredits(prefetch); next.setEmptyCreditListener(this::getCreditsToAdd); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("Link {} is now active with {} credits.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("EntityPath[{}]: Link lost signal received for a link " + "that is not current. Ignoring the error. Current link {}, link lost {}", entityPath, linkName, errorContext.getTrackingId()); return; } } } currentLink = null; logger.warning("linkName[{}] entityPath[{}]. 
Error occurred in link.", linkName, entityPath); onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Terminal state reached. Disposing of link processor."); dispose(); } else { logger.info("Receive link endpoint states are closed. Requesting another."); final AmqpReceiveLink existing = currentLink; currentLink = null; if (existing != null) { existing.dispose(); } requestUpstream(); } }), next.receive().subscribe(message -> { messageQueue.add(message); drain(); })); } if (oldChannel != null) { oldChannel.dispose(); } if (oldSubscription != null) { oldSubscription.dispose(); } }
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkCreditsAdded = new AtomicBoolean(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final AmqpRetryPolicy retryPolicy; private final Disposable parentConnection; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile Disposable currentLinkSubscriptions; private volatile Disposable retrySubscription; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param retryPolicy Retry policy to apply when fetching a new AMQP channel. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(int prefetch, AmqpRetryPolicy retryPolicy, Disposable parentConnection) { this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { final AmqpReceiveLink link = currentLink; final String linkName = link != null ? link.getLinkName() : "n/a"; final String entityPath = link != null ? link.getEntityPath() : "n/a"; logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", linkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("Error on receive link {}", currentLink, throwable); if (isTerminated() || isCancelled) { logger.info("AmqpReceiveLinkProcessor is terminated. Cannot process another error.", throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("Parent connection is disposed. Not reopening on error."); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? 
super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. */ @Override public void onComplete() { logger.info("Receive link completed {}", currentLink); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { logger.info("Disposing receive link {}", currentLink); if (isTerminated.getAndSet(true)) { return; } drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); final AmqpReceiveLink link = currentLink; if (link != null && !linkCreditsAdded.getAndSet(true)) { int credits = getCreditsToAdd(); logger.verbose("Link credits not yet added. Adding: {}", credits); link.addCredits(credits); } drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { if (retrySubscription != null && !retrySubscription.isDisposed()) { retrySubscription.dispose(); } if (currentLink != null) { currentLink.dispose(); } currentLink = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (!wip.compareAndSet(0, 1)) { return; } try { drainQueue(); } finally { if (wip.decrementAndGet() != 0) { logger.warning("There is another worker in drainLoop. But there should only be 1 worker."); } } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = requested; boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("Exception occurred while handling downstream onNext operation.", e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } if (requested != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } if (currentLink != null) { currentLink.dispose(); } messageQueue.clear(); return true; } private int getCreditsToAdd() { final CoreSubscriber<? super Message> subscriber = downstream.get(); final long r = requested; if (subscriber == null || r == 0) { logger.verbose("Not adding credits. No downstream subscribers or items requested."); linkCreditsAdded.set(false); return 0; } linkCreditsAdded.set(true); return r == Long.MAX_VALUE ? 1 : Long.valueOf(r).intValue(); } }
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkCreditsAdded = new AtomicBoolean(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final AmqpRetryPolicy retryPolicy; private final Disposable parentConnection; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile Disposable currentLinkSubscriptions; private volatile Disposable retrySubscription; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param retryPolicy Retry policy to apply when fetching a new AMQP channel. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(int prefetch, AmqpRetryPolicy retryPolicy, Disposable parentConnection) { this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { final AmqpReceiveLink link = currentLink; final String linkName = link != null ? link.getLinkName() : "n/a"; final String entityPath = link != null ? link.getEntityPath() : "n/a"; logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", linkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("Error on receive link {}", currentLink, throwable); if (isTerminated() || isCancelled) { logger.info("AmqpReceiveLinkProcessor is terminated. Cannot process another error.", throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("Parent connection is disposed. Not reopening on error."); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? 
super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. */ @Override public void onComplete() { logger.info("Receive link completed {}", currentLink); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { logger.info("Disposing receive link {}", currentLink); if (isTerminated.getAndSet(true)) { return; } drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); final AmqpReceiveLink link = currentLink; if (link != null && !linkCreditsAdded.getAndSet(true)) { int credits = getCreditsToAdd(); logger.verbose("Link credits not yet added. Adding: {}", credits); link.addCredits(credits); } drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { if (retrySubscription != null && !retrySubscription.isDisposed()) { retrySubscription.dispose(); } if (currentLink != null) { currentLink.dispose(); } currentLink = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (!wip.compareAndSet(0, 1)) { return; } try { drainQueue(); } finally { if (wip.decrementAndGet() != 0) { logger.warning("There is another worker in drainLoop. But there should only be 1 worker."); } } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = requested; boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("Exception occurred while handling downstream onNext operation.", e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } if (requested != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } if (currentLink != null) { currentLink.dispose(); } messageQueue.clear(); return true; } private int getCreditsToAdd() { final CoreSubscriber<? super Message> subscriber = downstream.get(); final long r = requested; if (subscriber == null || r == 0) { logger.verbose("Not adding credits. No downstream subscribers or items requested."); linkCreditsAdded.set(false); return 0; } linkCreditsAdded.set(true); return r == Long.MAX_VALUE ? 1 : Long.valueOf(r).intValue(); } }
I thought the trackingId was an autogenerated value?
public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); final String entityPath = next.getEntityPath(); logger.info("linkName[{}] entityPath[{}]. Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; linkCreditsAdded.set(true); next.addCredits(prefetch); next.setEmptyCreditListener(this::getCreditsToAdd); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("Link {} is now active with {} credits.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("EntityPath[{}]: Link lost signal received for a link " + "that is not current. Ignoring the error. Current link {}, link lost {}", entityPath, linkName, errorContext.getTrackingId()); return; } } } currentLink = null; logger.warning("linkName[{}] entityPath[{}]. 
Error occurred in link.", linkName, entityPath); onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Terminal state reached. Disposing of link processor."); dispose(); } else { logger.info("Receive link endpoint states are closed. Requesting another."); final AmqpReceiveLink existing = currentLink; currentLink = null; if (existing != null) { existing.dispose(); } requestUpstream(); } }), next.receive().subscribe(message -> { messageQueue.add(message); drain(); })); } if (oldChannel != null) { oldChannel.dispose(); } if (oldSubscription != null) { oldSubscription.dispose(); } }
&& !currentLink.getLinkName().equals(errorContext.getTrackingId())) {
public void onNext(AmqpReceiveLink next) { Objects.requireNonNull(next, "'next' cannot be null."); if (isTerminated()) { logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.", next.getLinkName(), next.getEntityPath()); Operators.onNextDropped(next, currentContext()); return; } final String linkName = next.getLinkName(); final String entityPath = next.getEntityPath(); logger.info("linkName[{}] entityPath[{}]. Setting next AMQP receive link.", linkName, entityPath); final AmqpReceiveLink oldChannel; final Disposable oldSubscription; synchronized (lock) { oldChannel = currentLink; oldSubscription = currentLinkSubscriptions; currentLink = next; linkCreditsAdded.set(true); next.addCredits(prefetch); next.setEmptyCreditListener(this::getCreditsToAdd); currentLinkSubscriptions = Disposables.composite( next.getEndpointStates().subscribe( state -> { if (state == AmqpEndpointState.ACTIVE) { logger.info("Link {} is now active with {} credits.", linkName, next.getCredits()); retryAttempts.set(0); } }, error -> { if (error instanceof AmqpException) { AmqpException amqpException = (AmqpException) error; if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN && amqpException.getContext() != null && amqpException.getContext() instanceof LinkErrorContext) { LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext(); if (currentLink != null && !currentLink.getLinkName().equals(errorContext.getTrackingId())) { logger.info("EntityPath[{}]: Link lost signal received for a link " + "that is not current. Ignoring the error. Current link {}, link lost {}", entityPath, linkName, errorContext.getTrackingId()); return; } } } currentLink = null; logger.warning("linkName[{}] entityPath[{}]. 
Error occurred in link.", linkName, entityPath); onError(error); }, () -> { if (parentConnection.isDisposed() || isTerminated() || UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Terminal state reached. Disposing of link processor."); dispose(); } else { logger.info("Receive link endpoint states are closed. Requesting another."); final AmqpReceiveLink existing = currentLink; currentLink = null; if (existing != null) { existing.dispose(); } requestUpstream(); } }), next.receive().subscribe(message -> { messageQueue.add(message); drain(); })); } if (oldChannel != null) { oldChannel.dispose(); } if (oldSubscription != null) { oldSubscription.dispose(); } }
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription { private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class); private final Object lock = new Object(); private final AtomicBoolean isTerminated = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>(); private final AtomicBoolean linkCreditsAdded = new AtomicBoolean(); private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>(); private final AtomicInteger wip = new AtomicInteger(); private final int prefetch; private final AmqpRetryPolicy retryPolicy; private final Disposable parentConnection; private volatile Throwable lastError; private volatile boolean isCancelled; private volatile AmqpReceiveLink currentLink; private volatile Disposable currentLinkSubscriptions; private volatile Disposable retrySubscription; private volatile Subscription upstream; private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM = AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class, "upstream"); private volatile long requested; private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED = AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested"); /** * Creates an instance of {@link AmqpReceiveLinkProcessor}. * * @param prefetch The number if messages to initially fetch. * @param retryPolicy Retry policy to apply when fetching a new AMQP channel. * @param parentConnection Represents the parent connection. * * @throws NullPointerException if {@code retryPolicy} is null. * @throws IllegalArgumentException if {@code prefetch} is less than 0. 
*/ public AmqpReceiveLinkProcessor(int prefetch, AmqpRetryPolicy retryPolicy, Disposable parentConnection) { this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null."); this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null."); if (prefetch < 0) { throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0.")); } this.prefetch = prefetch; } /** * Gets the error associated with this processor. * * @return Error associated with this processor. {@code null} if there is no error. */ @Override public Throwable getError() { return lastError; } /** * Gets whether or not the processor is terminated. * * @return {@code true} if the processor has terminated; false otherwise. */ @Override public boolean isTerminated() { return isTerminated.get() || isCancelled; } /** * When a subscription is obtained from upstream publisher. * * @param subscription Subscription to upstream publisher. */ @Override public void onSubscribe(Subscription subscription) { Objects.requireNonNull(subscription, "'subscription' cannot be null"); logger.info("Setting new subscription for receive link processor"); if (!Operators.setOnce(UPSTREAM, this, subscription)) { throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice.")); } requestUpstream(); } @Override public int getPrefetch() { return prefetch; } /** * When the next AMQP link is fetched. * * @param next The next AMQP receive link. */ @Override /** * Sets up the downstream subscriber. * * @param actual The downstream subscriber. * * @throws IllegalStateException if there is already a downstream subscriber. */ @Override public void subscribe(CoreSubscriber<? 
super Message> actual) { Objects.requireNonNull(actual, "'actual' cannot be null."); final boolean terminateSubscriber = isTerminated() || (currentLink == null && upstream == Operators.cancelledSubscription()); if (isTerminated()) { final AmqpReceiveLink link = currentLink; final String linkName = link != null ? link.getLinkName() : "n/a"; final String entityPath = link != null ? link.getEntityPath() : "n/a"; logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.", linkName, entityPath); } else if (currentLink == null && upstream == Operators.cancelledSubscription()) { logger.info("There is no current link and upstream is terminated."); } if (terminateSubscriber) { actual.onSubscribe(Operators.emptySubscription()); if (hasError()) { actual.onError(lastError); } else { actual.onComplete(); } return; } if (downstream.compareAndSet(null, actual)) { actual.onSubscribe(this); drain(); } else { Operators.error(actual, logger.logExceptionAsError(new IllegalStateException( "There is already one downstream subscriber.'"))); } } /** * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure, another AMQP * element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor closes. * * @param throwable Error that occurred in upstream publisher. */ @Override public void onError(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' is required."); logger.info("Error on receive link {}", currentLink, throwable); if (isTerminated() || isCancelled) { logger.info("AmqpReceiveLinkProcessor is terminated. Cannot process another error.", throwable); Operators.onErrorDropped(throwable, currentContext()); return; } if (parentConnection.isDisposed()) { logger.info("Parent connection is disposed. Not reopening on error."); } lastError = throwable; isTerminated.set(true); final CoreSubscriber<? 
super Message> subscriber = downstream.get(); if (subscriber != null) { subscriber.onError(throwable); } onDispose(); } /** * When the upstream publisher has no more items to emit. */ @Override public void onComplete() { logger.info("Receive link completed {}", currentLink); UPSTREAM.set(this, Operators.cancelledSubscription()); } @Override public void dispose() { logger.info("Disposing receive link {}", currentLink); if (isTerminated.getAndSet(true)) { return; } drain(); onDispose(); } /** * When downstream subscriber makes a back-pressure request. */ @Override public void request(long request) { if (!Operators.validate(request)) { logger.warning("Invalid request: {}", request); return; } Operators.addCap(REQUESTED, this, request); final AmqpReceiveLink link = currentLink; if (link != null && !linkCreditsAdded.getAndSet(true)) { int credits = getCreditsToAdd(); logger.verbose("Link credits not yet added. Adding: {}", credits); link.addCredits(credits); } drain(); } /** * When downstream subscriber cancels their subscription. */ @Override public void cancel() { if (isCancelled) { return; } isCancelled = true; drain(); } /** * Requests another receive link from upstream. */ private void requestUpstream() { if (isTerminated()) { logger.info("Processor is terminated. Not requesting another link."); return; } else if (UPSTREAM.get(this) == null) { logger.info("There is no upstream. Not requesting another link."); return; } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) { logger.info("Upstream is cancelled or complete. Not requesting another link."); return; } synchronized (lock) { if (currentLink != null) { logger.info("Current link exists. 
Not requesting another link."); return; } } logger.info("Requesting a new AmqpReceiveLink from upstream."); UPSTREAM.get(this).request(1L); } private void onDispose() { if (retrySubscription != null && !retrySubscription.isDisposed()) { retrySubscription.dispose(); } if (currentLink != null) { currentLink.dispose(); } currentLink = null; if (currentLinkSubscriptions != null) { currentLinkSubscriptions.dispose(); } Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null); } private void drain() { if (!wip.compareAndSet(0, 1)) { return; } try { drainQueue(); } finally { if (wip.decrementAndGet() != 0) { logger.warning("There is another worker in drainLoop. But there should only be 1 worker."); } } } private void drainQueue() { final CoreSubscriber<? super Message> subscriber = downstream.get(); if (subscriber == null || checkAndSetTerminated()) { return; } long numberRequested = requested; boolean isEmpty = messageQueue.isEmpty(); while (numberRequested != 0L && !isEmpty) { if (checkAndSetTerminated()) { break; } long numberEmitted = 0L; while (numberRequested != numberEmitted) { if (isEmpty && checkAndSetTerminated()) { break; } Message message = messageQueue.poll(); if (message == null) { break; } if (isCancelled) { Operators.onDiscard(message, subscriber.currentContext()); Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null); return; } try { subscriber.onNext(message); } catch (Exception e) { logger.error("Exception occurred while handling downstream onNext operation.", e); throw logger.logExceptionAsError(Exceptions.propagate( Operators.onOperatorError(upstream, e, message, subscriber.currentContext()))); } numberEmitted++; isEmpty = messageQueue.isEmpty(); } if (requested != Long.MAX_VALUE) { numberRequested = REQUESTED.addAndGet(this, -numberEmitted); } } } private boolean checkAndSetTerminated() { if (!isTerminated()) { return false; } final CoreSubscriber<? 
super Message> subscriber = downstream.get(); final Throwable error = lastError; if (error != null) { subscriber.onError(error); } else { subscriber.onComplete(); } if (currentLink != null) { currentLink.dispose(); } messageQueue.clear(); return true; } private int getCreditsToAdd() { final CoreSubscriber<? super Message> subscriber = downstream.get(); final long r = requested; if (subscriber == null || r == 0) { logger.verbose("Not adding credits. No downstream subscribers or items requested."); linkCreditsAdded.set(false); return 0; } linkCreditsAdded.set(true); return r == Long.MAX_VALUE ? 1 : Long.valueOf(r).intValue(); } }
/**
 * Processes AMQP receive links emitted by an upstream publisher and relays their {@link Message}s to a
 * single downstream subscriber, honouring Reactive Streams back-pressure by translating requests into
 * link credits.
 *
 * NOTE(review): in this copy the body of {@code onNext(AmqpReceiveLink)} appears to have been elided
 * between the two {@code @Override} markers below — confirm against the canonical source file.
 */
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription {
    private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class);
    // Guards swaps of currentLink/currentLinkSubscriptions.
    private final Object lock = new Object();
    private final AtomicBoolean isTerminated = new AtomicBoolean();
    private final AtomicInteger retryAttempts = new AtomicInteger();
    // Messages received from the active link; drained to the downstream subscriber in drainQueue().
    private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>();
    // Whether credits have already been placed on the current link for the outstanding request.
    private final AtomicBoolean linkCreditsAdded = new AtomicBoolean();
    // At most one downstream subscriber is supported.
    private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>();
    // Work-in-progress counter so only one thread runs the drain loop at a time.
    private final AtomicInteger wip = new AtomicInteger();
    private final int prefetch;
    private final AmqpRetryPolicy retryPolicy;
    private final Disposable parentConnection;
    private volatile Throwable lastError;
    private volatile boolean isCancelled;
    private volatile AmqpReceiveLink currentLink;
    private volatile Disposable currentLinkSubscriptions;
    private volatile Disposable retrySubscription;
    private volatile Subscription upstream;
    private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM =
        AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class,
            "upstream");
    private volatile long requested;
    private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED =
        AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested");

    /**
     * Creates an instance of {@link AmqpReceiveLinkProcessor}.
     *
     * @param prefetch The number of messages to initially fetch.
     * @param retryPolicy Retry policy to apply when fetching a new AMQP channel.
     * @param parentConnection Represents the parent connection.
     *
     * @throws NullPointerException if {@code retryPolicy} or {@code parentConnection} is null.
     * @throws IllegalArgumentException if {@code prefetch} is less than 0.
     */
    public AmqpReceiveLinkProcessor(int prefetch, AmqpRetryPolicy retryPolicy, Disposable parentConnection) {
        this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
        this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null.");
        if (prefetch < 0) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0."));
        }
        this.prefetch = prefetch;
    }

    /**
     * Gets the error associated with this processor.
     *
     * @return Error associated with this processor. {@code null} if there is no error.
     */
    @Override
    public Throwable getError() {
        return lastError;
    }

    /**
     * Gets whether or not the processor is terminated.
     *
     * @return {@code true} if the processor has terminated or been cancelled; false otherwise.
     */
    @Override
    public boolean isTerminated() {
        return isTerminated.get() || isCancelled;
    }

    /**
     * When a subscription is obtained from upstream publisher. The upstream may only be set once.
     *
     * @param subscription Subscription to upstream publisher.
     */
    @Override
    public void onSubscribe(Subscription subscription) {
        Objects.requireNonNull(subscription, "'subscription' cannot be null");
        logger.info("Setting new subscription for receive link processor");
        if (!Operators.setOnce(UPSTREAM, this, subscription)) {
            throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice."));
        }
        // Immediately ask upstream for the first receive link.
        requestUpstream();
    }

    @Override
    public int getPrefetch() {
        return prefetch;
    }

    /**
     * When the next AMQP link is fetched.
     *
     * @param next The next AMQP receive link.
     */
    @Override
    // NOTE(review): the onNext(AmqpReceiveLink) method body is missing at this point in this copy.
    /**
     * Sets up the downstream subscriber.
     *
     * @param actual The downstream subscriber.
     *
     * @throws IllegalStateException if there is already a downstream subscriber.
     */
    @Override
    public void subscribe(CoreSubscriber<? super Message> actual) {
        Objects.requireNonNull(actual, "'actual' cannot be null.");
        // Terminate the new subscriber immediately when the processor is finished or can never emit.
        final boolean terminateSubscriber = isTerminated()
            || (currentLink == null && upstream == Operators.cancelledSubscription());
        if (isTerminated()) {
            final AmqpReceiveLink link = currentLink;
            final String linkName = link != null ? link.getLinkName() : "n/a";
            final String entityPath = link != null ? link.getEntityPath() : "n/a";
            logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.",
                linkName, entityPath);
        } else if (currentLink == null && upstream == Operators.cancelledSubscription()) {
            logger.info("There is no current link and upstream is terminated.");
        }
        if (terminateSubscriber) {
            actual.onSubscribe(Operators.emptySubscription());
            if (hasError()) {
                actual.onError(lastError);
            } else {
                actual.onComplete();
            }
            return;
        }
        // Only one downstream subscriber is allowed.
        if (downstream.compareAndSet(null, actual)) {
            actual.onSubscribe(this);
            drain();
        } else {
            Operators.error(actual, logger.logExceptionAsError(new IllegalStateException(
                "There is already one downstream subscriber.'")));
        }
    }

    /**
     * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure,
     * another AMQP element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor
     * closes.
     *
     * @param throwable Error that occurred in upstream publisher.
     */
    @Override
    public void onError(Throwable throwable) {
        Objects.requireNonNull(throwable, "'throwable' is required.");
        logger.info("Error on receive link {}", currentLink, throwable);
        if (isTerminated() || isCancelled) {
            logger.info("AmqpReceiveLinkProcessor is terminated. Cannot process another error.", throwable);
            Operators.onErrorDropped(throwable, currentContext());
            return;
        }
        if (parentConnection.isDisposed()) {
            logger.info("Parent connection is disposed. Not reopening on error.");
        }
        lastError = throwable;
        isTerminated.set(true);
        final CoreSubscriber<? super Message> subscriber = downstream.get();
        if (subscriber != null) {
            subscriber.onError(throwable);
        }
        onDispose();
    }

    /**
     * When the upstream publisher has no more items to emit.
     */
    @Override
    public void onComplete() {
        logger.info("Receive link completed {}", currentLink);
        UPSTREAM.set(this, Operators.cancelledSubscription());
    }

    @Override
    public void dispose() {
        logger.info("Disposing receive link {}", currentLink);
        // getAndSet ensures the tear-down below runs at most once.
        if (isTerminated.getAndSet(true)) {
            return;
        }
        drain();
        onDispose();
    }

    /**
     * When downstream subscriber makes a back-pressure request.
     */
    @Override
    public void request(long request) {
        if (!Operators.validate(request)) {
            logger.warning("Invalid request: {}", request);
            return;
        }
        Operators.addCap(REQUESTED, this, request);
        final AmqpReceiveLink link = currentLink;
        // Place credits on the link if none are outstanding for the new demand.
        if (link != null && !linkCreditsAdded.getAndSet(true)) {
            int credits = getCreditsToAdd();
            logger.verbose("Link credits not yet added. Adding: {}", credits);
            link.addCredits(credits);
        }
        drain();
    }

    /**
     * When downstream subscriber cancels their subscription.
     */
    @Override
    public void cancel() {
        if (isCancelled) {
            return;
        }
        isCancelled = true;
        drain();
    }

    /**
     * Requests another receive link from upstream, unless terminated, there is no upstream, the upstream
     * finished, or a link already exists.
     */
    private void requestUpstream() {
        if (isTerminated()) {
            logger.info("Processor is terminated. Not requesting another link.");
            return;
        } else if (UPSTREAM.get(this) == null) {
            logger.info("There is no upstream. Not requesting another link.");
            return;
        } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) {
            logger.info("Upstream is cancelled or complete. Not requesting another link.");
            return;
        }
        synchronized (lock) {
            if (currentLink != null) {
                logger.info("Current link exists. Not requesting another link.");
                return;
            }
        }
        logger.info("Requesting a new AmqpReceiveLink from upstream.");
        UPSTREAM.get(this).request(1L);
    }

    // Disposes of the retry subscription, the current link and its subscriptions, and discards any
    // queued messages.
    private void onDispose() {
        if (retrySubscription != null && !retrySubscription.isDisposed()) {
            retrySubscription.dispose();
        }
        if (currentLink != null) {
            currentLink.dispose();
        }
        currentLink = null;
        if (currentLinkSubscriptions != null) {
            currentLinkSubscriptions.dispose();
        }
        Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null);
    }

    // Enters the drain loop if no other thread currently holds it (classic wip-guarded drain).
    private void drain() {
        if (!wip.compareAndSet(0, 1)) {
            return;
        }
        try {
            drainQueue();
        } finally {
            if (wip.decrementAndGet() != 0) {
                logger.warning("There is another worker in drainLoop. But there should only be 1 worker.");
            }
        }
    }

    // Emits queued messages to the downstream subscriber, bounded by the requested amount, and
    // decrements the request counter accordingly.
    private void drainQueue() {
        final CoreSubscriber<? super Message> subscriber = downstream.get();
        if (subscriber == null || checkAndSetTerminated()) {
            return;
        }
        long numberRequested = requested;
        boolean isEmpty = messageQueue.isEmpty();
        while (numberRequested != 0L && !isEmpty) {
            if (checkAndSetTerminated()) {
                break;
            }
            long numberEmitted = 0L;
            while (numberRequested != numberEmitted) {
                if (isEmpty && checkAndSetTerminated()) {
                    break;
                }
                Message message = messageQueue.poll();
                if (message == null) {
                    break;
                }
                if (isCancelled) {
                    // Downstream cancelled mid-drain: discard the message and the rest of the queue.
                    Operators.onDiscard(message, subscriber.currentContext());
                    Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null);
                    return;
                }
                try {
                    subscriber.onNext(message);
                } catch (Exception e) {
                    logger.error("Exception occurred while handling downstream onNext operation.", e);
                    throw logger.logExceptionAsError(Exceptions.propagate(
                        Operators.onOperatorError(upstream, e, message, subscriber.currentContext())));
                }
                numberEmitted++;
                isEmpty = messageQueue.isEmpty();
            }
            if (requested != Long.MAX_VALUE) {
                numberRequested = REQUESTED.addAndGet(this, -numberEmitted);
            }
        }
    }

    // If terminated, signals the terminal event (error or complete) downstream, disposes the link,
    // clears the queue, and reports true.
    private boolean checkAndSetTerminated() {
        if (!isTerminated()) {
            return false;
        }
        final CoreSubscriber<? super Message> subscriber = downstream.get();
        final Throwable error = lastError;
        if (error != null) {
            subscriber.onError(error);
        } else {
            subscriber.onComplete();
        }
        if (currentLink != null) {
            currentLink.dispose();
        }
        messageQueue.clear();
        return true;
    }

    // Computes how many credits to place on the link for the current downstream demand; 0 when there
    // is no subscriber or no demand. Unbounded demand maps to a single credit at a time.
    private int getCreditsToAdd() {
        final CoreSubscriber<? super Message> subscriber = downstream.get();
        final long r = requested;
        if (subscriber == null || r == 0) {
            logger.verbose("Not adding credits. No downstream subscribers or items requested.");
            linkCreditsAdded.set(false);
            return 0;
        }
        linkCreditsAdded.set(true);
        return r == Long.MAX_VALUE ? 1 : Long.valueOf(r).intValue();
    }
}
When the link is stolen, the `trackingId` in LinkErrorContext is the link name. ```java if (link.getRemoteProperties() != null && link.getRemoteProperties().containsKey(TRACKING_ID_PROPERTY)) { referenceId = link.getRemoteProperties().get(TRACKING_ID_PROPERTY).toString(); } else { referenceId = link.getName(); } return new LinkErrorContext(getHostname(), entityPath, referenceId, link.getCredit()); ```
/**
 * Called when the upstream publisher emits a new AMQP receive link. Swaps out the previous link and its
 * subscriptions under {@code lock}, places the prefetch credits on the new link, and wires the link's
 * endpoint states and message stream into this processor.
 *
 * @param next The next AMQP receive link.
 */
public void onNext(AmqpReceiveLink next) {
    Objects.requireNonNull(next, "'next' cannot be null.");
    if (isTerminated()) {
        logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.",
            next.getLinkName(), next.getEntityPath());
        Operators.onNextDropped(next, currentContext());
        return;
    }
    final String linkName = next.getLinkName();
    final String entityPath = next.getEntityPath();
    logger.info("linkName[{}] entityPath[{}]. Setting next AMQP receive link.", linkName, entityPath);
    final AmqpReceiveLink oldChannel;
    final Disposable oldSubscription;
    synchronized (lock) {
        oldChannel = currentLink;
        oldSubscription = currentLinkSubscriptions;
        currentLink = next;
        linkCreditsAdded.set(true);
        next.addCredits(prefetch);
        next.setEmptyCreditListener(this::getCreditsToAdd);
        currentLinkSubscriptions = Disposables.composite(
            next.getEndpointStates().subscribe(
                state -> {
                    if (state == AmqpEndpointState.ACTIVE) {
                        logger.info("Link {} is now active with {} credits.", linkName, next.getCredits());
                        // A successfully opened link resets the retry counter.
                        retryAttempts.set(0);
                    }
                },
                error -> {
                    if (error instanceof AmqpException) {
                        AmqpException amqpException = (AmqpException) error;
                        if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN
                            && amqpException.getContext() != null
                            && amqpException.getContext() instanceof LinkErrorContext) {
                            LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext();
                            // When a link is stolen, the context's trackingId carries the stolen link's
                            // name. If it does not match the current link, the signal is stale (it is
                            // for a link we already replaced) and can safely be ignored.
                            if (currentLink != null
                                && !currentLink.getLinkName().equals(errorContext.getTrackingId())) {
                                logger.info("EntityPath[{}]: Link lost signal received for a link "
                                    + "that is not current. Ignoring the error. Current link {}, link lost {}",
                                    entityPath, linkName, errorContext.getTrackingId());
                                return;
                            }
                        }
                    }
                    currentLink = null;
                    logger.warning("linkName[{}] entityPath[{}]. Error occurred in link.", linkName, entityPath);
                    onError(error);
                },
                () -> {
                    if (parentConnection.isDisposed() || isTerminated()
                        || UPSTREAM.get(this) == Operators.cancelledSubscription()) {
                        logger.info("Terminal state reached. Disposing of link processor.");
                        dispose();
                    } else {
                        // The link closed but the processor is still live: dispose of it and ask
                        // upstream for a replacement.
                        logger.info("Receive link endpoint states are closed. Requesting another.");
                        final AmqpReceiveLink existing = currentLink;
                        currentLink = null;
                        if (existing != null) {
                            existing.dispose();
                        }
                        requestUpstream();
                    }
                }),
            next.receive().subscribe(message -> {
                messageQueue.add(message);
                drain();
            }));
    }
    // Dispose of the replaced link/subscriptions outside of the lock.
    if (oldChannel != null) {
        oldChannel.dispose();
    }
    if (oldSubscription != null) {
        oldSubscription.dispose();
    }
}
&& !currentLink.getLinkName().equals(errorContext.getTrackingId())) {
/**
 * Called when the upstream publisher emits a new AMQP receive link. Swaps out the previous link and its
 * subscriptions under {@code lock}, places the prefetch credits on the new link, and wires the link's
 * endpoint states and message stream into this processor.
 *
 * @param next The next AMQP receive link.
 */
public void onNext(AmqpReceiveLink next) {
    Objects.requireNonNull(next, "'next' cannot be null.");
    if (isTerminated()) {
        logger.warning("linkName[{}] entityPath[{}]. Got another link when we have already terminated processor.",
            next.getLinkName(), next.getEntityPath());
        Operators.onNextDropped(next, currentContext());
        return;
    }
    final String linkName = next.getLinkName();
    final String entityPath = next.getEntityPath();
    logger.info("linkName[{}] entityPath[{}]. Setting next AMQP receive link.", linkName, entityPath);
    final AmqpReceiveLink oldChannel;
    final Disposable oldSubscription;
    synchronized (lock) {
        oldChannel = currentLink;
        oldSubscription = currentLinkSubscriptions;
        currentLink = next;
        linkCreditsAdded.set(true);
        next.addCredits(prefetch);
        next.setEmptyCreditListener(this::getCreditsToAdd);
        currentLinkSubscriptions = Disposables.composite(
            next.getEndpointStates().subscribe(
                state -> {
                    if (state == AmqpEndpointState.ACTIVE) {
                        logger.info("Link {} is now active with {} credits.", linkName, next.getCredits());
                        // A successfully opened link resets the retry counter.
                        retryAttempts.set(0);
                    }
                },
                error -> {
                    if (error instanceof AmqpException) {
                        AmqpException amqpException = (AmqpException) error;
                        if (amqpException.getErrorCondition() == AmqpErrorCondition.LINK_STOLEN
                            && amqpException.getContext() != null
                            && amqpException.getContext() instanceof LinkErrorContext) {
                            LinkErrorContext errorContext = (LinkErrorContext) amqpException.getContext();
                            // When a link is stolen, the context's trackingId carries the stolen link's
                            // name. If it does not match the current link, the signal is stale (it is
                            // for a link we already replaced) and can safely be ignored.
                            if (currentLink != null
                                && !currentLink.getLinkName().equals(errorContext.getTrackingId())) {
                                logger.info("EntityPath[{}]: Link lost signal received for a link "
                                    + "that is not current. Ignoring the error. Current link {}, link lost {}",
                                    entityPath, linkName, errorContext.getTrackingId());
                                return;
                            }
                        }
                    }
                    currentLink = null;
                    logger.warning("linkName[{}] entityPath[{}]. Error occurred in link.", linkName, entityPath);
                    onError(error);
                },
                () -> {
                    if (parentConnection.isDisposed() || isTerminated()
                        || UPSTREAM.get(this) == Operators.cancelledSubscription()) {
                        logger.info("Terminal state reached. Disposing of link processor.");
                        dispose();
                    } else {
                        // The link closed but the processor is still live: dispose of it and ask
                        // upstream for a replacement.
                        logger.info("Receive link endpoint states are closed. Requesting another.");
                        final AmqpReceiveLink existing = currentLink;
                        currentLink = null;
                        if (existing != null) {
                            existing.dispose();
                        }
                        requestUpstream();
                    }
                }),
            next.receive().subscribe(message -> {
                messageQueue.add(message);
                drain();
            }));
    }
    // Dispose of the replaced link/subscriptions outside of the lock.
    if (oldChannel != null) {
        oldChannel.dispose();
    }
    if (oldSubscription != null) {
        oldSubscription.dispose();
    }
}
/**
 * Processes AMQP receive links emitted by an upstream publisher and relays their {@link Message}s to a
 * single downstream subscriber, honouring Reactive Streams back-pressure by translating requests into
 * link credits.
 *
 * NOTE(review): in this copy the body of {@code onNext(AmqpReceiveLink)} appears to have been elided
 * between the two {@code @Override} markers below — confirm against the canonical source file.
 */
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription {
    private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class);
    // Guards swaps of currentLink/currentLinkSubscriptions.
    private final Object lock = new Object();
    private final AtomicBoolean isTerminated = new AtomicBoolean();
    private final AtomicInteger retryAttempts = new AtomicInteger();
    // Messages received from the active link; drained to the downstream subscriber in drainQueue().
    private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>();
    // Whether credits have already been placed on the current link for the outstanding request.
    private final AtomicBoolean linkCreditsAdded = new AtomicBoolean();
    // At most one downstream subscriber is supported.
    private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>();
    // Work-in-progress counter so only one thread runs the drain loop at a time.
    private final AtomicInteger wip = new AtomicInteger();
    private final int prefetch;
    private final AmqpRetryPolicy retryPolicy;
    private final Disposable parentConnection;
    private volatile Throwable lastError;
    private volatile boolean isCancelled;
    private volatile AmqpReceiveLink currentLink;
    private volatile Disposable currentLinkSubscriptions;
    private volatile Disposable retrySubscription;
    private volatile Subscription upstream;
    private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM =
        AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class,
            "upstream");
    private volatile long requested;
    private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED =
        AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested");

    /**
     * Creates an instance of {@link AmqpReceiveLinkProcessor}.
     *
     * @param prefetch The number of messages to initially fetch.
     * @param retryPolicy Retry policy to apply when fetching a new AMQP channel.
     * @param parentConnection Represents the parent connection.
     *
     * @throws NullPointerException if {@code retryPolicy} or {@code parentConnection} is null.
     * @throws IllegalArgumentException if {@code prefetch} is less than 0.
     */
    public AmqpReceiveLinkProcessor(int prefetch, AmqpRetryPolicy retryPolicy, Disposable parentConnection) {
        this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
        this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null.");
        if (prefetch < 0) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0."));
        }
        this.prefetch = prefetch;
    }

    /**
     * Gets the error associated with this processor.
     *
     * @return Error associated with this processor. {@code null} if there is no error.
     */
    @Override
    public Throwable getError() {
        return lastError;
    }

    /**
     * Gets whether or not the processor is terminated.
     *
     * @return {@code true} if the processor has terminated or been cancelled; false otherwise.
     */
    @Override
    public boolean isTerminated() {
        return isTerminated.get() || isCancelled;
    }

    /**
     * When a subscription is obtained from upstream publisher. The upstream may only be set once.
     *
     * @param subscription Subscription to upstream publisher.
     */
    @Override
    public void onSubscribe(Subscription subscription) {
        Objects.requireNonNull(subscription, "'subscription' cannot be null");
        logger.info("Setting new subscription for receive link processor");
        if (!Operators.setOnce(UPSTREAM, this, subscription)) {
            throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice."));
        }
        // Immediately ask upstream for the first receive link.
        requestUpstream();
    }

    @Override
    public int getPrefetch() {
        return prefetch;
    }

    /**
     * When the next AMQP link is fetched.
     *
     * @param next The next AMQP receive link.
     */
    @Override
    // NOTE(review): the onNext(AmqpReceiveLink) method body is missing at this point in this copy.
    /**
     * Sets up the downstream subscriber.
     *
     * @param actual The downstream subscriber.
     *
     * @throws IllegalStateException if there is already a downstream subscriber.
     */
    @Override
    public void subscribe(CoreSubscriber<? super Message> actual) {
        Objects.requireNonNull(actual, "'actual' cannot be null.");
        // Terminate the new subscriber immediately when the processor is finished or can never emit.
        final boolean terminateSubscriber = isTerminated()
            || (currentLink == null && upstream == Operators.cancelledSubscription());
        if (isTerminated()) {
            final AmqpReceiveLink link = currentLink;
            final String linkName = link != null ? link.getLinkName() : "n/a";
            final String entityPath = link != null ? link.getEntityPath() : "n/a";
            logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.",
                linkName, entityPath);
        } else if (currentLink == null && upstream == Operators.cancelledSubscription()) {
            logger.info("There is no current link and upstream is terminated.");
        }
        if (terminateSubscriber) {
            actual.onSubscribe(Operators.emptySubscription());
            if (hasError()) {
                actual.onError(lastError);
            } else {
                actual.onComplete();
            }
            return;
        }
        // Only one downstream subscriber is allowed.
        if (downstream.compareAndSet(null, actual)) {
            actual.onSubscribe(this);
            drain();
        } else {
            Operators.error(actual, logger.logExceptionAsError(new IllegalStateException(
                "There is already one downstream subscriber.'")));
        }
    }

    /**
     * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure,
     * another AMQP element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor
     * closes.
     *
     * @param throwable Error that occurred in upstream publisher.
     */
    @Override
    public void onError(Throwable throwable) {
        Objects.requireNonNull(throwable, "'throwable' is required.");
        logger.info("Error on receive link {}", currentLink, throwable);
        if (isTerminated() || isCancelled) {
            logger.info("AmqpReceiveLinkProcessor is terminated. Cannot process another error.", throwable);
            Operators.onErrorDropped(throwable, currentContext());
            return;
        }
        if (parentConnection.isDisposed()) {
            logger.info("Parent connection is disposed. Not reopening on error.");
        }
        lastError = throwable;
        isTerminated.set(true);
        final CoreSubscriber<? super Message> subscriber = downstream.get();
        if (subscriber != null) {
            subscriber.onError(throwable);
        }
        onDispose();
    }

    /**
     * When the upstream publisher has no more items to emit.
     */
    @Override
    public void onComplete() {
        logger.info("Receive link completed {}", currentLink);
        UPSTREAM.set(this, Operators.cancelledSubscription());
    }

    @Override
    public void dispose() {
        logger.info("Disposing receive link {}", currentLink);
        // getAndSet ensures the tear-down below runs at most once.
        if (isTerminated.getAndSet(true)) {
            return;
        }
        drain();
        onDispose();
    }

    /**
     * When downstream subscriber makes a back-pressure request.
     */
    @Override
    public void request(long request) {
        if (!Operators.validate(request)) {
            logger.warning("Invalid request: {}", request);
            return;
        }
        Operators.addCap(REQUESTED, this, request);
        final AmqpReceiveLink link = currentLink;
        // Place credits on the link if none are outstanding for the new demand.
        if (link != null && !linkCreditsAdded.getAndSet(true)) {
            int credits = getCreditsToAdd();
            logger.verbose("Link credits not yet added. Adding: {}", credits);
            link.addCredits(credits);
        }
        drain();
    }

    /**
     * When downstream subscriber cancels their subscription.
     */
    @Override
    public void cancel() {
        if (isCancelled) {
            return;
        }
        isCancelled = true;
        drain();
    }

    /**
     * Requests another receive link from upstream, unless terminated, there is no upstream, the upstream
     * finished, or a link already exists.
     */
    private void requestUpstream() {
        if (isTerminated()) {
            logger.info("Processor is terminated. Not requesting another link.");
            return;
        } else if (UPSTREAM.get(this) == null) {
            logger.info("There is no upstream. Not requesting another link.");
            return;
        } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) {
            logger.info("Upstream is cancelled or complete. Not requesting another link.");
            return;
        }
        synchronized (lock) {
            if (currentLink != null) {
                logger.info("Current link exists. Not requesting another link.");
                return;
            }
        }
        logger.info("Requesting a new AmqpReceiveLink from upstream.");
        UPSTREAM.get(this).request(1L);
    }

    // Disposes of the retry subscription, the current link and its subscriptions, and discards any
    // queued messages.
    private void onDispose() {
        if (retrySubscription != null && !retrySubscription.isDisposed()) {
            retrySubscription.dispose();
        }
        if (currentLink != null) {
            currentLink.dispose();
        }
        currentLink = null;
        if (currentLinkSubscriptions != null) {
            currentLinkSubscriptions.dispose();
        }
        Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null);
    }

    // Enters the drain loop if no other thread currently holds it (classic wip-guarded drain).
    private void drain() {
        if (!wip.compareAndSet(0, 1)) {
            return;
        }
        try {
            drainQueue();
        } finally {
            if (wip.decrementAndGet() != 0) {
                logger.warning("There is another worker in drainLoop. But there should only be 1 worker.");
            }
        }
    }

    // Emits queued messages to the downstream subscriber, bounded by the requested amount, and
    // decrements the request counter accordingly.
    private void drainQueue() {
        final CoreSubscriber<? super Message> subscriber = downstream.get();
        if (subscriber == null || checkAndSetTerminated()) {
            return;
        }
        long numberRequested = requested;
        boolean isEmpty = messageQueue.isEmpty();
        while (numberRequested != 0L && !isEmpty) {
            if (checkAndSetTerminated()) {
                break;
            }
            long numberEmitted = 0L;
            while (numberRequested != numberEmitted) {
                if (isEmpty && checkAndSetTerminated()) {
                    break;
                }
                Message message = messageQueue.poll();
                if (message == null) {
                    break;
                }
                if (isCancelled) {
                    // Downstream cancelled mid-drain: discard the message and the rest of the queue.
                    Operators.onDiscard(message, subscriber.currentContext());
                    Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null);
                    return;
                }
                try {
                    subscriber.onNext(message);
                } catch (Exception e) {
                    logger.error("Exception occurred while handling downstream onNext operation.", e);
                    throw logger.logExceptionAsError(Exceptions.propagate(
                        Operators.onOperatorError(upstream, e, message, subscriber.currentContext())));
                }
                numberEmitted++;
                isEmpty = messageQueue.isEmpty();
            }
            if (requested != Long.MAX_VALUE) {
                numberRequested = REQUESTED.addAndGet(this, -numberEmitted);
            }
        }
    }

    // If terminated, signals the terminal event (error or complete) downstream, disposes the link,
    // clears the queue, and reports true.
    private boolean checkAndSetTerminated() {
        if (!isTerminated()) {
            return false;
        }
        final CoreSubscriber<? super Message> subscriber = downstream.get();
        final Throwable error = lastError;
        if (error != null) {
            subscriber.onError(error);
        } else {
            subscriber.onComplete();
        }
        if (currentLink != null) {
            currentLink.dispose();
        }
        messageQueue.clear();
        return true;
    }

    // Computes how many credits to place on the link for the current downstream demand; 0 when there
    // is no subscriber or no demand. Unbounded demand maps to a single credit at a time.
    private int getCreditsToAdd() {
        final CoreSubscriber<? super Message> subscriber = downstream.get();
        final long r = requested;
        if (subscriber == null || r == 0) {
            logger.verbose("Not adding credits. No downstream subscribers or items requested.");
            linkCreditsAdded.set(false);
            return 0;
        }
        linkCreditsAdded.set(true);
        return r == Long.MAX_VALUE ? 1 : Long.valueOf(r).intValue();
    }
}
/**
 * Processes AMQP receive links emitted by an upstream publisher and relays their {@link Message}s to a
 * single downstream subscriber, honouring Reactive Streams back-pressure by translating requests into
 * link credits.
 *
 * NOTE(review): in this copy the body of {@code onNext(AmqpReceiveLink)} appears to have been elided
 * between the two {@code @Override} markers below — confirm against the canonical source file.
 */
class AmqpReceiveLinkProcessor extends FluxProcessor<AmqpReceiveLink, Message> implements Subscription {
    private final ClientLogger logger = new ClientLogger(AmqpReceiveLinkProcessor.class);
    // Guards swaps of currentLink/currentLinkSubscriptions.
    private final Object lock = new Object();
    private final AtomicBoolean isTerminated = new AtomicBoolean();
    private final AtomicInteger retryAttempts = new AtomicInteger();
    // Messages received from the active link; drained to the downstream subscriber in drainQueue().
    private final Deque<Message> messageQueue = new ConcurrentLinkedDeque<>();
    // Whether credits have already been placed on the current link for the outstanding request.
    private final AtomicBoolean linkCreditsAdded = new AtomicBoolean();
    // At most one downstream subscriber is supported.
    private final AtomicReference<CoreSubscriber<? super Message>> downstream = new AtomicReference<>();
    // Work-in-progress counter so only one thread runs the drain loop at a time.
    private final AtomicInteger wip = new AtomicInteger();
    private final int prefetch;
    private final AmqpRetryPolicy retryPolicy;
    private final Disposable parentConnection;
    private volatile Throwable lastError;
    private volatile boolean isCancelled;
    private volatile AmqpReceiveLink currentLink;
    private volatile Disposable currentLinkSubscriptions;
    private volatile Disposable retrySubscription;
    private volatile Subscription upstream;
    private static final AtomicReferenceFieldUpdater<AmqpReceiveLinkProcessor, Subscription> UPSTREAM =
        AtomicReferenceFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, Subscription.class,
            "upstream");
    private volatile long requested;
    private static final AtomicLongFieldUpdater<AmqpReceiveLinkProcessor> REQUESTED =
        AtomicLongFieldUpdater.newUpdater(AmqpReceiveLinkProcessor.class, "requested");

    /**
     * Creates an instance of {@link AmqpReceiveLinkProcessor}.
     *
     * @param prefetch The number of messages to initially fetch.
     * @param retryPolicy Retry policy to apply when fetching a new AMQP channel.
     * @param parentConnection Represents the parent connection.
     *
     * @throws NullPointerException if {@code retryPolicy} or {@code parentConnection} is null.
     * @throws IllegalArgumentException if {@code prefetch} is less than 0.
     */
    public AmqpReceiveLinkProcessor(int prefetch, AmqpRetryPolicy retryPolicy, Disposable parentConnection) {
        this.retryPolicy = Objects.requireNonNull(retryPolicy, "'retryPolicy' cannot be null.");
        this.parentConnection = Objects.requireNonNull(parentConnection, "'parentConnection' cannot be null.");
        if (prefetch < 0) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'prefetch' cannot be less than 0."));
        }
        this.prefetch = prefetch;
    }

    /**
     * Gets the error associated with this processor.
     *
     * @return Error associated with this processor. {@code null} if there is no error.
     */
    @Override
    public Throwable getError() {
        return lastError;
    }

    /**
     * Gets whether or not the processor is terminated.
     *
     * @return {@code true} if the processor has terminated or been cancelled; false otherwise.
     */
    @Override
    public boolean isTerminated() {
        return isTerminated.get() || isCancelled;
    }

    /**
     * When a subscription is obtained from upstream publisher. The upstream may only be set once.
     *
     * @param subscription Subscription to upstream publisher.
     */
    @Override
    public void onSubscribe(Subscription subscription) {
        Objects.requireNonNull(subscription, "'subscription' cannot be null");
        logger.info("Setting new subscription for receive link processor");
        if (!Operators.setOnce(UPSTREAM, this, subscription)) {
            throw logger.logExceptionAsError(new IllegalStateException("Cannot set upstream twice."));
        }
        // Immediately ask upstream for the first receive link.
        requestUpstream();
    }

    @Override
    public int getPrefetch() {
        return prefetch;
    }

    /**
     * When the next AMQP link is fetched.
     *
     * @param next The next AMQP receive link.
     */
    @Override
    // NOTE(review): the onNext(AmqpReceiveLink) method body is missing at this point in this copy.
    /**
     * Sets up the downstream subscriber.
     *
     * @param actual The downstream subscriber.
     *
     * @throws IllegalStateException if there is already a downstream subscriber.
     */
    @Override
    public void subscribe(CoreSubscriber<? super Message> actual) {
        Objects.requireNonNull(actual, "'actual' cannot be null.");
        // Terminate the new subscriber immediately when the processor is finished or can never emit.
        final boolean terminateSubscriber = isTerminated()
            || (currentLink == null && upstream == Operators.cancelledSubscription());
        if (isTerminated()) {
            final AmqpReceiveLink link = currentLink;
            final String linkName = link != null ? link.getLinkName() : "n/a";
            final String entityPath = link != null ? link.getEntityPath() : "n/a";
            logger.info("linkName[{}] entityPath[{}]. AmqpReceiveLink is already terminated.",
                linkName, entityPath);
        } else if (currentLink == null && upstream == Operators.cancelledSubscription()) {
            logger.info("There is no current link and upstream is terminated.");
        }
        if (terminateSubscriber) {
            actual.onSubscribe(Operators.emptySubscription());
            if (hasError()) {
                actual.onError(lastError);
            } else {
                actual.onComplete();
            }
            return;
        }
        // Only one downstream subscriber is allowed.
        if (downstream.compareAndSet(null, actual)) {
            actual.onSubscribe(this);
            drain();
        } else {
            Operators.error(actual, logger.logExceptionAsError(new IllegalStateException(
                "There is already one downstream subscriber.'")));
        }
    }

    /**
     * When an error occurs from the upstream publisher. If the {@code throwable} is a transient failure,
     * another AMQP element is requested if the {@link AmqpRetryPolicy} allows. Otherwise, the processor
     * closes.
     *
     * @param throwable Error that occurred in upstream publisher.
     */
    @Override
    public void onError(Throwable throwable) {
        Objects.requireNonNull(throwable, "'throwable' is required.");
        logger.info("Error on receive link {}", currentLink, throwable);
        if (isTerminated() || isCancelled) {
            logger.info("AmqpReceiveLinkProcessor is terminated. Cannot process another error.", throwable);
            Operators.onErrorDropped(throwable, currentContext());
            return;
        }
        if (parentConnection.isDisposed()) {
            logger.info("Parent connection is disposed. Not reopening on error.");
        }
        lastError = throwable;
        isTerminated.set(true);
        final CoreSubscriber<? super Message> subscriber = downstream.get();
        if (subscriber != null) {
            subscriber.onError(throwable);
        }
        onDispose();
    }

    /**
     * When the upstream publisher has no more items to emit.
     */
    @Override
    public void onComplete() {
        logger.info("Receive link completed {}", currentLink);
        UPSTREAM.set(this, Operators.cancelledSubscription());
    }

    @Override
    public void dispose() {
        logger.info("Disposing receive link {}", currentLink);
        // getAndSet ensures the tear-down below runs at most once.
        if (isTerminated.getAndSet(true)) {
            return;
        }
        drain();
        onDispose();
    }

    /**
     * When downstream subscriber makes a back-pressure request.
     */
    @Override
    public void request(long request) {
        if (!Operators.validate(request)) {
            logger.warning("Invalid request: {}", request);
            return;
        }
        Operators.addCap(REQUESTED, this, request);
        final AmqpReceiveLink link = currentLink;
        // Place credits on the link if none are outstanding for the new demand.
        if (link != null && !linkCreditsAdded.getAndSet(true)) {
            int credits = getCreditsToAdd();
            logger.verbose("Link credits not yet added. Adding: {}", credits);
            link.addCredits(credits);
        }
        drain();
    }

    /**
     * When downstream subscriber cancels their subscription.
     */
    @Override
    public void cancel() {
        if (isCancelled) {
            return;
        }
        isCancelled = true;
        drain();
    }

    /**
     * Requests another receive link from upstream, unless terminated, there is no upstream, the upstream
     * finished, or a link already exists.
     */
    private void requestUpstream() {
        if (isTerminated()) {
            logger.info("Processor is terminated. Not requesting another link.");
            return;
        } else if (UPSTREAM.get(this) == null) {
            logger.info("There is no upstream. Not requesting another link.");
            return;
        } else if (UPSTREAM.get(this) == Operators.cancelledSubscription()) {
            logger.info("Upstream is cancelled or complete. Not requesting another link.");
            return;
        }
        synchronized (lock) {
            if (currentLink != null) {
                logger.info("Current link exists. Not requesting another link.");
                return;
            }
        }
        logger.info("Requesting a new AmqpReceiveLink from upstream.");
        UPSTREAM.get(this).request(1L);
    }

    // Disposes of the retry subscription, the current link and its subscriptions, and discards any
    // queued messages.
    private void onDispose() {
        if (retrySubscription != null && !retrySubscription.isDisposed()) {
            retrySubscription.dispose();
        }
        if (currentLink != null) {
            currentLink.dispose();
        }
        currentLink = null;
        if (currentLinkSubscriptions != null) {
            currentLinkSubscriptions.dispose();
        }
        Operators.onDiscardQueueWithClear(messageQueue, currentContext(), null);
    }

    // Enters the drain loop if no other thread currently holds it (classic wip-guarded drain).
    private void drain() {
        if (!wip.compareAndSet(0, 1)) {
            return;
        }
        try {
            drainQueue();
        } finally {
            if (wip.decrementAndGet() != 0) {
                logger.warning("There is another worker in drainLoop. But there should only be 1 worker.");
            }
        }
    }

    // Emits queued messages to the downstream subscriber, bounded by the requested amount, and
    // decrements the request counter accordingly.
    private void drainQueue() {
        final CoreSubscriber<? super Message> subscriber = downstream.get();
        if (subscriber == null || checkAndSetTerminated()) {
            return;
        }
        long numberRequested = requested;
        boolean isEmpty = messageQueue.isEmpty();
        while (numberRequested != 0L && !isEmpty) {
            if (checkAndSetTerminated()) {
                break;
            }
            long numberEmitted = 0L;
            while (numberRequested != numberEmitted) {
                if (isEmpty && checkAndSetTerminated()) {
                    break;
                }
                Message message = messageQueue.poll();
                if (message == null) {
                    break;
                }
                if (isCancelled) {
                    // Downstream cancelled mid-drain: discard the message and the rest of the queue.
                    Operators.onDiscard(message, subscriber.currentContext());
                    Operators.onDiscardQueueWithClear(messageQueue, subscriber.currentContext(), null);
                    return;
                }
                try {
                    subscriber.onNext(message);
                } catch (Exception e) {
                    logger.error("Exception occurred while handling downstream onNext operation.", e);
                    throw logger.logExceptionAsError(Exceptions.propagate(
                        Operators.onOperatorError(upstream, e, message, subscriber.currentContext())));
                }
                numberEmitted++;
                isEmpty = messageQueue.isEmpty();
            }
            if (requested != Long.MAX_VALUE) {
                numberRequested = REQUESTED.addAndGet(this, -numberEmitted);
            }
        }
    }

    // If terminated, signals the terminal event (error or complete) downstream, disposes the link,
    // clears the queue, and reports true.
    private boolean checkAndSetTerminated() {
        if (!isTerminated()) {
            return false;
        }
        final CoreSubscriber<? super Message> subscriber = downstream.get();
        final Throwable error = lastError;
        if (error != null) {
            subscriber.onError(error);
        } else {
            subscriber.onComplete();
        }
        if (currentLink != null) {
            currentLink.dispose();
        }
        messageQueue.clear();
        return true;
    }

    // Computes how many credits to place on the link for the current downstream demand; 0 when there
    // is no subscriber or no demand. Unbounded demand maps to a single credit at a time.
    private int getCreditsToAdd() {
        final CoreSubscriber<? super Message> subscriber = downstream.get();
        final long r = requested;
        if (subscriber == null || r == 0) {
            logger.verbose("Not adding credits. No downstream subscribers or items requested.");
            linkCreditsAdded.set(false);
            return 0;
        }
        linkCreditsAdded.set(true);
        return r == Long.MAX_VALUE ? 1 : Long.valueOf(r).intValue();
    }
}
Rules might also use the `ns1` namespace prefix (instead of `ns0`) in some cases.
public String serialize(Object object, SerializerEncoding encoding) throws IOException { final String contents = jacksonAdapter.serialize(object, encoding); final Class<?> clazz = object.getClass(); if (!CreateQueueBody.class.equals(clazz) && !CreateRuleBody.class.equals(clazz) && !CreateSubscriptionBody.class.equals(clazz)) { return contents; } final Matcher namespaceMatcher = NAMESPACE_PATTERN.matcher(contents); if (!namespaceMatcher.find()) { logger.info("Could not find {} in {}", NAMESPACE_PATTERN.pattern(), contents); return contents; } final String namespace = namespaceMatcher.group("namespace"); final String replaced = contents .replaceAll(namespace + ":", "") .replace("xmlns:" + namespace + "=", "xmlns="); if (!CreateRuleBody.class.equals(clazz)) { return replaced; } final Matcher filterType = FILTER_ACTION_PATTERN.matcher(replaced); if (filterType.find()) { return filterType.replaceAll("<$1 xmlns:ns0=\"http: } else { logger.info("Could not find filter name pattern '{}' in {}.", FILTER_ACTION_PATTERN.pattern(), contents); return replaced; } }
return filterType.replaceAll("<$1 xmlns:ns0=\"http:
public String serialize(Object object, SerializerEncoding encoding) throws IOException { final String contents = jacksonAdapter.serialize(object, encoding); final Class<?> clazz = object.getClass(); if (!CreateQueueBody.class.equals(clazz) && !CreateRuleBody.class.equals(clazz) && !CreateSubscriptionBody.class.equals(clazz)) { return contents; } final Matcher namespaceMatcher = NAMESPACE_PATTERN.matcher(contents); if (!namespaceMatcher.find()) { logger.warning("Could not find {} in {}", NAMESPACE_PATTERN.pattern(), contents); return contents; } final String namespace = namespaceMatcher.group("namespace"); final String replaced = contents .replaceAll(namespace + ":", "") .replace("xmlns:" + namespace + "=", "xmlns="); if (!CreateRuleBody.class.equals(clazz)) { return replaced; } final Matcher filterType = FILTER_ACTION_PATTERN.matcher(replaced); if (filterType.find()) { return filterType.replaceAll("<$1 xmlns:ns0=\"http: } else { logger.warning("Could not find filter name pattern '{}' in {}.", FILTER_ACTION_PATTERN.pattern(), contents); return replaced; } }
class ServiceBusManagementSerializer implements SerializerAdapter { private static final String MINIMUM_DATETIME_FORMATTED = ">0001-01-01T00:00:00Z</"; private static final Pattern MINIMUM_DATETIME_PATTERN = Pattern.compile(">0001-01-01T00:00:00</", Pattern.MULTILINE); private static final Pattern NAMESPACE_PATTERN = Pattern.compile( "xmlns:(?<namespace>\\w+)=\"http: Pattern.MULTILINE); private static final Pattern FILTER_ACTION_PATTERN = Pattern.compile("<(Filter|Action) type=", Pattern.MULTILINE); private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(ServiceBusManagementSerializer.class); @Override @Override public String serializeRaw(Object object) { return jacksonAdapter.serializeRaw(object); } @Override public String serializeList(List<?> list, CollectionFormat format) { return jacksonAdapter.serializeList(list, format); } public <T> T deserialize(String value, Type type) throws IOException { final Matcher matcher = MINIMUM_DATETIME_PATTERN.matcher(value); final String serializedString; if (matcher.find(0)) { logger.verbose("Found instances of '{}' to replace. Value: {}", MINIMUM_DATETIME_PATTERN.pattern(), value); serializedString = matcher.replaceAll(MINIMUM_DATETIME_FORMATTED); } else { serializedString = value; } return jacksonAdapter.deserialize(serializedString, type, SerializerEncoding.XML); } @Override @SuppressWarnings("unchecked") public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException { if (encoding != SerializerEncoding.XML) { return jacksonAdapter.deserialize(value, type, encoding); } if (Object.class == type) { return (T) value; } else { return (T) deserialize(value, type); } } @Override public <T> T deserialize(HttpHeaders headers, Type type) throws IOException { return jacksonAdapter.deserialize(headers, type); } }
class ServiceBusManagementSerializer implements SerializerAdapter { private static final String MINIMUM_DATETIME_FORMATTED = ">0001-01-01T00:00:00Z</"; private static final Pattern MINIMUM_DATETIME_PATTERN = Pattern.compile(">0001-01-01T00:00:00</", Pattern.MULTILINE); private static final Pattern NAMESPACE_PATTERN = Pattern.compile( "xmlns:(?<namespace>\\w+)=\"http: Pattern.MULTILINE); private static final Pattern FILTER_ACTION_PATTERN = Pattern.compile("<(Filter|Action) type=", Pattern.MULTILINE); private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(ServiceBusManagementSerializer.class); @Override @Override public String serializeRaw(Object object) { return jacksonAdapter.serializeRaw(object); } @Override public String serializeList(List<?> list, CollectionFormat format) { return jacksonAdapter.serializeList(list, format); } public <T> T deserialize(String value, Type type) throws IOException { final Matcher matcher = MINIMUM_DATETIME_PATTERN.matcher(value); final String serializedString; if (matcher.find(0)) { logger.verbose("Found instances of '{}' to replace. Value: {}", MINIMUM_DATETIME_PATTERN.pattern(), value); serializedString = matcher.replaceAll(MINIMUM_DATETIME_FORMATTED); } else { serializedString = value; } return jacksonAdapter.deserialize(serializedString, type, SerializerEncoding.XML); } @Override @SuppressWarnings("unchecked") public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException { if (encoding != SerializerEncoding.XML) { return jacksonAdapter.deserialize(value, type, encoding); } if (Object.class == type) { return (T) value; } else { return (T) deserialize(value, type); } } @Override public <T> T deserialize(HttpHeaders headers, Type type) throws IOException { return jacksonAdapter.deserialize(headers, type); } }
Do you know when? I couldn't find an example.
public String serialize(Object object, SerializerEncoding encoding) throws IOException { final String contents = jacksonAdapter.serialize(object, encoding); final Class<?> clazz = object.getClass(); if (!CreateQueueBody.class.equals(clazz) && !CreateRuleBody.class.equals(clazz) && !CreateSubscriptionBody.class.equals(clazz)) { return contents; } final Matcher namespaceMatcher = NAMESPACE_PATTERN.matcher(contents); if (!namespaceMatcher.find()) { logger.info("Could not find {} in {}", NAMESPACE_PATTERN.pattern(), contents); return contents; } final String namespace = namespaceMatcher.group("namespace"); final String replaced = contents .replaceAll(namespace + ":", "") .replace("xmlns:" + namespace + "=", "xmlns="); if (!CreateRuleBody.class.equals(clazz)) { return replaced; } final Matcher filterType = FILTER_ACTION_PATTERN.matcher(replaced); if (filterType.find()) { return filterType.replaceAll("<$1 xmlns:ns0=\"http: } else { logger.info("Could not find filter name pattern '{}' in {}.", FILTER_ACTION_PATTERN.pattern(), contents); return replaced; } }
return filterType.replaceAll("<$1 xmlns:ns0=\"http:
public String serialize(Object object, SerializerEncoding encoding) throws IOException { final String contents = jacksonAdapter.serialize(object, encoding); final Class<?> clazz = object.getClass(); if (!CreateQueueBody.class.equals(clazz) && !CreateRuleBody.class.equals(clazz) && !CreateSubscriptionBody.class.equals(clazz)) { return contents; } final Matcher namespaceMatcher = NAMESPACE_PATTERN.matcher(contents); if (!namespaceMatcher.find()) { logger.warning("Could not find {} in {}", NAMESPACE_PATTERN.pattern(), contents); return contents; } final String namespace = namespaceMatcher.group("namespace"); final String replaced = contents .replaceAll(namespace + ":", "") .replace("xmlns:" + namespace + "=", "xmlns="); if (!CreateRuleBody.class.equals(clazz)) { return replaced; } final Matcher filterType = FILTER_ACTION_PATTERN.matcher(replaced); if (filterType.find()) { return filterType.replaceAll("<$1 xmlns:ns0=\"http: } else { logger.warning("Could not find filter name pattern '{}' in {}.", FILTER_ACTION_PATTERN.pattern(), contents); return replaced; } }
class ServiceBusManagementSerializer implements SerializerAdapter { private static final String MINIMUM_DATETIME_FORMATTED = ">0001-01-01T00:00:00Z</"; private static final Pattern MINIMUM_DATETIME_PATTERN = Pattern.compile(">0001-01-01T00:00:00</", Pattern.MULTILINE); private static final Pattern NAMESPACE_PATTERN = Pattern.compile( "xmlns:(?<namespace>\\w+)=\"http: Pattern.MULTILINE); private static final Pattern FILTER_ACTION_PATTERN = Pattern.compile("<(Filter|Action) type=", Pattern.MULTILINE); private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(ServiceBusManagementSerializer.class); @Override @Override public String serializeRaw(Object object) { return jacksonAdapter.serializeRaw(object); } @Override public String serializeList(List<?> list, CollectionFormat format) { return jacksonAdapter.serializeList(list, format); } public <T> T deserialize(String value, Type type) throws IOException { final Matcher matcher = MINIMUM_DATETIME_PATTERN.matcher(value); final String serializedString; if (matcher.find(0)) { logger.verbose("Found instances of '{}' to replace. Value: {}", MINIMUM_DATETIME_PATTERN.pattern(), value); serializedString = matcher.replaceAll(MINIMUM_DATETIME_FORMATTED); } else { serializedString = value; } return jacksonAdapter.deserialize(serializedString, type, SerializerEncoding.XML); } @Override @SuppressWarnings("unchecked") public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException { if (encoding != SerializerEncoding.XML) { return jacksonAdapter.deserialize(value, type, encoding); } if (Object.class == type) { return (T) value; } else { return (T) deserialize(value, type); } } @Override public <T> T deserialize(HttpHeaders headers, Type type) throws IOException { return jacksonAdapter.deserialize(headers, type); } }
class ServiceBusManagementSerializer implements SerializerAdapter { private static final String MINIMUM_DATETIME_FORMATTED = ">0001-01-01T00:00:00Z</"; private static final Pattern MINIMUM_DATETIME_PATTERN = Pattern.compile(">0001-01-01T00:00:00</", Pattern.MULTILINE); private static final Pattern NAMESPACE_PATTERN = Pattern.compile( "xmlns:(?<namespace>\\w+)=\"http: Pattern.MULTILINE); private static final Pattern FILTER_ACTION_PATTERN = Pattern.compile("<(Filter|Action) type=", Pattern.MULTILINE); private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(ServiceBusManagementSerializer.class); @Override @Override public String serializeRaw(Object object) { return jacksonAdapter.serializeRaw(object); } @Override public String serializeList(List<?> list, CollectionFormat format) { return jacksonAdapter.serializeList(list, format); } public <T> T deserialize(String value, Type type) throws IOException { final Matcher matcher = MINIMUM_DATETIME_PATTERN.matcher(value); final String serializedString; if (matcher.find(0)) { logger.verbose("Found instances of '{}' to replace. Value: {}", MINIMUM_DATETIME_PATTERN.pattern(), value); serializedString = matcher.replaceAll(MINIMUM_DATETIME_FORMATTED); } else { serializedString = value; } return jacksonAdapter.deserialize(serializedString, type, SerializerEncoding.XML); } @Override @SuppressWarnings("unchecked") public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException { if (encoding != SerializerEncoding.XML) { return jacksonAdapter.deserialize(value, type, encoding); } if (Object.class == type) { return (T) value; } else { return (T) deserialize(value, type); } } @Override public <T> T deserialize(HttpHeaders headers, Type type) throws IOException { return jacksonAdapter.deserialize(headers, type); } }
This is a Python generated XML that has both ns0 and ns1. Disregard this comment if Java generates differently without ns1. In the following file, "ns0" doesn't hurt. "ns1" hurts. ```xml <ns0:entry xmlns:ns0="http://www.w3.org/2005/Atom" xmlns:ns1="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> <ns0:content type="application/xml"> <ns1:RuleDescription> <ns1:Filter xsi:type="SqlFilter"> <ns1:CompatibilityLevel>20</ns1:CompatibilityLevel> <ns1:RequiresPreprocessing>true</ns1:RequiresPreprocessing> </ns1:Filter> <ns1:Action xsi:type="EmptyRuleAction"/> <ns1:Name>rule1</ns1:Name> </ns1:RuleDescription> </ns0:content> </ns0:entry> ```
public String serialize(Object object, SerializerEncoding encoding) throws IOException { final String contents = jacksonAdapter.serialize(object, encoding); final Class<?> clazz = object.getClass(); if (!CreateQueueBody.class.equals(clazz) && !CreateRuleBody.class.equals(clazz) && !CreateSubscriptionBody.class.equals(clazz)) { return contents; } final Matcher namespaceMatcher = NAMESPACE_PATTERN.matcher(contents); if (!namespaceMatcher.find()) { logger.info("Could not find {} in {}", NAMESPACE_PATTERN.pattern(), contents); return contents; } final String namespace = namespaceMatcher.group("namespace"); final String replaced = contents .replaceAll(namespace + ":", "") .replace("xmlns:" + namespace + "=", "xmlns="); if (!CreateRuleBody.class.equals(clazz)) { return replaced; } final Matcher filterType = FILTER_ACTION_PATTERN.matcher(replaced); if (filterType.find()) { return filterType.replaceAll("<$1 xmlns:ns0=\"http: } else { logger.info("Could not find filter name pattern '{}' in {}.", FILTER_ACTION_PATTERN.pattern(), contents); return replaced; } }
return filterType.replaceAll("<$1 xmlns:ns0=\"http:
public String serialize(Object object, SerializerEncoding encoding) throws IOException { final String contents = jacksonAdapter.serialize(object, encoding); final Class<?> clazz = object.getClass(); if (!CreateQueueBody.class.equals(clazz) && !CreateRuleBody.class.equals(clazz) && !CreateSubscriptionBody.class.equals(clazz)) { return contents; } final Matcher namespaceMatcher = NAMESPACE_PATTERN.matcher(contents); if (!namespaceMatcher.find()) { logger.warning("Could not find {} in {}", NAMESPACE_PATTERN.pattern(), contents); return contents; } final String namespace = namespaceMatcher.group("namespace"); final String replaced = contents .replaceAll(namespace + ":", "") .replace("xmlns:" + namespace + "=", "xmlns="); if (!CreateRuleBody.class.equals(clazz)) { return replaced; } final Matcher filterType = FILTER_ACTION_PATTERN.matcher(replaced); if (filterType.find()) { return filterType.replaceAll("<$1 xmlns:ns0=\"http: } else { logger.warning("Could not find filter name pattern '{}' in {}.", FILTER_ACTION_PATTERN.pattern(), contents); return replaced; } }
class ServiceBusManagementSerializer implements SerializerAdapter { private static final String MINIMUM_DATETIME_FORMATTED = ">0001-01-01T00:00:00Z</"; private static final Pattern MINIMUM_DATETIME_PATTERN = Pattern.compile(">0001-01-01T00:00:00</", Pattern.MULTILINE); private static final Pattern NAMESPACE_PATTERN = Pattern.compile( "xmlns:(?<namespace>\\w+)=\"http: Pattern.MULTILINE); private static final Pattern FILTER_ACTION_PATTERN = Pattern.compile("<(Filter|Action) type=", Pattern.MULTILINE); private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(ServiceBusManagementSerializer.class); @Override @Override public String serializeRaw(Object object) { return jacksonAdapter.serializeRaw(object); } @Override public String serializeList(List<?> list, CollectionFormat format) { return jacksonAdapter.serializeList(list, format); } public <T> T deserialize(String value, Type type) throws IOException { final Matcher matcher = MINIMUM_DATETIME_PATTERN.matcher(value); final String serializedString; if (matcher.find(0)) { logger.verbose("Found instances of '{}' to replace. Value: {}", MINIMUM_DATETIME_PATTERN.pattern(), value); serializedString = matcher.replaceAll(MINIMUM_DATETIME_FORMATTED); } else { serializedString = value; } return jacksonAdapter.deserialize(serializedString, type, SerializerEncoding.XML); } @Override @SuppressWarnings("unchecked") public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException { if (encoding != SerializerEncoding.XML) { return jacksonAdapter.deserialize(value, type, encoding); } if (Object.class == type) { return (T) value; } else { return (T) deserialize(value, type); } } @Override public <T> T deserialize(HttpHeaders headers, Type type) throws IOException { return jacksonAdapter.deserialize(headers, type); } }
class ServiceBusManagementSerializer implements SerializerAdapter { private static final String MINIMUM_DATETIME_FORMATTED = ">0001-01-01T00:00:00Z</"; private static final Pattern MINIMUM_DATETIME_PATTERN = Pattern.compile(">0001-01-01T00:00:00</", Pattern.MULTILINE); private static final Pattern NAMESPACE_PATTERN = Pattern.compile( "xmlns:(?<namespace>\\w+)=\"http: Pattern.MULTILINE); private static final Pattern FILTER_ACTION_PATTERN = Pattern.compile("<(Filter|Action) type=", Pattern.MULTILINE); private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(ServiceBusManagementSerializer.class); @Override @Override public String serializeRaw(Object object) { return jacksonAdapter.serializeRaw(object); } @Override public String serializeList(List<?> list, CollectionFormat format) { return jacksonAdapter.serializeList(list, format); } public <T> T deserialize(String value, Type type) throws IOException { final Matcher matcher = MINIMUM_DATETIME_PATTERN.matcher(value); final String serializedString; if (matcher.find(0)) { logger.verbose("Found instances of '{}' to replace. Value: {}", MINIMUM_DATETIME_PATTERN.pattern(), value); serializedString = matcher.replaceAll(MINIMUM_DATETIME_FORMATTED); } else { serializedString = value; } return jacksonAdapter.deserialize(serializedString, type, SerializerEncoding.XML); } @Override @SuppressWarnings("unchecked") public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException { if (encoding != SerializerEncoding.XML) { return jacksonAdapter.deserialize(value, type, encoding); } if (Object.class == type) { return (T) value; } else { return (T) deserialize(value, type); } } @Override public <T> T deserialize(HttpHeaders headers, Type type) throws IOException { return jacksonAdapter.deserialize(headers, type); } }
Normally validity of XML can be verified at generation time, I am not sure if generator are doing that.
public String serialize(Object object, SerializerEncoding encoding) throws IOException { final String contents = jacksonAdapter.serialize(object, encoding); final Class<?> clazz = object.getClass(); if (!CreateQueueBody.class.equals(clazz) && !CreateRuleBody.class.equals(clazz) && !CreateSubscriptionBody.class.equals(clazz)) { return contents; } final Matcher namespaceMatcher = NAMESPACE_PATTERN.matcher(contents); if (!namespaceMatcher.find()) { logger.info("Could not find {} in {}", NAMESPACE_PATTERN.pattern(), contents); return contents; } final String namespace = namespaceMatcher.group("namespace"); final String replaced = contents .replaceAll(namespace + ":", "") .replace("xmlns:" + namespace + "=", "xmlns="); if (!CreateRuleBody.class.equals(clazz)) { return replaced; } final Matcher filterType = FILTER_ACTION_PATTERN.matcher(replaced); if (filterType.find()) { return filterType.replaceAll("<$1 xmlns:ns0=\"http: } else { logger.info("Could not find filter name pattern '{}' in {}.", FILTER_ACTION_PATTERN.pattern(), contents); return replaced; } }
return filterType.replaceAll("<$1 xmlns:ns0=\"http:
public String serialize(Object object, SerializerEncoding encoding) throws IOException { final String contents = jacksonAdapter.serialize(object, encoding); final Class<?> clazz = object.getClass(); if (!CreateQueueBody.class.equals(clazz) && !CreateRuleBody.class.equals(clazz) && !CreateSubscriptionBody.class.equals(clazz)) { return contents; } final Matcher namespaceMatcher = NAMESPACE_PATTERN.matcher(contents); if (!namespaceMatcher.find()) { logger.warning("Could not find {} in {}", NAMESPACE_PATTERN.pattern(), contents); return contents; } final String namespace = namespaceMatcher.group("namespace"); final String replaced = contents .replaceAll(namespace + ":", "") .replace("xmlns:" + namespace + "=", "xmlns="); if (!CreateRuleBody.class.equals(clazz)) { return replaced; } final Matcher filterType = FILTER_ACTION_PATTERN.matcher(replaced); if (filterType.find()) { return filterType.replaceAll("<$1 xmlns:ns0=\"http: } else { logger.warning("Could not find filter name pattern '{}' in {}.", FILTER_ACTION_PATTERN.pattern(), contents); return replaced; } }
class ServiceBusManagementSerializer implements SerializerAdapter { private static final String MINIMUM_DATETIME_FORMATTED = ">0001-01-01T00:00:00Z</"; private static final Pattern MINIMUM_DATETIME_PATTERN = Pattern.compile(">0001-01-01T00:00:00</", Pattern.MULTILINE); private static final Pattern NAMESPACE_PATTERN = Pattern.compile( "xmlns:(?<namespace>\\w+)=\"http: Pattern.MULTILINE); private static final Pattern FILTER_ACTION_PATTERN = Pattern.compile("<(Filter|Action) type=", Pattern.MULTILINE); private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(ServiceBusManagementSerializer.class); @Override @Override public String serializeRaw(Object object) { return jacksonAdapter.serializeRaw(object); } @Override public String serializeList(List<?> list, CollectionFormat format) { return jacksonAdapter.serializeList(list, format); } public <T> T deserialize(String value, Type type) throws IOException { final Matcher matcher = MINIMUM_DATETIME_PATTERN.matcher(value); final String serializedString; if (matcher.find(0)) { logger.verbose("Found instances of '{}' to replace. Value: {}", MINIMUM_DATETIME_PATTERN.pattern(), value); serializedString = matcher.replaceAll(MINIMUM_DATETIME_FORMATTED); } else { serializedString = value; } return jacksonAdapter.deserialize(serializedString, type, SerializerEncoding.XML); } @Override @SuppressWarnings("unchecked") public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException { if (encoding != SerializerEncoding.XML) { return jacksonAdapter.deserialize(value, type, encoding); } if (Object.class == type) { return (T) value; } else { return (T) deserialize(value, type); } } @Override public <T> T deserialize(HttpHeaders headers, Type type) throws IOException { return jacksonAdapter.deserialize(headers, type); } }
class ServiceBusManagementSerializer implements SerializerAdapter { private static final String MINIMUM_DATETIME_FORMATTED = ">0001-01-01T00:00:00Z</"; private static final Pattern MINIMUM_DATETIME_PATTERN = Pattern.compile(">0001-01-01T00:00:00</", Pattern.MULTILINE); private static final Pattern NAMESPACE_PATTERN = Pattern.compile( "xmlns:(?<namespace>\\w+)=\"http: Pattern.MULTILINE); private static final Pattern FILTER_ACTION_PATTERN = Pattern.compile("<(Filter|Action) type=", Pattern.MULTILINE); private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(ServiceBusManagementSerializer.class); @Override @Override public String serializeRaw(Object object) { return jacksonAdapter.serializeRaw(object); } @Override public String serializeList(List<?> list, CollectionFormat format) { return jacksonAdapter.serializeList(list, format); } public <T> T deserialize(String value, Type type) throws IOException { final Matcher matcher = MINIMUM_DATETIME_PATTERN.matcher(value); final String serializedString; if (matcher.find(0)) { logger.verbose("Found instances of '{}' to replace. Value: {}", MINIMUM_DATETIME_PATTERN.pattern(), value); serializedString = matcher.replaceAll(MINIMUM_DATETIME_FORMATTED); } else { serializedString = value; } return jacksonAdapter.deserialize(serializedString, type, SerializerEncoding.XML); } @Override @SuppressWarnings("unchecked") public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException { if (encoding != SerializerEncoding.XML) { return jacksonAdapter.deserialize(value, type, encoding); } if (Object.class == type) { return (T) value; } else { return (T) deserialize(value, type); } } @Override public <T> T deserialize(HttpHeaders headers, Type type) throws IOException { return jacksonAdapter.deserialize(headers, type); } }
If we could not find namespace pattern , wouldn't this be a error or warning ?
public String serialize(Object object, SerializerEncoding encoding) throws IOException { final String contents = jacksonAdapter.serialize(object, encoding); final Class<?> clazz = object.getClass(); if (!CreateQueueBody.class.equals(clazz) && !CreateRuleBody.class.equals(clazz) && !CreateSubscriptionBody.class.equals(clazz)) { return contents; } final Matcher namespaceMatcher = NAMESPACE_PATTERN.matcher(contents); if (!namespaceMatcher.find()) { logger.info("Could not find {} in {}", NAMESPACE_PATTERN.pattern(), contents); return contents; } final String namespace = namespaceMatcher.group("namespace"); final String replaced = contents .replaceAll(namespace + ":", "") .replace("xmlns:" + namespace + "=", "xmlns="); if (!CreateRuleBody.class.equals(clazz)) { return replaced; } final Matcher filterType = FILTER_ACTION_PATTERN.matcher(replaced); if (filterType.find()) { return filterType.replaceAll("<$1 xmlns:ns0=\"http: } else { logger.info("Could not find filter name pattern '{}' in {}.", FILTER_ACTION_PATTERN.pattern(), contents); return replaced; } }
return contents;
public String serialize(Object object, SerializerEncoding encoding) throws IOException { final String contents = jacksonAdapter.serialize(object, encoding); final Class<?> clazz = object.getClass(); if (!CreateQueueBody.class.equals(clazz) && !CreateRuleBody.class.equals(clazz) && !CreateSubscriptionBody.class.equals(clazz)) { return contents; } final Matcher namespaceMatcher = NAMESPACE_PATTERN.matcher(contents); if (!namespaceMatcher.find()) { logger.warning("Could not find {} in {}", NAMESPACE_PATTERN.pattern(), contents); return contents; } final String namespace = namespaceMatcher.group("namespace"); final String replaced = contents .replaceAll(namespace + ":", "") .replace("xmlns:" + namespace + "=", "xmlns="); if (!CreateRuleBody.class.equals(clazz)) { return replaced; } final Matcher filterType = FILTER_ACTION_PATTERN.matcher(replaced); if (filterType.find()) { return filterType.replaceAll("<$1 xmlns:ns0=\"http: } else { logger.warning("Could not find filter name pattern '{}' in {}.", FILTER_ACTION_PATTERN.pattern(), contents); return replaced; } }
class ServiceBusManagementSerializer implements SerializerAdapter { private static final String MINIMUM_DATETIME_FORMATTED = ">0001-01-01T00:00:00Z</"; private static final Pattern MINIMUM_DATETIME_PATTERN = Pattern.compile(">0001-01-01T00:00:00</", Pattern.MULTILINE); private static final Pattern NAMESPACE_PATTERN = Pattern.compile( "xmlns:(?<namespace>\\w+)=\"http: Pattern.MULTILINE); private static final Pattern FILTER_ACTION_PATTERN = Pattern.compile("<(Filter|Action) type=", Pattern.MULTILINE); private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(ServiceBusManagementSerializer.class); @Override @Override public String serializeRaw(Object object) { return jacksonAdapter.serializeRaw(object); } @Override public String serializeList(List<?> list, CollectionFormat format) { return jacksonAdapter.serializeList(list, format); } public <T> T deserialize(String value, Type type) throws IOException { final Matcher matcher = MINIMUM_DATETIME_PATTERN.matcher(value); final String serializedString; if (matcher.find(0)) { logger.verbose("Found instances of '{}' to replace. Value: {}", MINIMUM_DATETIME_PATTERN.pattern(), value); serializedString = matcher.replaceAll(MINIMUM_DATETIME_FORMATTED); } else { serializedString = value; } return jacksonAdapter.deserialize(serializedString, type, SerializerEncoding.XML); } @Override @SuppressWarnings("unchecked") public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException { if (encoding != SerializerEncoding.XML) { return jacksonAdapter.deserialize(value, type, encoding); } if (Object.class == type) { return (T) value; } else { return (T) deserialize(value, type); } } @Override public <T> T deserialize(HttpHeaders headers, Type type) throws IOException { return jacksonAdapter.deserialize(headers, type); } }
class ServiceBusManagementSerializer implements SerializerAdapter { private static final String MINIMUM_DATETIME_FORMATTED = ">0001-01-01T00:00:00Z</"; private static final Pattern MINIMUM_DATETIME_PATTERN = Pattern.compile(">0001-01-01T00:00:00</", Pattern.MULTILINE); private static final Pattern NAMESPACE_PATTERN = Pattern.compile( "xmlns:(?<namespace>\\w+)=\"http: Pattern.MULTILINE); private static final Pattern FILTER_ACTION_PATTERN = Pattern.compile("<(Filter|Action) type=", Pattern.MULTILINE); private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(ServiceBusManagementSerializer.class); @Override @Override public String serializeRaw(Object object) { return jacksonAdapter.serializeRaw(object); } @Override public String serializeList(List<?> list, CollectionFormat format) { return jacksonAdapter.serializeList(list, format); } public <T> T deserialize(String value, Type type) throws IOException { final Matcher matcher = MINIMUM_DATETIME_PATTERN.matcher(value); final String serializedString; if (matcher.find(0)) { logger.verbose("Found instances of '{}' to replace. Value: {}", MINIMUM_DATETIME_PATTERN.pattern(), value); serializedString = matcher.replaceAll(MINIMUM_DATETIME_FORMATTED); } else { serializedString = value; } return jacksonAdapter.deserialize(serializedString, type, SerializerEncoding.XML); } @Override @SuppressWarnings("unchecked") public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException { if (encoding != SerializerEncoding.XML) { return jacksonAdapter.deserialize(value, type, encoding); } if (Object.class == type) { return (T) value; } else { return (T) deserialize(value, type); } } @Override public <T> T deserialize(HttpHeaders headers, Type type) throws IOException { return jacksonAdapter.deserialize(headers, type); } }
The XML generated by Jackson is valid. It's the service side that is fickle about what XML it likes to consume, that's why we're doing all these hacks. I verified that we aren't generating multiple namespaces. The reason for this is because we use a global xml namespace, so we don't need n0.
public String serialize(Object object, SerializerEncoding encoding) throws IOException { final String contents = jacksonAdapter.serialize(object, encoding); final Class<?> clazz = object.getClass(); if (!CreateQueueBody.class.equals(clazz) && !CreateRuleBody.class.equals(clazz) && !CreateSubscriptionBody.class.equals(clazz)) { return contents; } final Matcher namespaceMatcher = NAMESPACE_PATTERN.matcher(contents); if (!namespaceMatcher.find()) { logger.info("Could not find {} in {}", NAMESPACE_PATTERN.pattern(), contents); return contents; } final String namespace = namespaceMatcher.group("namespace"); final String replaced = contents .replaceAll(namespace + ":", "") .replace("xmlns:" + namespace + "=", "xmlns="); if (!CreateRuleBody.class.equals(clazz)) { return replaced; } final Matcher filterType = FILTER_ACTION_PATTERN.matcher(replaced); if (filterType.find()) { return filterType.replaceAll("<$1 xmlns:ns0=\"http: } else { logger.info("Could not find filter name pattern '{}' in {}.", FILTER_ACTION_PATTERN.pattern(), contents); return replaced; } }
return filterType.replaceAll("<$1 xmlns:ns0=\"http:
public String serialize(Object object, SerializerEncoding encoding) throws IOException { final String contents = jacksonAdapter.serialize(object, encoding); final Class<?> clazz = object.getClass(); if (!CreateQueueBody.class.equals(clazz) && !CreateRuleBody.class.equals(clazz) && !CreateSubscriptionBody.class.equals(clazz)) { return contents; } final Matcher namespaceMatcher = NAMESPACE_PATTERN.matcher(contents); if (!namespaceMatcher.find()) { logger.warning("Could not find {} in {}", NAMESPACE_PATTERN.pattern(), contents); return contents; } final String namespace = namespaceMatcher.group("namespace"); final String replaced = contents .replaceAll(namespace + ":", "") .replace("xmlns:" + namespace + "=", "xmlns="); if (!CreateRuleBody.class.equals(clazz)) { return replaced; } final Matcher filterType = FILTER_ACTION_PATTERN.matcher(replaced); if (filterType.find()) { return filterType.replaceAll("<$1 xmlns:ns0=\"http: } else { logger.warning("Could not find filter name pattern '{}' in {}.", FILTER_ACTION_PATTERN.pattern(), contents); return replaced; } }
class ServiceBusManagementSerializer implements SerializerAdapter { private static final String MINIMUM_DATETIME_FORMATTED = ">0001-01-01T00:00:00Z</"; private static final Pattern MINIMUM_DATETIME_PATTERN = Pattern.compile(">0001-01-01T00:00:00</", Pattern.MULTILINE); private static final Pattern NAMESPACE_PATTERN = Pattern.compile( "xmlns:(?<namespace>\\w+)=\"http: Pattern.MULTILINE); private static final Pattern FILTER_ACTION_PATTERN = Pattern.compile("<(Filter|Action) type=", Pattern.MULTILINE); private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(ServiceBusManagementSerializer.class); @Override @Override public String serializeRaw(Object object) { return jacksonAdapter.serializeRaw(object); } @Override public String serializeList(List<?> list, CollectionFormat format) { return jacksonAdapter.serializeList(list, format); } public <T> T deserialize(String value, Type type) throws IOException { final Matcher matcher = MINIMUM_DATETIME_PATTERN.matcher(value); final String serializedString; if (matcher.find(0)) { logger.verbose("Found instances of '{}' to replace. Value: {}", MINIMUM_DATETIME_PATTERN.pattern(), value); serializedString = matcher.replaceAll(MINIMUM_DATETIME_FORMATTED); } else { serializedString = value; } return jacksonAdapter.deserialize(serializedString, type, SerializerEncoding.XML); } @Override @SuppressWarnings("unchecked") public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException { if (encoding != SerializerEncoding.XML) { return jacksonAdapter.deserialize(value, type, encoding); } if (Object.class == type) { return (T) value; } else { return (T) deserialize(value, type); } } @Override public <T> T deserialize(HttpHeaders headers, Type type) throws IOException { return jacksonAdapter.deserialize(headers, type); } }
class ServiceBusManagementSerializer implements SerializerAdapter { private static final String MINIMUM_DATETIME_FORMATTED = ">0001-01-01T00:00:00Z</"; private static final Pattern MINIMUM_DATETIME_PATTERN = Pattern.compile(">0001-01-01T00:00:00</", Pattern.MULTILINE); private static final Pattern NAMESPACE_PATTERN = Pattern.compile( "xmlns:(?<namespace>\\w+)=\"http: Pattern.MULTILINE); private static final Pattern FILTER_ACTION_PATTERN = Pattern.compile("<(Filter|Action) type=", Pattern.MULTILINE); private final JacksonAdapter jacksonAdapter = new JacksonAdapter(); private final ClientLogger logger = new ClientLogger(ServiceBusManagementSerializer.class); @Override @Override public String serializeRaw(Object object) { return jacksonAdapter.serializeRaw(object); } @Override public String serializeList(List<?> list, CollectionFormat format) { return jacksonAdapter.serializeList(list, format); } public <T> T deserialize(String value, Type type) throws IOException { final Matcher matcher = MINIMUM_DATETIME_PATTERN.matcher(value); final String serializedString; if (matcher.find(0)) { logger.verbose("Found instances of '{}' to replace. Value: {}", MINIMUM_DATETIME_PATTERN.pattern(), value); serializedString = matcher.replaceAll(MINIMUM_DATETIME_FORMATTED); } else { serializedString = value; } return jacksonAdapter.deserialize(serializedString, type, SerializerEncoding.XML); } @Override @SuppressWarnings("unchecked") public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException { if (encoding != SerializerEncoding.XML) { return jacksonAdapter.deserialize(value, type, encoding); } if (Object.class == type) { return (T) value; } else { return (T) deserialize(value, type); } } @Override public <T> T deserialize(HttpHeaders headers, Type type) throws IOException { return jacksonAdapter.deserialize(headers, type); } }
receiveMessages calls restartMessageReceiver, which will call receiveMessages again. Will this cause a stack overflow if the processor keeps running for a long enough time?
public void onComplete() { logger.info("Completed receiving messages."); if (isRunning.get()) { restartMessageReceiver(); } }
restartMessageReceiver();
public void onComplete() { logger.info("Completed receiving messages."); if (isRunning.get()) { restartMessageReceiver(); } }
class ServiceBusProcessorClient { private static final int SCHEDULER_INTERVAL_IN_SECONDS = 10; private final ClientLogger logger = new ClientLogger(ServiceBusProcessorClient.class); private final ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder; private final ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder; private final Consumer<ServiceBusProcessorMessageContext> processMessage; private final Consumer<Throwable> processError; private final ServiceBusProcessorClientOptions processorOptions; private final AtomicReference<Subscription> receiverSubscription = new AtomicReference<>(); private final AtomicReference<ServiceBusReceiverAsyncClient> asyncClient = new AtomicReference<>(); private final AtomicBoolean isRunning = new AtomicBoolean(); private ScheduledExecutorService scheduledExecutor; /** * Constructor to create a sessions-enabled processor. * * @param sessionReceiverBuilder The session processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. 
*/ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.sessionReceiverBuilder = Objects.requireNonNull(sessionReceiverBuilder, "'sessionReceiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(sessionReceiverBuilder.buildAsyncClientForProcessor()); this.receiverBuilder = null; } /** * Constructor to create a processor. * * @param receiverBuilder The processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. */ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.receiverBuilder = Objects.requireNonNull(receiverBuilder, "'receiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(receiverBuilder.buildAsyncClient()); this.sessionReceiverBuilder = null; } /** * Starts the processor in the background. 
When this method is called, the processor will initiate a message * receiver that will invoke the message handler when new messages are available. This method is idempotent i.e * calling {@link * after calling {@link * sessions. Calling {@link * a new set of sessions will be processed. */ public synchronized void start() { if (isRunning.getAndSet(true)) { logger.info("Processor is already running"); return; } receiveMessages(); this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(); scheduledExecutor.scheduleWithFixedDelay(() -> { if (this.asyncClient.get().isConnectionClosed()) { restartMessageReceiver(); } }, SCHEDULER_INTERVAL_IN_SECONDS, SCHEDULER_INTERVAL_IN_SECONDS, TimeUnit.SECONDS); } /** * Stops the message processing for this processor. The receiving links and sessions are kept active and this * processor can resume processing messages by calling {@link */ public synchronized void stop() { isRunning.set(false); } /** * Stops message processing and closes the processor. The receiving links and sessions are closed and calling * {@link */ public synchronized void close() { isRunning.set(false); if (receiverSubscription.get() != null) { receiverSubscription.get().cancel(); } asyncClient.get().close(); scheduledExecutor.shutdown(); } /** * Returns {@code true} if the processor is running. If the processor is stopped or closed, this method returns * {@code false}. * * @return {@code true} if the processor is running. 
*/ public synchronized boolean isRunning() { return isRunning.get(); } private synchronized void receiveMessages() { if (receiverSubscription.get() != null) { receiverSubscription.get().request(1); return; } ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.receiveMessages() .parallel(processorOptions.getMaxConcurrentCalls()) .runOn(Schedulers.boundedElastic()) .subscribe(new Subscriber<ServiceBusReceivedMessageContext>() { @Override public void onSubscribe(Subscription subscription) { receiverSubscription.set(subscription); receiverSubscription.get().request(1); } @Override public void onNext(ServiceBusReceivedMessageContext serviceBusReceivedMessageContext) { if (serviceBusReceivedMessageContext.hasError()) { handleError(serviceBusReceivedMessageContext.getThrowable()); } else { try { ServiceBusProcessorMessageContext serviceBusProcessorMessageContext = new ServiceBusProcessorMessageContext(receiverClient, serviceBusReceivedMessageContext); processMessage.accept(serviceBusProcessorMessageContext); } catch (Exception exception) { handleError(exception); } } if (isRunning.get()) { logger.verbose("Requesting 1 more message from upstream"); receiverSubscription.get().request(1); } } @Override public void onError(Throwable throwable) { logger.info("Error receiving messages.", throwable); handleError(throwable); if (isRunning.get()) { restartMessageReceiver(); } } @Override }); } private void handleError(Throwable throwable) { try { processError.accept(throwable); } catch (Exception ex) { logger.verbose("Error from error handler. Ignoring error.", ex); } } private void restartMessageReceiver() { receiverSubscription.set(null); ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.close(); ServiceBusReceiverAsyncClient newReceiverClient = this.receiverBuilder == null ? 
this.sessionReceiverBuilder.buildAsyncClientForProcessor() : this.receiverBuilder.buildAsyncClient(); asyncClient.set(newReceiverClient); receiveMessages(); } }
class ServiceBusProcessorClient implements AutoCloseable { private static final int SCHEDULER_INTERVAL_IN_SECONDS = 10; private final ClientLogger logger = new ClientLogger(ServiceBusProcessorClient.class); private final ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder; private final ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder; private final Consumer<ServiceBusProcessorMessageContext> processMessage; private final Consumer<Throwable> processError; private final ServiceBusProcessorClientOptions processorOptions; private final AtomicReference<Subscription> receiverSubscription = new AtomicReference<>(); private final AtomicReference<ServiceBusReceiverAsyncClient> asyncClient = new AtomicReference<>(); private final AtomicBoolean isRunning = new AtomicBoolean(); private ScheduledExecutorService scheduledExecutor; /** * Constructor to create a sessions-enabled processor. * * @param sessionReceiverBuilder The session processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. 
*/ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.sessionReceiverBuilder = Objects.requireNonNull(sessionReceiverBuilder, "'sessionReceiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(sessionReceiverBuilder.buildAsyncClientForProcessor()); this.receiverBuilder = null; } /** * Constructor to create a processor. * * @param receiverBuilder The processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. */ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.receiverBuilder = Objects.requireNonNull(receiverBuilder, "'receiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(receiverBuilder.buildAsyncClient()); this.sessionReceiverBuilder = null; } /** * Starts the processor in the background. 
When this method is called, the processor will initiate a message * receiver that will invoke the message handler when new messages are available. This method is idempotent i.e * calling {@link * after calling {@link * sessions. Calling {@link * a new set of sessions will be processed. */ public synchronized void start() { if (isRunning.getAndSet(true)) { logger.info("Processor is already running"); return; } receiveMessages(); this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(); scheduledExecutor.scheduleWithFixedDelay(() -> { if (this.asyncClient.get().isConnectionClosed()) { restartMessageReceiver(); } }, SCHEDULER_INTERVAL_IN_SECONDS, SCHEDULER_INTERVAL_IN_SECONDS, TimeUnit.SECONDS); } /** * Stops the message processing for this processor. The receiving links and sessions are kept active and this * processor can resume processing messages by calling {@link */ public synchronized void stop() { isRunning.set(false); } /** * Stops message processing and closes the processor. The receiving links and sessions are closed and calling * {@link */ @Override public synchronized void close() { isRunning.set(false); if (receiverSubscription.get() != null) { receiverSubscription.get().cancel(); } asyncClient.get().close(); scheduledExecutor.shutdown(); } /** * Returns {@code true} if the processor is running. If the processor is stopped or closed, this method returns * {@code false}. * * @return {@code true} if the processor is running. 
*/ public synchronized boolean isRunning() { return isRunning.get(); } private synchronized void receiveMessages() { if (receiverSubscription.get() != null) { receiverSubscription.get().request(1); return; } ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.receiveMessages() .parallel(processorOptions.getMaxConcurrentCalls()) .runOn(Schedulers.boundedElastic()) .subscribe(new Subscriber<ServiceBusReceivedMessageContext>() { @Override public void onSubscribe(Subscription subscription) { receiverSubscription.set(subscription); receiverSubscription.get().request(1); } @Override public void onNext(ServiceBusReceivedMessageContext serviceBusReceivedMessageContext) { if (serviceBusReceivedMessageContext.hasError()) { handleError(serviceBusReceivedMessageContext.getThrowable()); } else { try { ServiceBusProcessorMessageContext serviceBusProcessorMessageContext = new ServiceBusProcessorMessageContext(receiverClient, serviceBusReceivedMessageContext); processMessage.accept(serviceBusProcessorMessageContext); } catch (Exception ex) { handleError(ex); logger.warning("Error when processing message. 
Abandoning message.", ex); abandonMessage(serviceBusReceivedMessageContext, receiverClient); } } if (isRunning.get()) { logger.verbose("Requesting 1 more message from upstream"); receiverSubscription.get().request(1); } } @Override public void onError(Throwable throwable) { logger.info("Error receiving messages.", throwable); handleError(throwable); if (isRunning.get()) { restartMessageReceiver(); } } @Override }); } private void abandonMessage(ServiceBusReceivedMessageContext serviceBusReceivedMessageContext, ServiceBusReceiverAsyncClient receiverClient) { try { receiverClient.abandon(serviceBusReceivedMessageContext.getMessage()).block(); } catch (Exception exception) { logger.verbose("Failed to abandon message", exception); } } private void handleError(Throwable throwable) { try { processError.accept(throwable); } catch (Exception ex) { logger.verbose("Error from error handler. Ignoring error.", ex); } } private void restartMessageReceiver() { receiverSubscription.set(null); ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.close(); ServiceBusReceiverAsyncClient newReceiverClient = this.receiverBuilder == null ? this.sessionReceiverBuilder.buildAsyncClientForProcessor() : this.receiverBuilder.buildAsyncClient(); asyncClient.set(newReceiverClient); receiveMessages(); } }
This should be false
public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { try { StorageImplUtils.assertNotNull("options", options); final Map<String, String> metadataFinal = options.getMetadata() == null ? new HashMap<>() : options.getMetadata(); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE, true) : options.getDataFlux(); Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal); return dataFinal.flatMap(df -> super.uploadWithResponse(new BlobParallelUploadOptions(df) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(metadataFinal).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions()) .setComputeMd5(options.isComputeMd5()))); } catch (RuntimeException ex) { return monoError(logger, ex); } }
options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE, true)
new BlobParallelUploadOptions(df) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(metadataFinal).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions()) .setComputeMd5(options.isComputeMd5()))); } catch (RuntimeException ex) { return monoError(logger, ex); }
class EncryptedBlobAsyncClient extends BlobAsyncClient { static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB; private static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB; private final ClientLogger logger = new ClientLogger(EncryptedBlobAsyncClient.class); /** * An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content key during encryption. */ private final AsyncKeyEncryptionKey keyWrapper; /** * A {@link String} that is used to wrap/unwrap the content key during encryption. */ private final String keyWrapAlgorithm; /** * Package-private constructor for use by {@link EncryptedBlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param key The key used to encrypt and decrypt data. * @param keyWrapAlgorithm The algorithm used to wrap/unwrap the key during encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. 
*/ EncryptedBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, AsyncKeyEncryptionKey key, String keyWrapAlgorithm, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, null, versionId); this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; } /** * Creates a new block blob. By default this method will not overwrite an existing blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) { try { return this.upload(data, parallelTransferOptions, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. 
* <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether or not to overwrite, should data exist on the blob. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { try { Mono<BlockBlobItem> uploadTask = this.uploadWithResponse(data, parallelTransferOptions, null, null, null, null).flatMap(FluxUtil::toMono); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. 
To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadWithResponse(new BlobParallelUploadOptions(data) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. 
The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param options {@link BlobParallelUploadOptions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { try { StorageImplUtils.assertNotNull("options", options); final Map<String, String> metadataFinal = options.getMetadata() == null ? new HashMap<>() : options.getMetadata(); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE, true) : options.getDataFlux(); Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal); return dataFinal.flatMap(df -> super.uploadWithResponse( } /** * Creates a new block blob with the content of the specified file. By default this method will not overwrite * existing data * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath) { try { return uploadFromFile(filePath, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param overwrite Whether or not to overwrite should data exist on the blob. * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath, boolean overwrite) { try { Mono<Void> uploadTask = uploadFromFile(filePath, null, null, null, null, null); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return An empty response * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)) .then(); } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFileWithResponse * * @param options {@link BlobUploadFromFileOptions} * @return A reactive response containing the information of the uploaded block blob. 
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) { try { StorageImplUtils.assertNotNull("options", options); return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), logger), channel -> this.uploadWithResponse(new BlobParallelUploadOptions(FluxUtil.readFile(channel)) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions())) .doOnTerminate(() -> { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } }), channel -> UploadUtils.uploadFileCleanup(channel, logger)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Encrypts the given Flux ByteBuffer. * * @param plainTextFlux The Flux ByteBuffer to be encrypted. 
* * @return A {@link EncryptedBlob} * * @throws InvalidKeyException If the key provided is invalid */ Mono<EncryptedBlob> encryptBlob(Flux<ByteBuffer> plainTextFlux) throws InvalidKeyException { Objects.requireNonNull(this.keyWrapper, "keyWrapper cannot be null"); try { KeyGenerator keyGen = KeyGenerator.getInstance(CryptographyConstants.AES); keyGen.init(256); Cipher cipher = Cipher.getInstance(CryptographyConstants.AES_CBC_PKCS5PADDING); SecretKey aesKey = keyGen.generateKey(); cipher.init(Cipher.ENCRYPT_MODE, aesKey); Map<String, String> keyWrappingMetadata = new HashMap<>(); keyWrappingMetadata.put(CryptographyConstants.AGENT_METADATA_KEY, CryptographyConstants.AGENT_METADATA_VALUE); return this.keyWrapper.getKeyId().flatMap(keyId -> this.keyWrapper.wrapKey(keyWrapAlgorithm, aesKey.getEncoded()) .map(encryptedKey -> { WrappedKey wrappedKey = new WrappedKey(keyId, encryptedKey, keyWrapAlgorithm); EncryptionData encryptionData = new EncryptionData() .setEncryptionMode(CryptographyConstants.ENCRYPTION_MODE) .setEncryptionAgent( new EncryptionAgent(CryptographyConstants.ENCRYPTION_PROTOCOL_V1, EncryptionAlgorithm.AES_CBC_256)) .setKeyWrappingMetadata(keyWrappingMetadata) .setContentEncryptionIV(cipher.getIV()) .setWrappedContentKey(wrappedKey); Flux<ByteBuffer> encryptedTextFlux = plainTextFlux.map(plainTextBuffer -> { int outputSize = cipher.getOutputSize(plainTextBuffer.remaining()); /* This should be the only place we allocate memory in encryptBlob(). Although there is an overload that can encrypt in place that would save allocations, we do not want to overwrite customer's memory, so we must allocate our own memory. If memory usage becomes unreasonable, we should implement pooling. 
*/ ByteBuffer encryptedTextBuffer = ByteBuffer.allocate(outputSize); int encryptedBytes; try { encryptedBytes = cipher.update(plainTextBuffer, encryptedTextBuffer); } catch (ShortBufferException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } encryptedTextBuffer.position(0); encryptedTextBuffer.limit(encryptedBytes); return encryptedTextBuffer; }); /* Defer() ensures the contained code is not executed until the Flux is subscribed to, in other words, cipher.doFinal() will not be called until the plainTextFlux has completed and therefore all other data has been encrypted. */ encryptedTextFlux = Flux.concat(encryptedTextFlux, Flux.defer(() -> { try { return Flux.just(ByteBuffer.wrap(cipher.doFinal())); } catch (GeneralSecurityException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } })); return new EncryptedBlob(encryptionData, encryptedTextFlux); })); } catch (NoSuchAlgorithmException | NoSuchPaddingException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } } /** * Encrypt the blob and add the encryption metadata to the customer's metadata. * * @param plainText The data to encrypt * @param metadata The customer's metadata to be updated. * * @return A Mono containing the cipher text */ private Mono<Flux<ByteBuffer>> prepareToSendEncryptedRequest(Flux<ByteBuffer> plainText, Map<String, String> metadata) { try { return this.encryptBlob(plainText) .flatMap(encryptedBlob -> { try { metadata.put(CryptographyConstants.ENCRYPTION_DATA_KEY, encryptedBlob.getEncryptionData().toJsonString()); return Mono.just(encryptedBlob.getCiphertextFlux()); } catch (JsonProcessingException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } }); } catch (InvalidKeyException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } } /** * Unsupported. Cannot query data encrypted on client side. 
*/ @Override public Flux<ByteBuffer> query(String expression) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } /** * Unsupported. Cannot query data encrypted on client side. */ @Override public Mono<BlobQueryAsyncResponse> queryWithResponse(BlobQueryOptions queryOptions) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } }
class EncryptedBlobAsyncClient extends BlobAsyncClient { static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB; private static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB; private final ClientLogger logger = new ClientLogger(EncryptedBlobAsyncClient.class); /** * An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content key during encryption. */ private final AsyncKeyEncryptionKey keyWrapper; /** * A {@link String} that is used to wrap/unwrap the content key during encryption. */ private final String keyWrapAlgorithm; /** * Package-private constructor for use by {@link EncryptedBlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param key The key used to encrypt and decrypt data. * @param keyWrapAlgorithm The algorithm used to wrap/unwrap the key during encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. 
*/ EncryptedBlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, AsyncKeyEncryptionKey key, String keyWrapAlgorithm, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, null, versionId); this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; } /** * Creates a new block blob. By default this method will not overwrite an existing blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) { try { return this.upload(data, parallelTransferOptions, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. 
* <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether or not to overwrite, should data exist on the blob. * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { try { Mono<BlockBlobItem> uploadTask = this.uploadWithResponse(data, parallelTransferOptions, null, null, null, null).flatMap(FluxUtil::toMono); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. 
To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadWithResponse(new BlobParallelUploadOptions(data) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with this method; the content of * the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use {@link * BlockBlobAsyncClient * {@link BlockBlobAsyncClient * see the <a href="https: * Docs for Put Block</a> and the <a href="https: * Docs for Put Block List</a>. * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method should * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. 
The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadWithResponse * * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param options {@link BlobParallelUploadOptions} * @return A reactive response containing the information of the uploaded block blob. */ @Override public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { try { StorageImplUtils.assertNotNull("options", options); final Map<String, String> metadataFinal = options.getMetadata() == null ? new HashMap<>() : options.getMetadata(); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), BLOB_DEFAULT_UPLOAD_BLOCK_SIZE, false) : options.getDataFlux(); Mono<Flux<ByteBuffer>> dataFinal = prepareToSendEncryptedRequest(data, metadataFinal); return dataFinal.flatMap(df -> super.uploadWithResponse( } /** * Creates a new block blob with the content of the specified file. By default this method will not overwrite * existing data * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath) { try { return uploadFromFile(filePath, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param overwrite Whether or not to overwrite should data exist on the blob. * @return An empty response */ @Override public Mono<Void> uploadFromFile(String filePath, boolean overwrite) { try { Mono<Void> uploadTask = uploadFromFile(filePath, null, null, null, null, null); if (overwrite) { return uploadTask; } else { return exists() .flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : uploadTask); } } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. * @param tier {@link AccessTier} for the destination blob. 
* @param requestConditions {@link BlobRequestConditions} * @return An empty response * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)) .then(); } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobAsyncClient.uploadFromFileWithResponse * * @param options {@link BlobUploadFromFileOptions} * @return A reactive response containing the information of the uploaded block blob. 
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB * @throws UncheckedIOException If an I/O error occurs */ @Override public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) { try { StorageImplUtils.assertNotNull("options", options); return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(options.getFilePath(), logger), channel -> this.uploadWithResponse(new BlobParallelUploadOptions(FluxUtil.readFile(channel)) .setParallelTransferOptions(options.getParallelTransferOptions()).setHeaders(options.getHeaders()) .setMetadata(options.getMetadata()).setTags(options.getTags()).setTier(options.getTier()) .setRequestConditions(options.getRequestConditions())) .doOnTerminate(() -> { try { channel.close(); } catch (IOException e) { throw logger.logExceptionAsError(new UncheckedIOException(e)); } }), channel -> UploadUtils.uploadFileCleanup(channel, logger)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Encrypts the given Flux ByteBuffer. * * @param plainTextFlux The Flux ByteBuffer to be encrypted. 
* * @return A {@link EncryptedBlob} * * @throws InvalidKeyException If the key provided is invalid */ Mono<EncryptedBlob> encryptBlob(Flux<ByteBuffer> plainTextFlux) throws InvalidKeyException { Objects.requireNonNull(this.keyWrapper, "keyWrapper cannot be null"); try { KeyGenerator keyGen = KeyGenerator.getInstance(CryptographyConstants.AES); keyGen.init(256); Cipher cipher = Cipher.getInstance(CryptographyConstants.AES_CBC_PKCS5PADDING); SecretKey aesKey = keyGen.generateKey(); cipher.init(Cipher.ENCRYPT_MODE, aesKey); Map<String, String> keyWrappingMetadata = new HashMap<>(); keyWrappingMetadata.put(CryptographyConstants.AGENT_METADATA_KEY, CryptographyConstants.AGENT_METADATA_VALUE); return this.keyWrapper.getKeyId().flatMap(keyId -> this.keyWrapper.wrapKey(keyWrapAlgorithm, aesKey.getEncoded()) .map(encryptedKey -> { WrappedKey wrappedKey = new WrappedKey(keyId, encryptedKey, keyWrapAlgorithm); EncryptionData encryptionData = new EncryptionData() .setEncryptionMode(CryptographyConstants.ENCRYPTION_MODE) .setEncryptionAgent( new EncryptionAgent(CryptographyConstants.ENCRYPTION_PROTOCOL_V1, EncryptionAlgorithm.AES_CBC_256)) .setKeyWrappingMetadata(keyWrappingMetadata) .setContentEncryptionIV(cipher.getIV()) .setWrappedContentKey(wrappedKey); Flux<ByteBuffer> encryptedTextFlux = plainTextFlux.map(plainTextBuffer -> { int outputSize = cipher.getOutputSize(plainTextBuffer.remaining()); /* This should be the only place we allocate memory in encryptBlob(). Although there is an overload that can encrypt in place that would save allocations, we do not want to overwrite customer's memory, so we must allocate our own memory. If memory usage becomes unreasonable, we should implement pooling. 
*/ ByteBuffer encryptedTextBuffer = ByteBuffer.allocate(outputSize); int encryptedBytes; try { encryptedBytes = cipher.update(plainTextBuffer, encryptedTextBuffer); } catch (ShortBufferException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } encryptedTextBuffer.position(0); encryptedTextBuffer.limit(encryptedBytes); return encryptedTextBuffer; }); /* Defer() ensures the contained code is not executed until the Flux is subscribed to, in other words, cipher.doFinal() will not be called until the plainTextFlux has completed and therefore all other data has been encrypted. */ encryptedTextFlux = Flux.concat(encryptedTextFlux, Flux.defer(() -> { try { return Flux.just(ByteBuffer.wrap(cipher.doFinal())); } catch (GeneralSecurityException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } })); return new EncryptedBlob(encryptionData, encryptedTextFlux); })); } catch (NoSuchAlgorithmException | NoSuchPaddingException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } } /** * Encrypt the blob and add the encryption metadata to the customer's metadata. * * @param plainText The data to encrypt * @param metadata The customer's metadata to be updated. * * @return A Mono containing the cipher text */ private Mono<Flux<ByteBuffer>> prepareToSendEncryptedRequest(Flux<ByteBuffer> plainText, Map<String, String> metadata) { try { return this.encryptBlob(plainText) .flatMap(encryptedBlob -> { try { metadata.put(CryptographyConstants.ENCRYPTION_DATA_KEY, encryptedBlob.getEncryptionData().toJsonString()); return Mono.just(encryptedBlob.getCiphertextFlux()); } catch (JsonProcessingException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } }); } catch (InvalidKeyException e) { throw logger.logExceptionAsError(Exceptions.propagate(e)); } } /** * Unsupported. Cannot query data encrypted on client side. 
*/ @Override public Flux<ByteBuffer> query(String expression) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } /** * Unsupported. Cannot query data encrypted on client side. */ @Override public Mono<BlobQueryAsyncResponse> queryWithResponse(BlobQueryOptions queryOptions) { throw logger.logExceptionAsError(new UnsupportedOperationException( "Cannot query data encrypted on client side")); } }
Should be false
public Mono<Response<PathInfo>> uploadWithResponse(FileParallelUploadOptions options) { try { StorageImplUtils.assertNotNull("options", options); DataLakeRequestConditions validatedRequestConditions = options.getRequestConditions() == null ? new DataLakeRequestConditions() : options.getRequestConditions(); /* Since we are creating a file with the request conditions, everything but lease id becomes invalid after creation, so remove them for the append/flush calls. */ DataLakeRequestConditions validatedUploadRequestConditions = new DataLakeRequestConditions() .setLeaseId(validatedRequestConditions.getLeaseId()); final ParallelTransferOptions validatedParallelTransferOptions = ModelHelper.populateAndApplyDefaults(options.getParallelTransferOptions()); long fileOffset = 0; Function<Flux<ByteBuffer>, Mono<Response<PathInfo>>> uploadInChunksFunction = (stream) -> uploadInChunks(stream, fileOffset, validatedParallelTransferOptions, options.getHeaders(), validatedUploadRequestConditions); BiFunction<Flux<ByteBuffer>, Long, Mono<Response<PathInfo>>> uploadFullMethod = (stream, length) -> uploadWithResponse(ProgressReporter .addProgressReporting(stream, validatedParallelTransferOptions.getProgressReceiver()), fileOffset, length, options.getHeaders(), validatedUploadRequestConditions); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), (int) Math.min(Integer.MAX_VALUE, validatedParallelTransferOptions.getBlockSizeLong()), true) : options.getDataFlux(); return createWithResponse(options.getPermissions(), options.getUmask(), options.getHeaders(), options.getMetadata(), validatedRequestConditions) .then(UploadUtils.uploadFullOrChunked(data, validatedParallelTransferOptions, uploadInChunksFunction, uploadFullMethod)); } catch (RuntimeException ex) { return monoError(logger, ex); } }
(int) Math.min(Integer.MAX_VALUE, validatedParallelTransferOptions.getBlockSizeLong()), true)
new DataLakeRequestConditions() .setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return overwriteCheck .then(uploadWithResponse(data, parallelTransferOptions, null, null, requestConditions)) .flatMap(FluxUtil::toMono); } /** * Creates a new file. * <p> * To avoid overwriting, pass "*" to {@link DataLakeRequestConditions
class DataLakeFileAsyncClient extends DataLakePathAsyncClient { /** * Indicates the maximum number of bytes that can be sent in a call to upload. */ static final long MAX_APPEND_FILE_BYTES = 4000L * Constants.MB; private final ClientLogger logger = new ClientLogger(DataLakeFileAsyncClient.class); /** * Package-private constructor for use by {@link DataLakePathClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param fileSystemName The file system name. * @param fileName The file name. * @param blockBlobAsyncClient The underlying {@link BlobContainerAsyncClient} */ DataLakeFileAsyncClient(HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion, String accountName, String fileSystemName, String fileName, BlockBlobAsyncClient blockBlobAsyncClient) { super(pipeline, url, serviceVersion, accountName, fileSystemName, fileName, PathResourceType.FILE, blockBlobAsyncClient); } DataLakeFileAsyncClient(DataLakePathAsyncClient pathAsyncClient) { super(pathAsyncClient.getHttpPipeline(), pathAsyncClient.getPathUrl(), pathAsyncClient.getServiceVersion(), pathAsyncClient.getAccountName(), pathAsyncClient.getFileSystemName(), pathAsyncClient.pathName, PathResourceType.FILE, pathAsyncClient.getBlockBlobAsyncClient()); } /** * Gets the URL of the file represented by this client on the Data Lake service. * * @return the URL. */ public String getFileUrl() { return getPathUrl(); } /** * Gets the path of this file, not including the name of the resource itself. * * @return The path of the file. */ public String getFilePath() { return getObjectPath(); } /** * Gets the name of this file, not including its full path. * * @return The name of the file. */ public String getFileName() { return getObjectName(); } /** * Deletes a file. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.delete} * * <p>For more information see the * <a href="https: * Docs</a></p> * * @return A reactive response signalling completion. */ public Mono<Void> delete() { try { return deleteWithResponse(null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.deleteWithResponse * * <p>For more information see the * <a href="https: * Docs</a></p> * * @param requestConditions {@link DataLakeRequestConditions} * * @return A reactive response signalling completion. */ public Mono<Response<Void>> deleteWithResponse(DataLakeRequestConditions requestConditions) { try { return withContext(context -> deleteWithResponse(null /* recursive */, requestConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new file and uploads content. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.upload * * @param data The data to write to the file. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @return A reactive response containing the information of the uploaded file. */ public Mono<PathInfo> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) { return upload(data, parallelTransferOptions, false); } /** * Creates a new file and uploads content. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.upload * * @param data The data to write to the file. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether or not to overwrite, should the file already exist. * @return A reactive response containing the information of the uploaded file. */ public Mono<PathInfo> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { Mono<Void> overwriteCheck; DataLakeRequestConditions requestConditions; if (overwrite) { overwriteCheck = Mono.empty(); requestConditions = null; } else { overwriteCheck = exists().flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : Mono.empty()); requestConditions = . * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadWithResponse * * <p><strong>Using Progress Reporting</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadWithResponse * * @param data The data to write to the file. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link PathHttpHeaders} * @param metadata Metadata to associate with the resource. 
* @param requestConditions {@link DataLakeRequestConditions} * @return A reactive response containing the information of the uploaded file. */ public Mono<Response<PathInfo>> uploadWithResponse(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions) { try { return uploadWithResponse(new FileParallelUploadOptions(data) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setRequestConditions(requestConditions)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new file. * <p> * To avoid overwriting, pass "*" to {@link DataLakeRequestConditions * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadWithResponse * * <p><strong>Using Progress Reporting</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadWithResponse * * @param options {@link FileParallelUploadOptions} * @return A reactive response containing the information of the uploaded file. */ public Mono<Response<PathInfo>> uploadWithResponse(FileParallelUploadOptions options) { try { StorageImplUtils.assertNotNull("options", options); DataLakeRequestConditions validatedRequestConditions = options.getRequestConditions() == null ? new DataLakeRequestConditions() : options.getRequestConditions(); /* Since we are creating a file with the request conditions, everything but lease id becomes invalid after creation, so remove them for the append/flush calls. 
*/ DataLakeRequestConditions validatedUploadRequestConditions = new DataLakeRequestConditions() .setLeaseId(validatedRequestConditions.getLeaseId()); final ParallelTransferOptions validatedParallelTransferOptions = ModelHelper.populateAndApplyDefaults(options.getParallelTransferOptions()); long fileOffset = 0; Function<Flux<ByteBuffer>, Mono<Response<PathInfo>>> uploadInChunksFunction = (stream) -> uploadInChunks(stream, fileOffset, validatedParallelTransferOptions, options.getHeaders(), validatedUploadRequestConditions); BiFunction<Flux<ByteBuffer>, Long, Mono<Response<PathInfo>>> uploadFullMethod = (stream, length) -> uploadWithResponse(ProgressReporter .addProgressReporting(stream, validatedParallelTransferOptions.getProgressReceiver()), fileOffset, length, options.getHeaders(), validatedUploadRequestConditions); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), (int) Math.min(Integer.MAX_VALUE, validatedParallelTransferOptions.getBlockSizeLong()), true) : options.getDataFlux(); return createWithResponse(options.getPermissions(), options.getUmask(), options.getHeaders(), options.getMetadata(), validatedRequestConditions) .then(UploadUtils.uploadFullOrChunked(data, validatedParallelTransferOptions, uploadInChunksFunction, uploadFullMethod)); } catch (RuntimeException ex) { return monoError(logger, ex); } } private Mono<Response<PathInfo>> uploadInChunks(Flux<ByteBuffer> data, long fileOffset, ParallelTransferOptions parallelTransferOptions, PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions) { AtomicLong totalProgress = new AtomicLong(); Lock progressLock = new ReentrantLock(); /* We use maxConcurrency + 1 for the number of buffers because one buffer will typically be being filled while the others are being sent. 
*/ UploadBufferPool pool = new UploadBufferPool(parallelTransferOptions.getMaxConcurrency() + 1, parallelTransferOptions.getBlockSizeLong(), MAX_APPEND_FILE_BYTES); Flux<ByteBuffer> chunkedSource = UploadUtils.chunkSource(data, parallelTransferOptions); /* Write to the pool and upload the output. */ return chunkedSource.concatMap(pool::write) .concatWith(Flux.defer(pool::flush)) /* Map the data to a tuple 3, of buffer, buffer length, buffer offset */ .map(bufferAggregator -> Tuples.of(bufferAggregator, bufferAggregator.length(), 0L)) /* Scan reduces a flux with an accumulator while emitting the intermediate results. */ /* As an example, data consists of ByteBuffers of length 10-10-5. In the map above we transform the initial ByteBuffer to a tuple3 of buff, 10, 0. Scan will emit that as is, then accumulate the tuple for the next emission. On the second iteration, the middle ByteBuffer gets transformed to buff, 10, 10+0 (from previous emission). Scan emits that, and on the last iteration, the last ByteBuffer gets transformed to buff, 5, 10+10 (from previous emission). 
*/ .scan((result, source) -> { BufferAggregator bufferAggregator = source.getT1(); long currentBufferLength = bufferAggregator.length(); long lastBytesWritten = result.getT2(); long lastOffset = result.getT3(); return Tuples.of(bufferAggregator, currentBufferLength, lastBytesWritten + lastOffset); }) .flatMapSequential(tuple3 -> { BufferAggregator bufferAggregator = tuple3.getT1(); long currentBufferLength = bufferAggregator.length(); long currentOffset = tuple3.getT3() + fileOffset; Flux<ByteBuffer> progressData = ProgressReporter.addParallelProgressReporting( bufferAggregator.asFlux(), parallelTransferOptions.getProgressReceiver(), progressLock, totalProgress); return appendWithResponse(progressData, currentOffset, currentBufferLength, null, requestConditions.getLeaseId()) .doFinally(x -> pool.returnBuffer(bufferAggregator)) .map(resp -> currentBufferLength + currentOffset) /* End of file after append to pass to flush. */ .flux(); }, parallelTransferOptions.getMaxConcurrency()) .last() .flatMap(length -> flushWithResponse(length, false, false, httpHeaders, requestConditions)); } private Mono<Response<PathInfo>> uploadWithResponse(Flux<ByteBuffer> data, long fileOffset, long length, PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions) { return appendWithResponse(data, fileOffset, length, null, requestConditions.getLeaseId()) .flatMap(resp -> flushWithResponse(fileOffset + length, false, false, httpHeaders, requestConditions)); } /** * Creates a new file, with the content of the specified file. By default this method will not overwrite an * existing file. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ public Mono<Void> uploadFromFile(String filePath) { try { return uploadFromFile(filePath, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new file, with the content of the specified file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param overwrite Whether or not to overwrite, should the file already exist. * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ public Mono<Void> uploadFromFile(String filePath, boolean overwrite) { try { Mono<Void> overwriteCheck = Mono.empty(); DataLakeRequestConditions requestConditions = null; if (!overwrite) { if (UploadUtils.shouldUploadInChunks(filePath, DataLakeFileAsyncClient.MAX_APPEND_FILE_BYTES, logger)) { overwriteCheck = exists().flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.FILE_ALREADY_EXISTS)) : Mono.empty()); } requestConditions = new DataLakeRequestConditions() .setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return overwriteCheck.then(uploadFromFile(filePath, null, null, null, requestConditions)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new file, with the content of the specified file. * <p> * To avoid overwriting, pass "*" to {@link DataLakeRequestConditions * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. 
Number of parallel * transfers parameter is ignored. * @param headers {@link PathHttpHeaders} * @param metadata Metadata to associate with the resource. * @param requestConditions {@link DataLakeRequestConditions} * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions, PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions) { Long originalBlockSize = (parallelTransferOptions == null) ? null : parallelTransferOptions.getBlockSizeLong(); DataLakeRequestConditions validatedRequestConditions = requestConditions == null ? new DataLakeRequestConditions() : requestConditions; /* Since we are creating a file with the request conditions, everything but lease id becomes invalid after creation, so e remove them for the append/flush calls. */ DataLakeRequestConditions validatedUploadRequestConditions = new DataLakeRequestConditions() .setLeaseId(validatedRequestConditions.getLeaseId()); final ParallelTransferOptions finalParallelTransferOptions = ModelHelper.populateAndApplyDefaults(parallelTransferOptions); long fileOffset = 0; try { return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(filePath, logger), channel -> { try { long fileSize = channel.size(); if (fileSize == 0) { throw logger.logExceptionAsError(new IllegalArgumentException("Size of the file must be " + "greater than 0.")); } if (UploadUtils.shouldUploadInChunks(filePath, finalParallelTransferOptions.getMaxSingleUploadSizeLong(), logger)) { return createWithResponse(null, null, headers, metadata, validatedRequestConditions) .then(uploadFileChunks(fileOffset, fileSize, finalParallelTransferOptions, originalBlockSize, headers, validatedUploadRequestConditions, channel)); } else { return createWithResponse(null, null, headers, metadata, validatedRequestConditions) .then(uploadWithResponse(FluxUtil.readFile(channel), fileOffset, fileSize, 
headers, validatedUploadRequestConditions)) .then(); } } catch (IOException ex) { return Mono.error(ex); } }, channel -> UploadUtils.uploadFileCleanup(channel, logger)); } catch (RuntimeException ex) { return monoError(logger, ex); } } private Mono<Void> uploadFileChunks(long fileOffset, long fileSize, ParallelTransferOptions parallelTransferOptions, Long originalBlockSize, PathHttpHeaders headers, DataLakeRequestConditions requestConditions, AsynchronousFileChannel channel) { AtomicLong totalProgress = new AtomicLong(); Lock progressLock = new ReentrantLock(); return Flux.fromIterable(sliceFile(fileSize, originalBlockSize, parallelTransferOptions.getBlockSizeLong())) .flatMap(chunk -> { Flux<ByteBuffer> progressData = ProgressReporter.addParallelProgressReporting( FluxUtil.readFile(channel, chunk.getOffset(), chunk.getCount()), parallelTransferOptions.getProgressReceiver(), progressLock, totalProgress); return appendWithResponse(progressData, fileOffset + chunk.getOffset(), chunk.getCount(), null, requestConditions.getLeaseId()); }, parallelTransferOptions.getMaxConcurrency()) .then(Mono.defer(() -> flushWithResponse(fileSize, false, false, headers, requestConditions))) .then(); } private List<FileRange> sliceFile(long fileSize, Long originalBlockSize, long blockSize) { List<FileRange> ranges = new ArrayList<>(); if (fileSize > 100 * Constants.MB && originalBlockSize == null) { blockSize = BlobAsyncClient.BLOB_DEFAULT_HTBB_UPLOAD_BLOCK_SIZE; } for (long pos = 0; pos < fileSize; pos += blockSize) { long count = blockSize; if (pos + count > fileSize) { count = fileSize - pos; } ranges.add(new FileRange(pos, count)); } return ranges; } /** * Appends data to the specified resource to later be flushed (written) by a call to flush * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.append * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param data The data to write to the file. 
* @param fileOffset The position where the data is to be appended. * @param length The exact length of the data. It is important that this value match precisely the length of the * data emitted by the {@code Flux}. * * @return A reactive response signalling completion. */ public Mono<Void> append(Flux<ByteBuffer> data, long fileOffset, long length) { try { return appendWithResponse(data, fileOffset, length, null, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Appends data to the specified resource to later be flushed (written) by a call to flush * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.appendWithResponse * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param data The data to write to the file. * @param fileOffset The position where the data is to be appended. * @param length The exact length of the data. It is important that this value match precisely the length of the * data emitted by the {@code Flux}. * @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the * received data and fail the request if it does not match the provided MD5. * @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on * the file. * * @return A reactive response signalling completion. 
*/ public Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length, byte[] contentMd5, String leaseId) { try { return withContext(context -> appendWithResponse(data, fileOffset, length, contentMd5, leaseId, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length, byte[] contentMd5, String leaseId, Context context) { LeaseAccessConditions leaseAccessConditions = new LeaseAccessConditions().setLeaseId(leaseId); PathHttpHeaders headers = new PathHttpHeaders().setTransactionalContentHash(contentMd5); return this.dataLakeStorage.paths().appendDataWithRestResponseAsync(data, fileOffset, null, length, null, null, headers, leaseAccessConditions, context).map(response -> new SimpleResponse<>(response, null)); } /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. * <p>By default this method will not overwrite existing data.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.flush * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. * * @return A reactive response containing the information of the created resource. */ public Mono<PathInfo> flush(long position) { try { return flush(position, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.flush * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. 
* @param overwrite Whether or not to overwrite, should data exist on the file. * * @return A reactive response containing the information of the created resource. */ public Mono<PathInfo> flush(long position, boolean overwrite) { try { DataLakeRequestConditions requestConditions = null; if (!overwrite) { requestConditions = new DataLakeRequestConditions() .setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return flushWithResponse(position, false, false, null, requestConditions).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.flushWithResponse * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. * @param retainUncommittedData Whether or not uncommitted data is to be retained after the operation. * @param close Whether or not a file changed event raised indicates completion (true) or modification (false). * @param httpHeaders {@link PathHttpHeaders httpHeaders} * @param requestConditions {@link DataLakeRequestConditions requestConditions} * * @return A reactive response containing the information of the created resource. 
*/ public Mono<Response<PathInfo>> flushWithResponse(long position, boolean retainUncommittedData, boolean close, PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions) { try { return withContext(context -> flushWithResponse(position, retainUncommittedData, close, httpHeaders, requestConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<PathInfo>> flushWithResponse(long position, boolean retainUncommittedData, boolean close, PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Context context) { httpHeaders = httpHeaders == null ? new PathHttpHeaders() : httpHeaders; requestConditions = requestConditions == null ? new DataLakeRequestConditions() : requestConditions; LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId()); ModifiedAccessConditions mac = new ModifiedAccessConditions() .setIfMatch(requestConditions.getIfMatch()) .setIfNoneMatch(requestConditions.getIfNoneMatch()) .setIfModifiedSince(requestConditions.getIfModifiedSince()) .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince()); context = context == null ? Context.NONE : context; return this.dataLakeStorage.paths().flushDataWithRestResponseAsync(null, position, retainUncommittedData, close, (long) 0, null, httpHeaders, lac, mac, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(response -> new SimpleResponse<>(response, new PathInfo(response.getDeserializedHeaders().getETag(), response.getDeserializedHeaders().getLastModified()))); } /** * Reads the entire file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.read} * * <p>For more information, see the * <a href="https: * * @return A reactive response containing the file data. 
*/ public Flux<ByteBuffer> read() { try { return readWithResponse(null, null, null, false) .flatMapMany(FileReadAsyncResponse::getValue); } catch (RuntimeException ex) { return fluxError(logger, ex); } } /** * Reads a range of bytes from a file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.readWithResponse * * <p>For more information, see the * <a href="https: * * @param range {@link FileRange} * @param options {@link DownloadRetryOptions} * @param requestConditions {@link DataLakeRequestConditions} * @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned. * @return A reactive response containing the file data. */ public Mono<FileReadAsyncResponse> readWithResponse(FileRange range, DownloadRetryOptions options, DataLakeRequestConditions requestConditions, boolean getRangeContentMd5) { try { return blockBlobAsyncClient.downloadWithResponse(Transforms.toBlobRange(range), Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(requestConditions), getRangeContentMd5).map(Transforms::toFileReadAsyncResponse) .onErrorMap(DataLakeImplUtils::transformBlobStorageException); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Reads the entire file into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFile * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @return A reactive response containing the file properties and metadata. 
*/ public Mono<PathProperties> readToFile(String filePath) { return readToFile(filePath, false); } /** * Reads the entire file into a file specified by the path. * * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a * {@link FileAlreadyExistsException} will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFile * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @param overwrite Whether or not to overwrite the file, should the file exist. * @return A reactive response containing the file properties and metadata. */ public Mono<PathProperties> readToFile(String filePath, boolean overwrite) { Set<OpenOption> openOptions = null; if (overwrite) { openOptions = new HashSet<>(); openOptions.add(StandardOpenOption.CREATE); openOptions.add(StandardOpenOption.TRUNCATE_EXISTING); openOptions.add(StandardOpenOption.READ); openOptions.add(StandardOpenOption.WRITE); } return readToFileWithResponse(filePath, null, null, null, null, false, openOptions) .flatMap(FluxUtil::toMono); } /** * Reads the entire file into a file specified by the path. * * <p>By default the file will be created and must not exist, if the file already exists a * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate * {@link OpenOption OpenOptions} </p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @param range {@link FileRange} * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. 
Number of parallel * transfers parameter is ignored. * @param options {@link DownloadRetryOptions} * @param requestConditions {@link DataLakeRequestConditions} * @param rangeGetContentMd5 Whether the contentMD5 for the specified file range should be returned. * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file. * @return A reactive response containing the file properties and metadata. * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB. * @throws UncheckedIOException If an I/O error occurs. */ public Mono<Response<PathProperties>> readToFileWithResponse(String filePath, FileRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions options, DataLakeRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions) { return blockBlobAsyncClient.downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath) .setRange(Transforms.toBlobRange(range)).setParallelTransferOptions(parallelTransferOptions) .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(options)) .setRequestConditions(Transforms.toBlobRequestConditions(requestConditions)) .setRangeGetContentMd5(rangeGetContentMd5).setOpenOptions(openOptions)) .onErrorMap(DataLakeImplUtils::transformBlobStorageException) .map(response -> new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue()))); } /** * Moves the file to another location within the file system. * For more information see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.rename * * @param destinationFileSystem The file system of the destination within the account. * {@code null} for the current file system. * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name. 
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path * in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt" * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the new file created. */ public Mono<DataLakeFileAsyncClient> rename(String destinationFileSystem, String destinationPath) { try { return renameWithResponse(destinationFileSystem, destinationPath, null, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Moves the file to another location within the file system. For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.renameWithResponse * * @param destinationFileSystem The file system of the destination within the account. * {@code null} for the current file system. * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name. * For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path * in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt" * @param sourceRequestConditions {@link DataLakeRequestConditions} against the source. * @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination. * @return A {@link Mono} containing a {@link Response} whose {@link Response * DataLakeFileAsyncClient} used to interact with the file created. 
*/ public Mono<Response<DataLakeFileAsyncClient>> renameWithResponse(String destinationFileSystem, String destinationPath, DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions) { try { return withContext(context -> renameWithResponse(destinationFileSystem, destinationPath, sourceRequestConditions, destinationRequestConditions, context)) .map(response -> new SimpleResponse<>(response, new DataLakeFileAsyncClient(response.getValue()))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Queries the entire file. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.query * * @param expression The query expression. * @return A reactive response containing the queried data. */ public Flux<ByteBuffer> query(String expression) { return queryWithResponse(new FileQueryOptions(expression)) .flatMapMany(FileQueryAsyncResponse::getValue); } /** * Queries the entire file. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.queryWithResponse * * @param queryOptions {@link FileQueryOptions The query options} * @return A reactive response containing the queried data. */ public Mono<FileQueryAsyncResponse> queryWithResponse(FileQueryOptions queryOptions) { return blockBlobAsyncClient.queryWithResponse(Transforms.toBlobQueryOptions(queryOptions)) .map(Transforms::toFileQueryAsyncResponse) .onErrorMap(DataLakeImplUtils::transformBlobStorageException); } /** * Schedules the file for deletion. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.scheduleDeletion * * @param options Schedule deletion parameters. * @return A reactive response signalling completion. 
*/ public Mono<Void> scheduleDeletion(FileScheduleDeletionOptions options) { return scheduleDeletionWithResponse(options).flatMap(FluxUtil::toMono); } /** * Schedules the file for deletion. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.scheduleDeletionWithResponse * * @param options Schedule deletion parameters. * @return A reactive response signalling completion. */ public Mono<Response<Void>> scheduleDeletionWithResponse(FileScheduleDeletionOptions options) { try { return withContext(context -> scheduleDeletionWithResponse(options, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> scheduleDeletionWithResponse(FileScheduleDeletionOptions options, Context context) { PathExpiryOptions pathExpiryOptions; context = context == null ? Context.NONE : context; String expiresOn = null; if (options != null && options.getExpiresOn() != null) { pathExpiryOptions = PathExpiryOptions.ABSOLUTE; expiresOn = new DateTimeRfc1123(options.getExpiresOn()).toString(); } else if (options != null && options.getTimeToExpire() != null) { if (options.getExpiryRelativeTo() == FileExpirationOffset.CREATION_TIME) { pathExpiryOptions = PathExpiryOptions.RELATIVE_TO_CREATION; } else { pathExpiryOptions = PathExpiryOptions.RELATIVE_TO_NOW; } expiresOn = Long.toString(options.getTimeToExpire().toMillis()); } else { pathExpiryOptions = PathExpiryOptions.NEVER_EXPIRE; } return this.blobDataLakeStorage.paths().setExpiryWithRestResponseAsync( pathExpiryOptions, null, null, expiresOn, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(rb -> new SimpleResponse<>(rb, null)); } }
class DataLakeFileAsyncClient extends DataLakePathAsyncClient { /** * Indicates the maximum number of bytes that can be sent in a call to upload. */ static final long MAX_APPEND_FILE_BYTES = 4000L * Constants.MB; private final ClientLogger logger = new ClientLogger(DataLakeFileAsyncClient.class); /** * Package-private constructor for use by {@link DataLakePathClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param fileSystemName The file system name. * @param fileName The file name. * @param blockBlobAsyncClient The underlying {@link BlobContainerAsyncClient} */ DataLakeFileAsyncClient(HttpPipeline pipeline, String url, DataLakeServiceVersion serviceVersion, String accountName, String fileSystemName, String fileName, BlockBlobAsyncClient blockBlobAsyncClient) { super(pipeline, url, serviceVersion, accountName, fileSystemName, fileName, PathResourceType.FILE, blockBlobAsyncClient); } DataLakeFileAsyncClient(DataLakePathAsyncClient pathAsyncClient) { super(pathAsyncClient.getHttpPipeline(), pathAsyncClient.getPathUrl(), pathAsyncClient.getServiceVersion(), pathAsyncClient.getAccountName(), pathAsyncClient.getFileSystemName(), pathAsyncClient.pathName, PathResourceType.FILE, pathAsyncClient.getBlockBlobAsyncClient()); } /** * Gets the URL of the file represented by this client on the Data Lake service. * * @return the URL. */ public String getFileUrl() { return getPathUrl(); } /** * Gets the path of this file, not including the name of the resource itself. * * @return The path of the file. */ public String getFilePath() { return getObjectPath(); } /** * Gets the name of this file, not including its full path. * * @return The name of the file. */ public String getFileName() { return getObjectName(); } /** * Deletes a file. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.delete} * * <p>For more information see the * <a href="https: * Docs</a></p> * * @return A reactive response signalling completion. */ public Mono<Void> delete() { try { return deleteWithResponse(null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Deletes a file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.deleteWithResponse * * <p>For more information see the * <a href="https: * Docs</a></p> * * @param requestConditions {@link DataLakeRequestConditions} * * @return A reactive response signalling completion. */ public Mono<Response<Void>> deleteWithResponse(DataLakeRequestConditions requestConditions) { try { return withContext(context -> deleteWithResponse(null /* recursive */, requestConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new file and uploads content. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.upload * * @param data The data to write to the file. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @return A reactive response containing the information of the uploaded file. */ public Mono<PathInfo> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) { return upload(data, parallelTransferOptions, false); } /** * Creates a new file and uploads content. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.upload * * @param data The data to write to the file. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether or not to overwrite, should the file already exist. * @return A reactive response containing the information of the uploaded file. */ public Mono<PathInfo> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { Mono<Void> overwriteCheck; DataLakeRequestConditions requestConditions; if (overwrite) { overwriteCheck = Mono.empty(); requestConditions = null; } else { overwriteCheck = exists().flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : Mono.empty()); requestConditions = new DataLakeRequestConditions() .setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return overwriteCheck.then(uploadWithResponse(data, parallelTransferOptions, null, null, requestConditions) .flatMap(FluxUtil::toMono)); } /** * Creates a new file. * <p> * To avoid overwriting, pass "*" to {@link DataLakeRequestConditions * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadWithResponse * * <p><strong>Using Progress Reporting</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadWithResponse * * @param data The data to write to the file. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link PathHttpHeaders} * @param metadata Metadata to associate with the resource.
* @param requestConditions {@link DataLakeRequestConditions} * @return A reactive response containing the information of the uploaded file. */ public Mono<Response<PathInfo>> uploadWithResponse(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions) { try { return uploadWithResponse(new FileParallelUploadOptions(data) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setRequestConditions(requestConditions)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new file. * <p> * To avoid overwriting, pass "*" to {@link DataLakeRequestConditions * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadWithResponse * * <p><strong>Using Progress Reporting</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadWithResponse * * @param options {@link FileParallelUploadOptions} * @return A reactive response containing the information of the uploaded file. */ public Mono<Response<PathInfo>> uploadWithResponse(FileParallelUploadOptions options) { try { StorageImplUtils.assertNotNull("options", options); DataLakeRequestConditions validatedRequestConditions = options.getRequestConditions() == null ? new DataLakeRequestConditions() : options.getRequestConditions(); /* Since we are creating a file with the request conditions, everything but lease id becomes invalid after creation, so remove them for the append/flush calls. 
*/ DataLakeRequestConditions validatedUploadRequestConditions = new DataLakeRequestConditions() .setLeaseId(validatedRequestConditions.getLeaseId()); final ParallelTransferOptions validatedParallelTransferOptions = ModelHelper.populateAndApplyDefaults(options.getParallelTransferOptions()); long fileOffset = 0; Function<Flux<ByteBuffer>, Mono<Response<PathInfo>>> uploadInChunksFunction = (stream) -> uploadInChunks(stream, fileOffset, validatedParallelTransferOptions, options.getHeaders(), validatedUploadRequestConditions); BiFunction<Flux<ByteBuffer>, Long, Mono<Response<PathInfo>>> uploadFullMethod = (stream, length) -> uploadWithResponse(ProgressReporter .addProgressReporting(stream, validatedParallelTransferOptions.getProgressReceiver()), fileOffset, length, options.getHeaders(), validatedUploadRequestConditions); Flux<ByteBuffer> data = options.getDataFlux() == null ? Utility.convertStreamToByteBuffer( options.getDataStream(), options.getLength(), (int) Math.min(Integer.MAX_VALUE, validatedParallelTransferOptions.getBlockSizeLong()), false) : options.getDataFlux(); return createWithResponse(options.getPermissions(), options.getUmask(), options.getHeaders(), options.getMetadata(), validatedRequestConditions) .then(UploadUtils.uploadFullOrChunked(data, validatedParallelTransferOptions, uploadInChunksFunction, uploadFullMethod)); } catch (RuntimeException ex) { return monoError(logger, ex); } } private Mono<Response<PathInfo>> uploadInChunks(Flux<ByteBuffer> data, long fileOffset, ParallelTransferOptions parallelTransferOptions, PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions) { AtomicLong totalProgress = new AtomicLong(); Lock progressLock = new ReentrantLock(); /* We use maxConcurrency + 1 for the number of buffers because one buffer will typically be being filled while the others are being sent. 
*/ UploadBufferPool pool = new UploadBufferPool(parallelTransferOptions.getMaxConcurrency() + 1, parallelTransferOptions.getBlockSizeLong(), MAX_APPEND_FILE_BYTES); Flux<ByteBuffer> chunkedSource = UploadUtils.chunkSource(data, parallelTransferOptions); /* Write to the pool and upload the output. */ return chunkedSource.concatMap(pool::write) .concatWith(Flux.defer(pool::flush)) /* Map the data to a tuple 3, of buffer, buffer length, buffer offset */ .map(bufferAggregator -> Tuples.of(bufferAggregator, bufferAggregator.length(), 0L)) /* Scan reduces a flux with an accumulator while emitting the intermediate results. */ /* As an example, data consists of ByteBuffers of length 10-10-5. In the map above we transform the initial ByteBuffer to a tuple3 of buff, 10, 0. Scan will emit that as is, then accumulate the tuple for the next emission. On the second iteration, the middle ByteBuffer gets transformed to buff, 10, 10+0 (from previous emission). Scan emits that, and on the last iteration, the last ByteBuffer gets transformed to buff, 5, 10+10 (from previous emission). 
*/ .scan((result, source) -> { BufferAggregator bufferAggregator = source.getT1(); long currentBufferLength = bufferAggregator.length(); long lastBytesWritten = result.getT2(); long lastOffset = result.getT3(); return Tuples.of(bufferAggregator, currentBufferLength, lastBytesWritten + lastOffset); }) .flatMapSequential(tuple3 -> { BufferAggregator bufferAggregator = tuple3.getT1(); long currentBufferLength = bufferAggregator.length(); long currentOffset = tuple3.getT3() + fileOffset; Flux<ByteBuffer> progressData = ProgressReporter.addParallelProgressReporting( bufferAggregator.asFlux(), parallelTransferOptions.getProgressReceiver(), progressLock, totalProgress); return appendWithResponse(progressData, currentOffset, currentBufferLength, null, requestConditions.getLeaseId()) .doFinally(x -> pool.returnBuffer(bufferAggregator)) .map(resp -> currentBufferLength + currentOffset) /* End of file after append to pass to flush. */ .flux(); }, parallelTransferOptions.getMaxConcurrency()) .last() .flatMap(length -> flushWithResponse(length, false, false, httpHeaders, requestConditions)); } private Mono<Response<PathInfo>> uploadWithResponse(Flux<ByteBuffer> data, long fileOffset, long length, PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions) { return appendWithResponse(data, fileOffset, length, null, requestConditions.getLeaseId()) .flatMap(resp -> flushWithResponse(fileOffset + length, false, false, httpHeaders, requestConditions)); } /** * Creates a new file, with the content of the specified file. By default this method will not overwrite an * existing file. 
* * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ public Mono<Void> uploadFromFile(String filePath) { try { return uploadFromFile(filePath, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new file, with the content of the specified file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param overwrite Whether or not to overwrite, should the file already exist. * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ public Mono<Void> uploadFromFile(String filePath, boolean overwrite) { try { Mono<Void> overwriteCheck = Mono.empty(); DataLakeRequestConditions requestConditions = null; if (!overwrite) { if (UploadUtils.shouldUploadInChunks(filePath, DataLakeFileAsyncClient.MAX_APPEND_FILE_BYTES, logger)) { overwriteCheck = exists().flatMap(exists -> exists ? monoError(logger, new IllegalArgumentException(Constants.FILE_ALREADY_EXISTS)) : Mono.empty()); } requestConditions = new DataLakeRequestConditions() .setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return overwriteCheck.then(uploadFromFile(filePath, null, null, null, requestConditions)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Creates a new file, with the content of the specified file. * <p> * To avoid overwriting, pass "*" to {@link DataLakeRequestConditions * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. 
Number of parallel * transfers parameter is ignored. * @param headers {@link PathHttpHeaders} * @param metadata Metadata to associate with the resource. * @param requestConditions {@link DataLakeRequestConditions} * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions, PathHttpHeaders headers, Map<String, String> metadata, DataLakeRequestConditions requestConditions) { Long originalBlockSize = (parallelTransferOptions == null) ? null : parallelTransferOptions.getBlockSizeLong(); DataLakeRequestConditions validatedRequestConditions = requestConditions == null ? new DataLakeRequestConditions() : requestConditions; /* Since we are creating a file with the request conditions, everything but lease id becomes invalid after creation, so we remove them for the append/flush calls. */ DataLakeRequestConditions validatedUploadRequestConditions = new DataLakeRequestConditions() .setLeaseId(validatedRequestConditions.getLeaseId()); final ParallelTransferOptions finalParallelTransferOptions = ModelHelper.populateAndApplyDefaults(parallelTransferOptions); long fileOffset = 0; try { return Mono.using(() -> UploadUtils.uploadFileResourceSupplier(filePath, logger), channel -> { try { long fileSize = channel.size(); if (fileSize == 0) { throw logger.logExceptionAsError(new IllegalArgumentException("Size of the file must be " + "greater than 0.")); } if (UploadUtils.shouldUploadInChunks(filePath, finalParallelTransferOptions.getMaxSingleUploadSizeLong(), logger)) { return createWithResponse(null, null, headers, metadata, validatedRequestConditions) .then(uploadFileChunks(fileOffset, fileSize, finalParallelTransferOptions, originalBlockSize, headers, validatedUploadRequestConditions, channel)); } else { return createWithResponse(null, null, headers, metadata, validatedRequestConditions) .then(uploadWithResponse(FluxUtil.readFile(channel), fileOffset, fileSize,
headers, validatedUploadRequestConditions)) .then(); } } catch (IOException ex) { return Mono.error(ex); } }, channel -> UploadUtils.uploadFileCleanup(channel, logger)); } catch (RuntimeException ex) { return monoError(logger, ex); } } private Mono<Void> uploadFileChunks(long fileOffset, long fileSize, ParallelTransferOptions parallelTransferOptions, Long originalBlockSize, PathHttpHeaders headers, DataLakeRequestConditions requestConditions, AsynchronousFileChannel channel) { AtomicLong totalProgress = new AtomicLong(); Lock progressLock = new ReentrantLock(); return Flux.fromIterable(sliceFile(fileSize, originalBlockSize, parallelTransferOptions.getBlockSizeLong())) .flatMap(chunk -> { Flux<ByteBuffer> progressData = ProgressReporter.addParallelProgressReporting( FluxUtil.readFile(channel, chunk.getOffset(), chunk.getCount()), parallelTransferOptions.getProgressReceiver(), progressLock, totalProgress); return appendWithResponse(progressData, fileOffset + chunk.getOffset(), chunk.getCount(), null, requestConditions.getLeaseId()); }, parallelTransferOptions.getMaxConcurrency()) .then(Mono.defer(() -> flushWithResponse(fileSize, false, false, headers, requestConditions))) .then(); } private List<FileRange> sliceFile(long fileSize, Long originalBlockSize, long blockSize) { List<FileRange> ranges = new ArrayList<>(); if (fileSize > 100 * Constants.MB && originalBlockSize == null) { blockSize = BlobAsyncClient.BLOB_DEFAULT_HTBB_UPLOAD_BLOCK_SIZE; } for (long pos = 0; pos < fileSize; pos += blockSize) { long count = blockSize; if (pos + count > fileSize) { count = fileSize - pos; } ranges.add(new FileRange(pos, count)); } return ranges; } /** * Appends data to the specified resource to later be flushed (written) by a call to flush * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.append * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param data The data to write to the file. 
* @param fileOffset The position where the data is to be appended. * @param length The exact length of the data. It is important that this value match precisely the length of the * data emitted by the {@code Flux}. * * @return A reactive response signalling completion. */ public Mono<Void> append(Flux<ByteBuffer> data, long fileOffset, long length) { try { return appendWithResponse(data, fileOffset, length, null, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Appends data to the specified resource to later be flushed (written) by a call to flush * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.appendWithResponse * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param data The data to write to the file. * @param fileOffset The position where the data is to be appended. * @param length The exact length of the data. It is important that this value match precisely the length of the * data emitted by the {@code Flux}. * @param contentMd5 An MD5 hash of the content of the data. If specified, the service will calculate the MD5 of the * received data and fail the request if it does not match the provided MD5. * @param leaseId By setting lease id, requests will fail if the provided lease does not match the active lease on * the file. * * @return A reactive response signalling completion. 
*/ public Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length, byte[] contentMd5, String leaseId) { try { return withContext(context -> appendWithResponse(data, fileOffset, length, contentMd5, leaseId, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> appendWithResponse(Flux<ByteBuffer> data, long fileOffset, long length, byte[] contentMd5, String leaseId, Context context) { LeaseAccessConditions leaseAccessConditions = new LeaseAccessConditions().setLeaseId(leaseId); PathHttpHeaders headers = new PathHttpHeaders().setTransactionalContentHash(contentMd5); return this.dataLakeStorage.paths().appendDataWithRestResponseAsync(data, fileOffset, null, length, null, null, headers, leaseAccessConditions, context).map(response -> new SimpleResponse<>(response, null)); } /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. * <p>By default this method will not overwrite existing data.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.flush * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. * * @return A reactive response containing the information of the created resource. */ public Mono<PathInfo> flush(long position) { try { return flush(position, false); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.flush * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. 
* @param overwrite Whether or not to overwrite, should data exist on the file. * * @return A reactive response containing the information of the created resource. */ public Mono<PathInfo> flush(long position, boolean overwrite) { try { DataLakeRequestConditions requestConditions = null; if (!overwrite) { requestConditions = new DataLakeRequestConditions() .setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return flushWithResponse(position, false, false, null, requestConditions).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Flushes (writes) data previously appended to the file through a call to append. * The previously uploaded data must be contiguous. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.flushWithResponse * * <p>For more information, see the * <a href="https: * Docs</a></p> * * @param position The length of the file after all data has been written. * @param retainUncommittedData Whether or not uncommitted data is to be retained after the operation. * @param close Whether or not a file changed event raised indicates completion (true) or modification (false). * @param httpHeaders {@link PathHttpHeaders httpHeaders} * @param requestConditions {@link DataLakeRequestConditions requestConditions} * * @return A reactive response containing the information of the created resource. 
*/ public Mono<Response<PathInfo>> flushWithResponse(long position, boolean retainUncommittedData, boolean close, PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions) { try { return withContext(context -> flushWithResponse(position, retainUncommittedData, close, httpHeaders, requestConditions, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<PathInfo>> flushWithResponse(long position, boolean retainUncommittedData, boolean close, PathHttpHeaders httpHeaders, DataLakeRequestConditions requestConditions, Context context) { httpHeaders = httpHeaders == null ? new PathHttpHeaders() : httpHeaders; requestConditions = requestConditions == null ? new DataLakeRequestConditions() : requestConditions; LeaseAccessConditions lac = new LeaseAccessConditions().setLeaseId(requestConditions.getLeaseId()); ModifiedAccessConditions mac = new ModifiedAccessConditions() .setIfMatch(requestConditions.getIfMatch()) .setIfNoneMatch(requestConditions.getIfNoneMatch()) .setIfModifiedSince(requestConditions.getIfModifiedSince()) .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince()); context = context == null ? Context.NONE : context; return this.dataLakeStorage.paths().flushDataWithRestResponseAsync(null, position, retainUncommittedData, close, (long) 0, null, httpHeaders, lac, mac, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(response -> new SimpleResponse<>(response, new PathInfo(response.getDeserializedHeaders().getETag(), response.getDeserializedHeaders().getLastModified()))); } /** * Reads the entire file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.read} * * <p>For more information, see the * <a href="https: * * @return A reactive response containing the file data. 
*/ public Flux<ByteBuffer> read() { try { return readWithResponse(null, null, null, false) .flatMapMany(FileReadAsyncResponse::getValue); } catch (RuntimeException ex) { return fluxError(logger, ex); } } /** * Reads a range of bytes from a file. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.readWithResponse * * <p>For more information, see the * <a href="https: * * @param range {@link FileRange} * @param options {@link DownloadRetryOptions} * @param requestConditions {@link DataLakeRequestConditions} * @param getRangeContentMd5 Whether the contentMD5 for the specified file range should be returned. * @return A reactive response containing the file data. */ public Mono<FileReadAsyncResponse> readWithResponse(FileRange range, DownloadRetryOptions options, DataLakeRequestConditions requestConditions, boolean getRangeContentMd5) { try { return blockBlobAsyncClient.downloadWithResponse(Transforms.toBlobRange(range), Transforms.toBlobDownloadRetryOptions(options), Transforms.toBlobRequestConditions(requestConditions), getRangeContentMd5).map(Transforms::toFileReadAsyncResponse) .onErrorMap(DataLakeImplUtils::transformBlobStorageException); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Reads the entire file into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFile * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @return A reactive response containing the file properties and metadata. 
*/ public Mono<PathProperties> readToFile(String filePath) { return readToFile(filePath, false); } /** * Reads the entire file into a file specified by the path. * * <p>If overwrite is set to false, the file will be created and must not exist, if the file already exists a * {@link FileAlreadyExistsException} will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFile * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @param overwrite Whether or not to overwrite the file, should the file exist. * @return A reactive response containing the file properties and metadata. */ public Mono<PathProperties> readToFile(String filePath, boolean overwrite) { Set<OpenOption> openOptions = null; if (overwrite) { openOptions = new HashSet<>(); openOptions.add(StandardOpenOption.CREATE); openOptions.add(StandardOpenOption.TRUNCATE_EXISTING); openOptions.add(StandardOpenOption.READ); openOptions.add(StandardOpenOption.WRITE); } return readToFileWithResponse(filePath, null, null, null, null, false, openOptions) .flatMap(FluxUtil::toMono); } /** * Reads the entire file into a file specified by the path. * * <p>By default the file will be created and must not exist, if the file already exists a * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate * {@link OpenOption OpenOptions} </p> * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.readToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param filePath A {@link String} representing the filePath where the downloaded data will be written. * @param range {@link FileRange} * @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. 
Number of parallel * transfers parameter is ignored. * @param options {@link DownloadRetryOptions} * @param requestConditions {@link DataLakeRequestConditions} * @param rangeGetContentMd5 Whether the contentMD5 for the specified file range should be returned. * @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file. * @return A reactive response containing the file properties and metadata. * @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 100MB. * @throws UncheckedIOException If an I/O error occurs. */ public Mono<Response<PathProperties>> readToFileWithResponse(String filePath, FileRange range, ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions options, DataLakeRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions) { return blockBlobAsyncClient.downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath) .setRange(Transforms.toBlobRange(range)).setParallelTransferOptions(parallelTransferOptions) .setDownloadRetryOptions(Transforms.toBlobDownloadRetryOptions(options)) .setRequestConditions(Transforms.toBlobRequestConditions(requestConditions)) .setRangeGetContentMd5(rangeGetContentMd5).setOpenOptions(openOptions)) .onErrorMap(DataLakeImplUtils::transformBlobStorageException) .map(response -> new SimpleResponse<>(response, Transforms.toPathProperties(response.getValue()))); } /** * Moves the file to another location within the file system. * For more information see the * <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.rename * * @param destinationFileSystem The file system of the destination within the account. * {@code null} for the current file system. * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name. 
* For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path * in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt" * @return A {@link Mono} containing a {@link DataLakeFileAsyncClient} used to interact with the new file created. */ public Mono<DataLakeFileAsyncClient> rename(String destinationFileSystem, String destinationPath) { try { return renameWithResponse(destinationFileSystem, destinationPath, null, null).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Moves the file to another location within the file system. For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.renameWithResponse * * @param destinationFileSystem The file system of the destination within the account. * {@code null} for the current file system. * @param destinationPath Relative path from the file system to rename the file to, excludes the file system name. * For example if you want to move a file with fileSystem = "myfilesystem", path = "mydir/hello.txt" to another path * in myfilesystem (ex: newdir/hi.txt) then set the destinationPath = "newdir/hi.txt" * @param sourceRequestConditions {@link DataLakeRequestConditions} against the source. * @param destinationRequestConditions {@link DataLakeRequestConditions} against the destination. * @return A {@link Mono} containing a {@link Response} whose {@link Response * DataLakeFileAsyncClient} used to interact with the file created. 
*/ public Mono<Response<DataLakeFileAsyncClient>> renameWithResponse(String destinationFileSystem, String destinationPath, DataLakeRequestConditions sourceRequestConditions, DataLakeRequestConditions destinationRequestConditions) { try { return withContext(context -> renameWithResponse(destinationFileSystem, destinationPath, sourceRequestConditions, destinationRequestConditions, context)) .map(response -> new SimpleResponse<>(response, new DataLakeFileAsyncClient(response.getValue()))); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Queries the entire file. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.query * * @param expression The query expression. * @return A reactive response containing the queried data. */ public Flux<ByteBuffer> query(String expression) { return queryWithResponse(new FileQueryOptions(expression)) .flatMapMany(FileQueryAsyncResponse::getValue); } /** * Queries the entire file. * * <p>For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.queryWithResponse * * @param queryOptions {@link FileQueryOptions The query options} * @return A reactive response containing the queried data. */ public Mono<FileQueryAsyncResponse> queryWithResponse(FileQueryOptions queryOptions) { return blockBlobAsyncClient.queryWithResponse(Transforms.toBlobQueryOptions(queryOptions)) .map(Transforms::toFileQueryAsyncResponse) .onErrorMap(DataLakeImplUtils::transformBlobStorageException); } /** * Schedules the file for deletion. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.scheduleDeletion * * @param options Schedule deletion parameters. * @return A reactive response signalling completion. 
*/ public Mono<Void> scheduleDeletion(FileScheduleDeletionOptions options) { return scheduleDeletionWithResponse(options).flatMap(FluxUtil::toMono); } /** * Schedules the file for deletion. * * <p><strong>Code Samples</strong></p> * * {@codesnippet com.azure.storage.file.datalake.DataLakeFileAsyncClient.scheduleDeletionWithResponse * * @param options Schedule deletion parameters. * @return A reactive response signalling completion. */ public Mono<Response<Void>> scheduleDeletionWithResponse(FileScheduleDeletionOptions options) { try { return withContext(context -> scheduleDeletionWithResponse(options, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<Void>> scheduleDeletionWithResponse(FileScheduleDeletionOptions options, Context context) { PathExpiryOptions pathExpiryOptions; context = context == null ? Context.NONE : context; String expiresOn = null; if (options != null && options.getExpiresOn() != null) { pathExpiryOptions = PathExpiryOptions.ABSOLUTE; expiresOn = new DateTimeRfc1123(options.getExpiresOn()).toString(); } else if (options != null && options.getTimeToExpire() != null) { if (options.getExpiryRelativeTo() == FileExpirationOffset.CREATION_TIME) { pathExpiryOptions = PathExpiryOptions.RELATIVE_TO_CREATION; } else { pathExpiryOptions = PathExpiryOptions.RELATIVE_TO_NOW; } expiresOn = Long.toString(options.getTimeToExpire().toMillis()); } else { pathExpiryOptions = PathExpiryOptions.NEVER_EXPIRE; } return this.blobDataLakeStorage.paths().setExpiryWithRestResponseAsync( pathExpiryOptions, null, null, expiresOn, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)) .map(rb -> new SimpleResponse<>(rb, null)); } }
`getTotalTimeout` makes more sense.
/**
 * Creates a <b>synchronous</b> {@link ServiceBusSenderClient} wrapping a newly built
 * asynchronous client. The timeout passed to the client is computed from the retry
 * options and bounds each blocking operation.
 *
 * @return A new {@link ServiceBusSenderClient}.
 */
public ServiceBusSenderClient buildClient() { return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.calcTotalTimeout(retryOptions)); }
return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.calcTotalTimeout(retryOptions));
/**
 * Creates a <b>synchronous</b> {@link ServiceBusSenderClient} wrapping a newly built
 * asynchronous client. The timeout passed to the client is computed from the retry
 * options and bounds each blocking operation.
 *
 * @return A new {@link ServiceBusSenderClient}.
 */
public ServiceBusSenderClient buildClient() { return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.getTotalTimeout(retryOptions)); }
class ServiceBusSenderClientBuilder { private String queueName; private String topicName; private String viaQueueName; private String viaTopicName; private ServiceBusSenderClientBuilder() { } /** * Sets the name of the Service Bus queue to publish messages to. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the name of the initial destination Service Bus queue to publish messages to. * * @param viaQueueName The initial destination of the message. * * @return The modified {@link ServiceBusSenderClientBuilder} object. * @see <a href="https: * Via</a> */ public ServiceBusSenderClientBuilder viaQueueName(String viaQueueName) { this.viaQueueName = viaQueueName; return this; } /** * Sets the name of the initial destination Service Bus topic to publish messages to. * * @param viaTopicName The initial destination of the message. * * @return The modified {@link ServiceBusSenderClientBuilder} object. * @see <a href="https: * Via</a> */ public ServiceBusSenderClientBuilder viaTopicName(String viaTopicName) { this.viaTopicName = viaTopicName; return this; } /** * Sets the name of the Service Bus topic to publish messages to. * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link * ServiceBusMessage} to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ public ServiceBusSenderAsyncClient buildAsyncClient() { final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); if (!CoreUtils.isNullOrEmpty(viaQueueName) && entityType == MessagingEntityType.SUBSCRIPTION) { throw logger.logExceptionAsError(new IllegalStateException(String.format( "(%s), Via queue feature work only with a queue.", viaQueueName))); } else if (!CoreUtils.isNullOrEmpty(viaTopicName) && entityType == MessagingEntityType.QUEUE) { throw logger.logExceptionAsError(new IllegalStateException(String.format( "(%s), Via topic feature work only with a topic.", viaTopicName))); } final String entityName; final String viaEntityName = !CoreUtils.isNullOrEmpty(viaQueueName) ? viaQueueName : viaTopicName; switch (entityType) { case QUEUE: entityName = queueName; break; case SUBSCRIPTION: entityName = topicName; break; case UNKNOWN: entityName = connectionStringEntityName; break; default: throw logger.logExceptionAsError( new IllegalArgumentException("Unknown entity type: " + entityType)); } return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, viaEntityName); } /** * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage} * to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ }
class ServiceBusSenderClientBuilder { private String queueName; private String topicName; private String viaQueueName; private String viaTopicName; private ServiceBusSenderClientBuilder() { } /** * Sets the name of the Service Bus queue to publish messages to. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the name of the initial destination Service Bus queue to publish messages to. * * @param viaQueueName The initial destination of the message. * * @return The modified {@link ServiceBusSenderClientBuilder} object. * @see <a href="https: * Via</a> */ public ServiceBusSenderClientBuilder viaQueueName(String viaQueueName) { this.viaQueueName = viaQueueName; return this; } /** * Sets the name of the initial destination Service Bus topic to publish messages to. * * @param viaTopicName The initial destination of the message. * * @return The modified {@link ServiceBusSenderClientBuilder} object. * @see <a href="https: * Via</a> */ public ServiceBusSenderClientBuilder viaTopicName(String viaTopicName) { this.viaTopicName = viaTopicName; return this; } /** * Sets the name of the Service Bus topic to publish messages to. * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link * ServiceBusMessage} to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ public ServiceBusSenderAsyncClient buildAsyncClient() { final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); if (!CoreUtils.isNullOrEmpty(viaQueueName) && entityType == MessagingEntityType.SUBSCRIPTION) { throw logger.logExceptionAsError(new IllegalStateException(String.format( "(%s), Via queue feature work only with a queue.", viaQueueName))); } else if (!CoreUtils.isNullOrEmpty(viaTopicName) && entityType == MessagingEntityType.QUEUE) { throw logger.logExceptionAsError(new IllegalStateException(String.format( "(%s), Via topic feature work only with a topic.", viaTopicName))); } final String entityName; final String viaEntityName = !CoreUtils.isNullOrEmpty(viaQueueName) ? viaQueueName : viaTopicName; switch (entityType) { case QUEUE: entityName = queueName; break; case SUBSCRIPTION: entityName = topicName; break; case UNKNOWN: entityName = connectionStringEntityName; break; default: throw logger.logExceptionAsError( new IllegalArgumentException("Unknown entity type: " + entityType)); } return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, viaEntityName); } /** * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage} * to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ }
if the `IngestionEndpoint` key is absent, it should ideally default to `https://dc.services.visualstudio.com`.
/**
 * Sets the connection string to use for exporting telemetry events to Azure Monitor.
 * The {@code InstrumentationKey} entry is required; {@code IngestionEndpoint} is optional
 * and falls back to the public Azure Monitor ingestion endpoint when absent.
 *
 * @param connectionString The connection string for the Azure Monitor resource.
 * @return The updated {@link AzureMonitorExporterBuilder} object.
 * @throws NullPointerException If {@code connectionString} is {@code null}.
 * @throws IllegalArgumentException If {@code InstrumentationKey} is missing from the connection string.
 */
public AzureMonitorExporterBuilder connectionString(String connectionString) {
    this.instrumentationKey = extractValueFromConnectionString(connectionString, "InstrumentationKey");
    try {
        this.endpoint(extractValueFromConnectionString(connectionString, "IngestionEndpoint"));
    } catch (IllegalArgumentException ex) {
        // IngestionEndpoint is optional; default to the public Azure Monitor ingestion endpoint.
        this.endpoint("https://dc.services.visualstudio.com");
    }
    return this;
}
return this;
/**
 * Sets the connection string to use for exporting telemetry events to Azure Monitor.
 * The {@code InstrumentationKey} entry is required; {@code IngestionEndpoint} is optional
 * and falls back to the public Azure Monitor ingestion endpoint when absent.
 *
 * @param connectionString The connection string for the Azure Monitor resource.
 * @return The updated {@link AzureMonitorExporterBuilder} object.
 * @throws NullPointerException If {@code connectionString} is {@code null}.
 * @throws IllegalArgumentException If {@code InstrumentationKey} is missing from the connection string.
 */
public AzureMonitorExporterBuilder connectionString(String connectionString) {
    Map<String, String> keyValues = extractKeyValuesFromConnectionString(connectionString);
    if (!keyValues.containsKey("InstrumentationKey")) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("InstrumentationKey not found in connectionString"));
    }
    this.instrumentationKey = keyValues.get("InstrumentationKey");
    // IngestionEndpoint is optional; default to the public Azure Monitor ingestion endpoint.
    this.endpoint(keyValues.getOrDefault("IngestionEndpoint", "https://dc.services.visualstudio.com"));
    return this;
}
class AzureMonitorExporterBuilder { private final ClientLogger logger = new ClientLogger(AzureMonitorExporterBuilder.class); private final ApplicationInsightsClientImplBuilder restServiceClientBuilder; private String instrumentationKey; /** * Creates an instance of {@link AzureMonitorExporterBuilder}. */ public AzureMonitorExporterBuilder() { restServiceClientBuilder = new ApplicationInsightsClientImplBuilder(); } /** * Sets the service endpoint for the Azure Monitor Exporter. * @param endpoint The URL of the Azure Monitor Exporter endpoint. * @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException if {@code endpoint} is null. * @throws IllegalArgumentException if {@code endpoint} cannot be parsed into a valid URL. */ AzureMonitorExporterBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); try { URL url = new URL(endpoint); restServiceClientBuilder.host(url.getProtocol() + ": } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning( new IllegalArgumentException("'endpoint' must be a valid URL.", ex)); } return this; } /** * Sets the HTTP pipeline to use for the service client. If {@code pipeline} is set, all other settings are * ignored, apart from {@link * * @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder pipeline(HttpPipeline httpPipeline) { restServiceClientBuilder.pipeline(httpPipeline); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpClient(HttpClient client) { restServiceClientBuilder.httpClient(client); return this; } /** * Sets the logging configuration for HTTP requests and responses. 
* * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions logOptions) { restServiceClientBuilder.httpLogOptions(logOptions); return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * <p> * The default retry policy will be used if not provided to build {@link AzureMonitorExporterBuilder} . * @param retryPolicy user's retry policy applied to each request. * * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder retryPolicy(RetryPolicy retryPolicy) { restServiceClientBuilder.retryPolicy(retryPolicy); return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * @param policy The retry policy for service requests. * * @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException If {@code policy} is {@code null}. */ public AzureMonitorExporterBuilder addPolicy(HttpPipelinePolicy policy) { restServiceClientBuilder.addPolicy(Objects.requireNonNull(policy, "'policy' cannot be null.")); return this; } /** * Sets the configuration store that is used during construction of the service client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder configuration(Configuration configuration) { restServiceClientBuilder.configuration(configuration); return this; } /** * The connection string to use for exporting telemetry events to Azure Monitor. * @param connectionString The connection string for the Azure Monitor resource. 
* @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException If the connection string is {@code null}. * @throws IllegalArgumentException If the connection string is invalid. */ private String extractValueFromConnectionString(String connectionString, String key) { Objects.requireNonNull(connectionString); return Arrays.stream(connectionString.split(";")) .filter(keyValue -> { String[] keyValuePair = keyValue.split("="); return keyValuePair.length == 2 && keyValuePair[0].equalsIgnoreCase(key); }) .map(instrumentationKeyValue -> instrumentationKeyValue.split("=")[1]) .filter(iKey -> !CoreUtils.isNullOrEmpty(iKey)) .findFirst() .orElseThrow(() -> new IllegalArgumentException(key + " not found in connectionString")); } /** * Creates a {@link MonitorExporterClient} based on options set in the builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link MonitorExporterClient} is created. * * <p> * If {@link * endpoint} are used to create the {@link MonitorExporterAsyncClient client}. All other builder settings are * ignored. * </p> * @return A {@link MonitorExporterClient} with the options set from the builder. * @throws NullPointerException if {@link */ MonitorExporterClient buildClient() { return new MonitorExporterClient(buildAsyncClient()); } /** * Creates a {@link MonitorExporterAsyncClient} based on options set in the builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link MonitorExporterAsyncClient} is created. * * <p> * If {@link * endpoint} are used to create the {@link MonitorExporterAsyncClient client}. All other builder settings are * ignored. * </p> * @return A {@link MonitorExporterAsyncClient} with the options set from the builder. 
*/ MonitorExporterAsyncClient buildAsyncClient() { final SimpleModule ndjsonModule = new SimpleModule("Ndjson List Serializer"); JacksonAdapter jacksonAdapter = new JacksonAdapter(); jacksonAdapter.serializer().registerModule(ndjsonModule); ndjsonModule.addSerializer(new NdJsonSerializer()); restServiceClientBuilder.serializerAdapter(jacksonAdapter); ApplicationInsightsClientImpl restServiceClient = restServiceClientBuilder.buildClient(); return new MonitorExporterAsyncClient(restServiceClient); } /** * Creates an {@link AzureMonitorExporter} based on the options set in the builder. This exporter is an * implementation of OpenTelemetry {@link SpanExporter}. * * @return An instance of {@link AzureMonitorExporter}. * @throws NullPointerException if the instrumentation key is not set. */ public AzureMonitorExporter buildExporter() { Objects.requireNonNull(instrumentationKey, "'connectionString' cannot be null"); return new AzureMonitorExporter(buildClient(), instrumentationKey); } }
class AzureMonitorExporterBuilder { private final ClientLogger logger = new ClientLogger(AzureMonitorExporterBuilder.class); private final ApplicationInsightsClientImplBuilder restServiceClientBuilder; private String instrumentationKey; /** * Creates an instance of {@link AzureMonitorExporterBuilder}. */ public AzureMonitorExporterBuilder() { restServiceClientBuilder = new ApplicationInsightsClientImplBuilder(); } /** * Sets the service endpoint for the Azure Monitor Exporter. * @param endpoint The URL of the Azure Monitor Exporter endpoint. * @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException if {@code endpoint} is null. * @throws IllegalArgumentException if {@code endpoint} cannot be parsed into a valid URL. */ AzureMonitorExporterBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); try { URL url = new URL(endpoint); restServiceClientBuilder.host(url.getProtocol() + ": } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning( new IllegalArgumentException("'endpoint' must be a valid URL.", ex)); } return this; } /** * Sets the HTTP pipeline to use for the service client. If {@code pipeline} is set, all other settings are * ignored, apart from {@link * * @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder pipeline(HttpPipeline httpPipeline) { restServiceClientBuilder.pipeline(httpPipeline); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpClient(HttpClient client) { restServiceClientBuilder.httpClient(client); return this; } /** * Sets the logging configuration for HTTP requests and responses. 
* * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions logOptions) { restServiceClientBuilder.httpLogOptions(logOptions); return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * <p> * The default retry policy will be used if not provided to build {@link AzureMonitorExporterBuilder} . * @param retryPolicy user's retry policy applied to each request. * * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder retryPolicy(RetryPolicy retryPolicy) { restServiceClientBuilder.retryPolicy(retryPolicy); return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * @param policy The retry policy for service requests. * * @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException If {@code policy} is {@code null}. */ public AzureMonitorExporterBuilder addPolicy(HttpPipelinePolicy policy) { restServiceClientBuilder.addPolicy(Objects.requireNonNull(policy, "'policy' cannot be null.")); return this; } /** * Sets the configuration store that is used during construction of the service client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder configuration(Configuration configuration) { restServiceClientBuilder.configuration(configuration); return this; } /** * The connection string to use for exporting telemetry events to Azure Monitor. * @param connectionString The connection string for the Azure Monitor resource. 
* @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException If the connection string is {@code null}. * @throws IllegalArgumentException If the connection string is invalid. */ private Map<String, String> extractKeyValuesFromConnectionString(String connectionString) { Objects.requireNonNull(connectionString); Map<String, String> keyValues = new HashMap<>(); String[] splits = connectionString.split(";"); for (String split : splits) { String[] keyValPair = split.split("="); if (keyValPair.length == 2) { keyValues.put(keyValPair[0], keyValPair[1]); } } return keyValues; } /** * Creates a {@link MonitorExporterClient} based on options set in the builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link MonitorExporterClient} is created. * * <p> * If {@link * endpoint} are used to create the {@link MonitorExporterAsyncClient client}. All other builder settings are * ignored. * </p> * @return A {@link MonitorExporterClient} with the options set from the builder. * @throws NullPointerException if {@link */ MonitorExporterClient buildClient() { return new MonitorExporterClient(buildAsyncClient()); } /** * Creates a {@link MonitorExporterAsyncClient} based on options set in the builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link MonitorExporterAsyncClient} is created. * * <p> * If {@link * endpoint} are used to create the {@link MonitorExporterAsyncClient client}. All other builder settings are * ignored. * </p> * @return A {@link MonitorExporterAsyncClient} with the options set from the builder. 
*/ MonitorExporterAsyncClient buildAsyncClient() { final SimpleModule ndjsonModule = new SimpleModule("Ndjson List Serializer"); JacksonAdapter jacksonAdapter = new JacksonAdapter(); jacksonAdapter.serializer().registerModule(ndjsonModule); ndjsonModule.addSerializer(new NdJsonSerializer()); restServiceClientBuilder.serializerAdapter(jacksonAdapter); ApplicationInsightsClientImpl restServiceClient = restServiceClientBuilder.buildClient(); return new MonitorExporterAsyncClient(restServiceClient); } /** * Creates an {@link AzureMonitorExporter} based on the options set in the builder. This exporter is an * implementation of OpenTelemetry {@link SpanExporter}. * * @return An instance of {@link AzureMonitorExporter}. * @throws NullPointerException if the instrumentation key is not set. */ public AzureMonitorExporter buildExporter() { Objects.requireNonNull(instrumentationKey, "'connectionString' cannot be null"); return new AzureMonitorExporter(buildClient(), instrumentationKey); } }
Changed `calcTotalTimeout` to `getTotalTimeout`.
/**
 * Creates a <b>synchronous</b> {@link ServiceBusSenderClient} wrapping a newly built
 * asynchronous client. The timeout passed to the client is computed from the retry
 * options and bounds each blocking operation.
 *
 * @return A new {@link ServiceBusSenderClient}.
 */
public ServiceBusSenderClient buildClient() { return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.calcTotalTimeout(retryOptions)); }
return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.calcTotalTimeout(retryOptions));
/**
 * Creates a <b>synchronous</b> {@link ServiceBusSenderClient} wrapping a newly built
 * asynchronous client. The timeout passed to the client is computed from the retry
 * options and bounds each blocking operation.
 *
 * @return A new {@link ServiceBusSenderClient}.
 */
public ServiceBusSenderClient buildClient() { return new ServiceBusSenderClient(buildAsyncClient(), MessageUtils.getTotalTimeout(retryOptions)); }
class ServiceBusSenderClientBuilder { private String queueName; private String topicName; private String viaQueueName; private String viaTopicName; private ServiceBusSenderClientBuilder() { } /** * Sets the name of the Service Bus queue to publish messages to. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the name of the initial destination Service Bus queue to publish messages to. * * @param viaQueueName The initial destination of the message. * * @return The modified {@link ServiceBusSenderClientBuilder} object. * @see <a href="https: * Via</a> */ public ServiceBusSenderClientBuilder viaQueueName(String viaQueueName) { this.viaQueueName = viaQueueName; return this; } /** * Sets the name of the initial destination Service Bus topic to publish messages to. * * @param viaTopicName The initial destination of the message. * * @return The modified {@link ServiceBusSenderClientBuilder} object. * @see <a href="https: * Via</a> */ public ServiceBusSenderClientBuilder viaTopicName(String viaTopicName) { this.viaTopicName = viaTopicName; return this; } /** * Sets the name of the Service Bus topic to publish messages to. * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link * ServiceBusMessage} to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ public ServiceBusSenderAsyncClient buildAsyncClient() { final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); if (!CoreUtils.isNullOrEmpty(viaQueueName) && entityType == MessagingEntityType.SUBSCRIPTION) { throw logger.logExceptionAsError(new IllegalStateException(String.format( "(%s), Via queue feature work only with a queue.", viaQueueName))); } else if (!CoreUtils.isNullOrEmpty(viaTopicName) && entityType == MessagingEntityType.QUEUE) { throw logger.logExceptionAsError(new IllegalStateException(String.format( "(%s), Via topic feature work only with a topic.", viaTopicName))); } final String entityName; final String viaEntityName = !CoreUtils.isNullOrEmpty(viaQueueName) ? viaQueueName : viaTopicName; switch (entityType) { case QUEUE: entityName = queueName; break; case SUBSCRIPTION: entityName = topicName; break; case UNKNOWN: entityName = connectionStringEntityName; break; default: throw logger.logExceptionAsError( new IllegalArgumentException("Unknown entity type: " + entityType)); } return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, viaEntityName); } /** * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage} * to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ }
class ServiceBusSenderClientBuilder { private String queueName; private String topicName; private String viaQueueName; private String viaTopicName; private ServiceBusSenderClientBuilder() { } /** * Sets the name of the Service Bus queue to publish messages to. * * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder queueName(String queueName) { this.queueName = queueName; return this; } /** * Sets the name of the initial destination Service Bus queue to publish messages to. * * @param viaQueueName The initial destination of the message. * * @return The modified {@link ServiceBusSenderClientBuilder} object. * @see <a href="https: * Via</a> */ public ServiceBusSenderClientBuilder viaQueueName(String viaQueueName) { this.viaQueueName = viaQueueName; return this; } /** * Sets the name of the initial destination Service Bus topic to publish messages to. * * @param viaTopicName The initial destination of the message. * * @return The modified {@link ServiceBusSenderClientBuilder} object. * @see <a href="https: * Via</a> */ public ServiceBusSenderClientBuilder viaTopicName(String viaTopicName) { this.viaTopicName = viaTopicName; return this; } /** * Sets the name of the Service Bus topic to publish messages to. * * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSenderClientBuilder} object. */ public ServiceBusSenderClientBuilder topicName(String topicName) { this.topicName = topicName; return this; } /** * Creates an <b>asynchronous</b> {@link ServiceBusSenderAsyncClient client} for transmitting {@link * ServiceBusMessage} to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ public ServiceBusSenderAsyncClient buildAsyncClient() { final ServiceBusConnectionProcessor connectionProcessor = getOrCreateConnectionProcessor(messageSerializer); final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); if (!CoreUtils.isNullOrEmpty(viaQueueName) && entityType == MessagingEntityType.SUBSCRIPTION) { throw logger.logExceptionAsError(new IllegalStateException(String.format( "(%s), Via queue feature work only with a queue.", viaQueueName))); } else if (!CoreUtils.isNullOrEmpty(viaTopicName) && entityType == MessagingEntityType.QUEUE) { throw logger.logExceptionAsError(new IllegalStateException(String.format( "(%s), Via topic feature work only with a topic.", viaTopicName))); } final String entityName; final String viaEntityName = !CoreUtils.isNullOrEmpty(viaQueueName) ? viaQueueName : viaTopicName; switch (entityType) { case QUEUE: entityName = queueName; break; case SUBSCRIPTION: entityName = topicName; break; case UNKNOWN: entityName = connectionStringEntityName; break; default: throw logger.logExceptionAsError( new IllegalArgumentException("Unknown entity type: " + entityType)); } return new ServiceBusSenderAsyncClient(entityName, entityType, connectionProcessor, retryOptions, tracerProvider, messageSerializer, ServiceBusClientBuilder.this::onClientClose, viaEntityName); } /** * Creates a <b>synchronous</b> {@link ServiceBusSenderClient client} for transmitting {@link ServiceBusMessage} * to a Service Bus queue or topic. * * @return A new {@link ServiceBusSenderAsyncClient} for transmitting to a Service queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. 
It is also thrown if the Service Bus {@link * * {@link * @throws IllegalArgumentException if the entity type is not a queue or a topic. */ }
nit: simplify to below? ```suggestion logger.warning("Failed to restore from location - {}", backupFolderUrl, error)) ```
/**
 * Initiates a full restore of the Key Vault from the backup located at {@code backupFolderUrl}.
 *
 * @param backupFolderUrl The URL of the specific backup folder (container URL + folder name).
 * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob container.
 * @param context Additional context that is passed through the HTTP pipeline during the service call.
 * @return A {@link Mono} containing a {@link Response} whose value is the
 * {@link KeyVaultRestoreOperation restore operation} status.
 */
Mono<Response<KeyVaultRestoreOperation>> restoreWithResponse(String backupFolderUrl, String sasToken,
    Context context) {
    // Split the backup folder URL into the container URL and the folder name the
    // service API expects as separate parameters.
    String[] segments = backupFolderUrl.split("/");
    String folderName = segments[segments.length - 1];
    String containerUrl = backupFolderUrl.substring(0, backupFolderUrl.length() - folderName.length());

    SASTokenParameter sasTokenParameter = new SASTokenParameter()
        .setStorageResourceUri(containerUrl)
        .setToken(sasToken);
    RestoreOperationParameters restoreOperationParameters = new RestoreOperationParameters()
        .setSasTokenParameters(sasTokenParameter)
        .setFolderToRestore(folderName);

    return clientImpl.fullRestoreOperationWithResponseAsync(vaultUrl, restoreOperationParameters,
        context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
        // Log the original folder URL directly instead of re-assembling it from its parts.
        .doOnRequest(ignored -> logger.info("Restoring from location - {}", backupFolderUrl))
        .doOnSuccess(response -> logger.info("Restored from location - {}", backupFolderUrl))
        .doOnError(error -> logger.warning("Failed to restore from location - {}", backupFolderUrl, error))
        .map(restoreOperationResponse -> new SimpleResponse<>(restoreOperationResponse.getRequest(),
            restoreOperationResponse.getStatusCode(), restoreOperationResponse.getHeaders(),
            (KeyVaultRestoreOperation) transformToLongRunningOperation(restoreOperationResponse.getValue())));
}
logger.warning("Failed to restore from location - {}{}", containerUrl, folderName, error))
new SASTokenParameter() .setStorageResourceUri(blobStorageUrl) .setToken(sasToken); return clientImpl.fullBackupWithResponseAsync(vaultUrl, sasTokenParameter, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE)) .doOnRequest(ignored -> logger.info("Backing up at URL - {}
class KeyVaultBackupAsyncClient { private static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault"; private static final Duration DEFAULT_POLLING_INTERVAL = Duration.ofSeconds(1); /** * The logger to be used. */ private final ClientLogger logger = new ClientLogger(KeyVaultBackupAsyncClient.class); /** * The underlying AutoRest client used to interact with the Key Vault service. */ private final KeyVaultBackupClientImpl clientImpl; /** * The Kay Vault URL this client is associated to. */ private final String vaultUrl; Duration getDefaultPollingInterval() { return DEFAULT_POLLING_INTERVAL; } /** * Package private constructor to be used by {@link KeyVaultBackupClientBuilder}. */ KeyVaultBackupAsyncClient(URL vaultUrl, HttpPipeline httpPipeline) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); clientImpl = new KeyVaultBackupClientImplBuilder() .pipeline(httpPipeline) .buildClient(); } /** * Gets the URL for the Key Vault this client is associated with. * * @return The Key Vault URL. */ public String getVaultUrl() { return this.vaultUrl; } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws NullPointerException if the {@code blobStorageUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> beginBackup(String blobStorageUrl, String sasToken) { return beginBackup(blobStorageUrl, sasToken, getDefaultPollingInterval()); } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. 
* @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws NullPointerException if the {@code blobStorageUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> beginBackup(String blobStorageUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(blobStorageUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'blobStorageUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, backupActivationOperation(blobStorageUrl, sasToken), backupPollOperation(), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), backupFetchOperation()); } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. 
*/ Mono<Response<KeyVaultBackupOperation>> backupWithResponse(String blobStorageUrl, String sasToken, Context context) { SASTokenParameter sasTokenParameter = ", blobStorageUrl)) .doOnSuccess(response -> logger.info("Backed up at URL - {}", response.getValue().getAzureStorageBlobContainerUri())) .doOnError(error -> logger.warning("Failed to backup at URL - {}", blobStorageUrl, error)) .map(backupOperationResponse -> new SimpleResponse<>(backupOperationResponse.getRequest(), backupOperationResponse.getStatusCode(), backupOperationResponse.getHeaders(), (KeyVaultBackupOperation) transformToLongRunningOperation(backupOperationResponse.getValue()))); } private Function<PollingContext<KeyVaultBackupOperation>, Mono<KeyVaultBackupOperation>> backupActivationOperation(String blobStorageUrl, String sasToken) { return (pollingContext) -> { try { return withContext(context -> backupWithResponse(blobStorageUrl, sasToken, context)) .flatMap(backupResponse -> Mono.just(backupResponse.getValue())); } catch (RuntimeException e) { return monoError(logger, e); } }; } private Function<PollingContext<KeyVaultBackupOperation>, Mono<PollResponse<KeyVaultBackupOperation>>> backupPollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultBackupOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultBackupOperation keyVaultBackupOperation = pollResponse.getValue(); if (keyVaultBackupOperation == null) { logger.warning("Backup operation does not exist. 
Activation operation failed."); return Mono.just(new PollResponse<KeyVaultBackupOperation>( LongRunningOperationStatus.fromString("BACKUP_START_FAILED", true), null)); } final String jobId = keyVaultBackupOperation.getJobId(); return withContext(context -> clientImpl.fullBackupStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultBackupOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processBackupOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Function<PollingContext<KeyVaultBackupOperation>, Mono<String>> backupFetchOperation() { return (pollingContext) -> { String blobContainerUri = pollingContext.getLatestResponse().getValue().getAzureStorageBlobContainerUri(); if (blobContainerUri == null) { return Mono.empty(); } else { return Mono.just(blobContainerUri); } }; } private static Mono<PollResponse<KeyVaultBackupOperation>> processBackupOperationResponse(Response<KeyVaultBackupOperation> response) { String operationStatus = response.getValue().getStatus().toLowerCase(Locale.US); return Mono.just(new PollResponse<>( toLongRunningOperationStatus(operationStatus.toLowerCase(Locale.US)), response.getValue())); } private static LongRunningOperationStatus toLongRunningOperationStatus(String operationStatus) { switch (operationStatus) { case "inprogress": return LongRunningOperationStatus.IN_PROGRESS; case "succeeded": return LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; case "failed": return LongRunningOperationStatus.FAILED; default: return LongRunningOperationStatus.fromString("POLLING_FAILED", true); } } /** * Gets a pending {@link KeyVaultBackupOperation backup operation} from the Key Vault. * * @param jobId The operation identifier. 
* @throws NullPointerException if the {@code jobId} is null. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> getBackupOperation(String jobId) { Objects.requireNonNull(jobId, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'jobId'")); return new PollerFlux<>(Duration.ofSeconds(1), (pollingContext) -> Mono.empty(), backupStatusPollOperation(jobId), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), backupFetchOperation()); } private Function<PollingContext<KeyVaultBackupOperation>, Mono<PollResponse<KeyVaultBackupOperation>>> backupStatusPollOperation(String jobId) { return (pollingContext) -> withContext(context -> clientImpl.fullBackupStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultBackupOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processBackupOperationResponse); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code backupFolderUrl} or {@code sasToken} are {@code null}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginRestore(String backupFolderUrl, String sasToken) { return beginRestore(backupFolderUrl, sasToken, getDefaultPollingInterval()); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code backupFolderUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginRestore(String backupFolderUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(backupFolderUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'backupFolderUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, restoreActivationOperation(backupFolderUrl, sasToken), restorePollOperation(), (pollingContext, firstResponse) -> Mono.empty(), (pollingContext) -> Mono.empty()); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. 
An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. */ Mono<Response<KeyVaultRestoreOperation>> restoreWithResponse(String backupFolderUrl, String sasToken, Context context) { String[] segments = backupFolderUrl.split("/"); String folderName = segments[segments.length - 1]; String containerUrl = backupFolderUrl.substring(0, backupFolderUrl.length() - folderName.length()); SASTokenParameter sasTokenParameter = new SASTokenParameter() .setStorageResourceUri(containerUrl) .setToken(sasToken); RestoreOperationParameters restoreOperationParameters = new RestoreOperationParameters() .setSasTokenParameters(sasTokenParameter) .setFolderToRestore(folderName); return clientImpl.fullRestoreOperationWithResponseAsync(vaultUrl, restoreOperationParameters, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE)) .doOnRequest(ignored -> logger.info("Restoring from location - {}{}", containerUrl, folderName)) .doOnSuccess(response -> logger.info("Restored from location - {}{}", containerUrl, folderName)) .doOnError(error -> logger.warning("Failed to restore from location - {}{}", containerUrl, folderName, error)) .map(restoreOperationResponse -> new SimpleResponse<>(restoreOperationResponse.getRequest(), restoreOperationResponse.getStatusCode(), restoreOperationResponse.getHeaders(), (KeyVaultRestoreOperation) transformToLongRunningOperation(restoreOperationResponse.getValue()))); } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<KeyVaultRestoreOperation>> restoreActivationOperation(String backupFolderUrl, String sasToken) { return (pollingContext) -> { try { return 
withContext(context -> restoreWithResponse(backupFolderUrl, sasToken, context)) .flatMap(restoreResponse -> Mono.just(restoreResponse.getValue())); } catch (RuntimeException e) { return monoError(logger, e); } }; } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> restorePollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultRestoreOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultRestoreOperation keyVaultRestoreOperation = pollResponse.getValue(); if (keyVaultRestoreOperation == null) { logger.warning("Restore operation does not exist. Activation operation failed."); return Mono.just(new PollResponse<KeyVaultRestoreOperation>( LongRunningOperationStatus.fromString("RESTORE_START_FAILED", true), null)); } final String jobId = keyVaultRestoreOperation.getJobId(); return withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private static Mono<PollResponse<KeyVaultRestoreOperation>> processRestoreOperationResponse(Response<KeyVaultRestoreOperation> response) { String operationStatus = response.getValue().getStatus().toLowerCase(Locale.US); return Mono.just(new PollResponse<>( toLongRunningOperationStatus(operationStatus.toLowerCase(Locale.US)), response.getValue())); } /** * Gets a pending {@link KeyVaultRestoreOperation full or selective 
restore operation} from the Key Vault. * * @param jobId The operation identifier. * @throws NullPointerException if the {@code jobId} is null. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation restore operation} status. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> getRestoreOperation(String jobId) { Objects.requireNonNull(jobId, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'jobId'")); return new PollerFlux<>(Duration.ofSeconds(1), (pollingContext) -> Mono.empty(), restoreStatusPollOperation(jobId), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), (pollingContext) -> Mono.empty()); } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> restoreStatusPollOperation(String jobId) { return (pollingContext) -> withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. 
* @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code keyName}, {@code backupFolderUrl} or {@code sasToken} are {@code * null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginSelectiveRestore(String keyName, String backupFolderUrl, String sasToken) { return beginSelectiveRestore(keyName, backupFolderUrl, sasToken, getDefaultPollingInterval()); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code keyName}, {@code backupFolderUrl} or {@code sasToken} are {@code * null}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginSelectiveRestore(String keyName, String backupFolderUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(keyName, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'keyName'")); Objects.requireNonNull(backupFolderUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'backupFolderUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, selectiveRestoreActivationOperation(keyName, backupFolderUrl, sasToken), selectiveRestorePollOperation(), (pollingContext, firstResponse) -> Mono.empty(), (pollingContext) -> Mono.empty()); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. 
*/ Mono<Response<KeyVaultRestoreOperation>> selectiveRestoreWithResponse(String keyName, String backupFolderUrl, String sasToken, Context context) { String[] segments = backupFolderUrl.split("/"); String folderName = segments[segments.length - 1]; String containerUrl = backupFolderUrl.substring(0, backupFolderUrl.length() - folderName.length()); SASTokenParameter sasTokenParameter = new SASTokenParameter() .setStorageResourceUri(containerUrl) .setToken(sasToken); SelectiveKeyRestoreOperationParameters selectiveKeyRestoreOperationParameters = new SelectiveKeyRestoreOperationParameters() .setSasTokenParameters(sasTokenParameter) .setFolder(folderName); return clientImpl.selectiveKeyRestoreOperationWithResponseAsync(vaultUrl, keyName, selectiveKeyRestoreOperationParameters, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE)) .doOnRequest(ignored -> logger.info("Restoring key \"{}\" from location - {}{}", keyName, containerUrl, folderName)) .doOnSuccess(response -> logger.info("Restored key \"{}\" from location - {}{}", keyName, containerUrl, folderName)) .doOnError(error -> logger.warning("Failed to restore key \"{}\" from location - {}{}", keyName, containerUrl, folderName, error)) .map(restoreOperationResponse -> new SimpleResponse<>(restoreOperationResponse.getRequest(), restoreOperationResponse.getStatusCode(), restoreOperationResponse.getHeaders(), (KeyVaultRestoreOperation) transformToLongRunningOperation(restoreOperationResponse.getValue()))); } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<KeyVaultRestoreOperation>> selectiveRestoreActivationOperation(String keyName, String backupFolderUrl, String sasToken) { return (pollingContext) -> { try { return withContext(context -> selectiveRestoreWithResponse(keyName, backupFolderUrl, sasToken, context)) .flatMap(selectiveKeyRestoreResponse -> Mono.just(selectiveKeyRestoreResponse.getValue())); } catch (RuntimeException e) { return monoError(logger, e); } }; } private 
Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> selectiveRestorePollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultRestoreOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultRestoreOperation keyVaultRestoreOperation = pollResponse.getValue(); if (keyVaultRestoreOperation == null) { logger.warning("Restore operation does not exist. Activation operation failed."); return Mono.just(new PollResponse<KeyVaultRestoreOperation>( LongRunningOperationStatus.fromString("SELECTIVE_RESTORE_START_FAILED", true), null)); } final String jobId = keyVaultRestoreOperation.getJobId(); return withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private static <O> KeyVaultLongRunningOperation transformToLongRunningOperation(O operation) { if (operation instanceof RestoreOperation) { RestoreOperation restoreOperation = (RestoreOperation) operation; return new KeyVaultRestoreOperation(restoreOperation.getStatus(), restoreOperation.getStatusDetails(), createKeyVaultErrorFromError(restoreOperation.getError()), restoreOperation.getJobId(), restoreOperation.getStartTime(), restoreOperation.getEndTime()); } else if (operation instanceof SelectiveKeyRestoreOperation) { SelectiveKeyRestoreOperation selectiveKeyRestoreOperation = (SelectiveKeyRestoreOperation) 
operation; return new KeyVaultRestoreOperation(selectiveKeyRestoreOperation.getStatus(), selectiveKeyRestoreOperation.getStatusDetails(), createKeyVaultErrorFromError(selectiveKeyRestoreOperation.getError()), selectiveKeyRestoreOperation.getJobId(), selectiveKeyRestoreOperation.getStartTime(), selectiveKeyRestoreOperation.getEndTime()); } else if (operation instanceof FullBackupOperation) { FullBackupOperation fullBackupOperation = (FullBackupOperation) operation; return new KeyVaultBackupOperation(fullBackupOperation.getStatus(), fullBackupOperation.getStatusDetails(), createKeyVaultErrorFromError(fullBackupOperation.getError()), fullBackupOperation.getJobId(), fullBackupOperation.getStartTime(), fullBackupOperation.getEndTime(), fullBackupOperation.getAzureStorageBlobContainerUri()); } else { throw new UnsupportedOperationException("Operation type not supported"); } } private static KeyVaultError createKeyVaultErrorFromError(Error error) { if (error == null) { return null; } return new KeyVaultError(error.getCode(), error.getMessage(), createKeyVaultErrorFromError(error.getInnerError())); } }
class KeyVaultBackupAsyncClient { private static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault"; private static final Duration DEFAULT_POLLING_INTERVAL = Duration.ofSeconds(1); /** * The logger to be used. */ private final ClientLogger logger = new ClientLogger(KeyVaultBackupAsyncClient.class); /** * The underlying AutoRest client used to interact with the Key Vault service. */ private final KeyVaultBackupClientImpl clientImpl; /** * The Kay Vault URL this client is associated to. */ private final String vaultUrl; Duration getDefaultPollingInterval() { return DEFAULT_POLLING_INTERVAL; } /** * Package private constructor to be used by {@link KeyVaultBackupClientBuilder}. */ KeyVaultBackupAsyncClient(URL vaultUrl, HttpPipeline httpPipeline) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); clientImpl = new KeyVaultBackupClientImplBuilder() .pipeline(httpPipeline) .buildClient(); } /** * Gets the URL for the Key Vault this client is associated with. * * @return The Key Vault URL. */ public String getVaultUrl() { return this.vaultUrl; } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws NullPointerException if the {@code blobStorageUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> beginBackup(String blobStorageUrl, String sasToken) { return beginBackup(blobStorageUrl, sasToken, getDefaultPollingInterval()); } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. 
* @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws NullPointerException if the {@code blobStorageUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> beginBackup(String blobStorageUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(blobStorageUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'blobStorageUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, backupActivationOperation(blobStorageUrl, sasToken), backupPollOperation(), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), backupFetchOperation()); } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. 
*/ Mono<Response<KeyVaultBackupOperation>> backupWithResponse(String blobStorageUrl, String sasToken, Context context) { SASTokenParameter sasTokenParameter = ", blobStorageUrl)) .doOnSuccess(response -> logger.info("Backed up at URL - {}", response.getValue().getAzureStorageBlobContainerUri())) .doOnError(error -> logger.warning("Failed to backup at URL - {}", blobStorageUrl, error)) .map(backupOperationResponse -> new SimpleResponse<>(backupOperationResponse.getRequest(), backupOperationResponse.getStatusCode(), backupOperationResponse.getHeaders(), (KeyVaultBackupOperation) transformToLongRunningOperation(backupOperationResponse.getValue()))); } private Function<PollingContext<KeyVaultBackupOperation>, Mono<KeyVaultBackupOperation>> backupActivationOperation(String blobStorageUrl, String sasToken) { return (pollingContext) -> { try { return withContext(context -> backupWithResponse(blobStorageUrl, sasToken, context)) .flatMap(backupResponse -> Mono.just(backupResponse.getValue())); } catch (RuntimeException e) { return monoError(logger, e); } }; } private Function<PollingContext<KeyVaultBackupOperation>, Mono<PollResponse<KeyVaultBackupOperation>>> backupPollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultBackupOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultBackupOperation keyVaultBackupOperation = pollResponse.getValue(); if (keyVaultBackupOperation == null) { logger.warning("Backup operation does not exist. 
Activation operation failed."); return Mono.just(new PollResponse<KeyVaultBackupOperation>( LongRunningOperationStatus.fromString("BACKUP_START_FAILED", true), null)); } final String jobId = keyVaultBackupOperation.getJobId(); return withContext(context -> clientImpl.fullBackupStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultBackupOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processBackupOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Function<PollingContext<KeyVaultBackupOperation>, Mono<String>> backupFetchOperation() { return (pollingContext) -> { String blobContainerUri = pollingContext.getLatestResponse().getValue().getAzureStorageBlobContainerUri(); if (blobContainerUri == null) { return Mono.empty(); } else { return Mono.just(blobContainerUri); } }; } private static Mono<PollResponse<KeyVaultBackupOperation>> processBackupOperationResponse(Response<KeyVaultBackupOperation> response) { String operationStatus = response.getValue().getStatus().toLowerCase(Locale.US); return Mono.just(new PollResponse<>( toLongRunningOperationStatus(operationStatus.toLowerCase(Locale.US)), response.getValue())); } private static LongRunningOperationStatus toLongRunningOperationStatus(String operationStatus) { switch (operationStatus) { case "inprogress": return LongRunningOperationStatus.IN_PROGRESS; case "succeeded": return LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; case "failed": return LongRunningOperationStatus.FAILED; default: return LongRunningOperationStatus.fromString("POLLING_FAILED", true); } } /** * Gets a pending {@link KeyVaultBackupOperation backup operation} from the Key Vault. * * @param jobId The operation identifier. 
* @throws NullPointerException if the {@code jobId} is null. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> getBackupOperation(String jobId) { Objects.requireNonNull(jobId, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'jobId'")); return new PollerFlux<>(Duration.ofSeconds(1), (pollingContext) -> Mono.empty(), backupStatusPollOperation(jobId), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), backupFetchOperation()); } private Function<PollingContext<KeyVaultBackupOperation>, Mono<PollResponse<KeyVaultBackupOperation>>> backupStatusPollOperation(String jobId) { return (pollingContext) -> withContext(context -> clientImpl.fullBackupStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultBackupOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processBackupOperationResponse); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code backupFolderUrl} or {@code sasToken} are {@code null}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginRestore(String backupFolderUrl, String sasToken) { return beginRestore(backupFolderUrl, sasToken, getDefaultPollingInterval()); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code backupFolderUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginRestore(String backupFolderUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(backupFolderUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'backupFolderUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, restoreActivationOperation(backupFolderUrl, sasToken), restorePollOperation(), (pollingContext, firstResponse) -> Mono.empty(), (pollingContext) -> Mono.empty()); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. 
An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. */ Mono<Response<KeyVaultRestoreOperation>> restoreWithResponse(String backupFolderUrl, String sasToken, Context context) { String[] segments = backupFolderUrl.split("/"); String folderName = segments[segments.length - 1]; String containerUrl = backupFolderUrl.substring(0, backupFolderUrl.length() - folderName.length()); SASTokenParameter sasTokenParameter = new SASTokenParameter() .setStorageResourceUri(containerUrl) .setToken(sasToken); RestoreOperationParameters restoreOperationParameters = new RestoreOperationParameters() .setSasTokenParameters(sasTokenParameter) .setFolderToRestore(folderName); return clientImpl.fullRestoreOperationWithResponseAsync(vaultUrl, restoreOperationParameters, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE)) .doOnRequest(ignored -> logger.info("Restoring from location - {}", backupFolderUrl)) .doOnSuccess(response -> logger.info("Restored from location - {}", backupFolderUrl)) .doOnError(error -> logger.warning("Failed to restore from location - {}", backupFolderUrl, error)) .map(restoreOperationResponse -> new SimpleResponse<>(restoreOperationResponse.getRequest(), restoreOperationResponse.getStatusCode(), restoreOperationResponse.getHeaders(), (KeyVaultRestoreOperation) transformToLongRunningOperation(restoreOperationResponse.getValue()))); } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<KeyVaultRestoreOperation>> restoreActivationOperation(String backupFolderUrl, String sasToken) { return (pollingContext) -> { try { return withContext(context -> 
restoreWithResponse(backupFolderUrl, sasToken, context)) .flatMap(restoreResponse -> Mono.just(restoreResponse.getValue())); } catch (RuntimeException e) { return monoError(logger, e); } }; } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> restorePollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultRestoreOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultRestoreOperation keyVaultRestoreOperation = pollResponse.getValue(); if (keyVaultRestoreOperation == null) { logger.warning("Restore operation does not exist. Activation operation failed."); return Mono.just(new PollResponse<KeyVaultRestoreOperation>( LongRunningOperationStatus.fromString("RESTORE_START_FAILED", true), null)); } final String jobId = keyVaultRestoreOperation.getJobId(); return withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private static Mono<PollResponse<KeyVaultRestoreOperation>> processRestoreOperationResponse(Response<KeyVaultRestoreOperation> response) { String operationStatus = response.getValue().getStatus().toLowerCase(Locale.US); return Mono.just(new PollResponse<>( toLongRunningOperationStatus(operationStatus.toLowerCase(Locale.US)), response.getValue())); } /** * Gets a pending {@link KeyVaultRestoreOperation full or selective restore operation} 
from the Key Vault. * * @param jobId The operation identifier. * @throws NullPointerException if the {@code jobId} is null. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation restore operation} status. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> getRestoreOperation(String jobId) { Objects.requireNonNull(jobId, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'jobId'")); return new PollerFlux<>(Duration.ofSeconds(1), (pollingContext) -> Mono.empty(), restoreStatusPollOperation(jobId), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), (pollingContext) -> Mono.empty()); } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> restoreStatusPollOperation(String jobId) { return (pollingContext) -> withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. 
* @throws NullPointerException if the {@code keyName}, {@code backupFolderUrl} or {@code sasToken} are {@code * null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginSelectiveRestore(String keyName, String backupFolderUrl, String sasToken) { return beginSelectiveRestore(keyName, backupFolderUrl, sasToken, getDefaultPollingInterval()); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code keyName}, {@code backupFolderUrl} or {@code sasToken} are {@code * null}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginSelectiveRestore(String keyName, String backupFolderUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(keyName, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'keyName'")); Objects.requireNonNull(backupFolderUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'backupFolderUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, selectiveRestoreActivationOperation(keyName, backupFolderUrl, sasToken), selectiveRestorePollOperation(), (pollingContext, firstResponse) -> Mono.empty(), (pollingContext) -> Mono.empty()); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. 
*/ Mono<Response<KeyVaultRestoreOperation>> selectiveRestoreWithResponse(String keyName, String backupFolderUrl, String sasToken, Context context) { String[] segments = backupFolderUrl.split("/"); String folderName = segments[segments.length - 1]; String containerUrl = backupFolderUrl.substring(0, backupFolderUrl.length() - folderName.length()); SASTokenParameter sasTokenParameter = new SASTokenParameter() .setStorageResourceUri(containerUrl) .setToken(sasToken); SelectiveKeyRestoreOperationParameters selectiveKeyRestoreOperationParameters = new SelectiveKeyRestoreOperationParameters() .setSasTokenParameters(sasTokenParameter) .setFolder(folderName); return clientImpl.selectiveKeyRestoreOperationWithResponseAsync(vaultUrl, keyName, selectiveKeyRestoreOperationParameters, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE)) .doOnRequest(ignored -> logger.info("Restoring key \"{}\" from location - {}", keyName, backupFolderUrl)) .doOnSuccess(response -> logger.info("Restored key \"{}\" from location - {}", keyName, backupFolderUrl)) .doOnError(error -> logger.warning("Failed to restore key \"{}\" from location - {}", keyName, backupFolderUrl, error)) .map(restoreOperationResponse -> new SimpleResponse<>(restoreOperationResponse.getRequest(), restoreOperationResponse.getStatusCode(), restoreOperationResponse.getHeaders(), (KeyVaultRestoreOperation) transformToLongRunningOperation(restoreOperationResponse.getValue()))); } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<KeyVaultRestoreOperation>> selectiveRestoreActivationOperation(String keyName, String backupFolderUrl, String sasToken) { return (pollingContext) -> { try { return withContext(context -> selectiveRestoreWithResponse(keyName, backupFolderUrl, sasToken, context)) .flatMap(selectiveKeyRestoreResponse -> Mono.just(selectiveKeyRestoreResponse.getValue())); } catch (RuntimeException e) { return monoError(logger, e); } }; } private 
Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> selectiveRestorePollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultRestoreOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultRestoreOperation keyVaultRestoreOperation = pollResponse.getValue(); if (keyVaultRestoreOperation == null) { logger.warning("Restore operation does not exist. Activation operation failed."); return Mono.just(new PollResponse<KeyVaultRestoreOperation>( LongRunningOperationStatus.fromString("SELECTIVE_RESTORE_START_FAILED", true), null)); } final String jobId = keyVaultRestoreOperation.getJobId(); return withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private static <O> KeyVaultLongRunningOperation transformToLongRunningOperation(O operation) { if (operation instanceof RestoreOperation) { RestoreOperation restoreOperation = (RestoreOperation) operation; return new KeyVaultRestoreOperation(restoreOperation.getStatus(), restoreOperation.getStatusDetails(), createKeyVaultErrorFromError(restoreOperation.getError()), restoreOperation.getJobId(), restoreOperation.getStartTime(), restoreOperation.getEndTime()); } else if (operation instanceof SelectiveKeyRestoreOperation) { SelectiveKeyRestoreOperation selectiveKeyRestoreOperation = (SelectiveKeyRestoreOperation) 
operation; return new KeyVaultRestoreOperation(selectiveKeyRestoreOperation.getStatus(), selectiveKeyRestoreOperation.getStatusDetails(), createKeyVaultErrorFromError(selectiveKeyRestoreOperation.getError()), selectiveKeyRestoreOperation.getJobId(), selectiveKeyRestoreOperation.getStartTime(), selectiveKeyRestoreOperation.getEndTime()); } else if (operation instanceof FullBackupOperation) { FullBackupOperation fullBackupOperation = (FullBackupOperation) operation; return new KeyVaultBackupOperation(fullBackupOperation.getStatus(), fullBackupOperation.getStatusDetails(), createKeyVaultErrorFromError(fullBackupOperation.getError()), fullBackupOperation.getJobId(), fullBackupOperation.getStartTime(), fullBackupOperation.getEndTime(), fullBackupOperation.getAzureStorageBlobContainerUri()); } else { throw new UnsupportedOperationException(); } } private static KeyVaultError createKeyVaultErrorFromError(Error error) { if (error == null) { return null; } return new KeyVaultError(error.getCode(), error.getMessage(), createKeyVaultErrorFromError(error.getInnerError())); } }
nit: simplify to the below (same as the change suggested below)? ```suggestion .doOnRequest(ignored -> logger.info("Restoring from location - {}", backupFolderUrl)) ```
/**
 * Initiates a full restore of the Key Vault from the given backup folder.
 *
 * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to
 * the blob container where the backup resides. This would be the exact value that is returned as the result of a
 * backup operation.
 * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob.
 * @param context Additional context that is passed through the HTTP pipeline during the service call.
 * @return A {@link Mono} containing a {@link Response} whose value is the {@link KeyVaultRestoreOperation}
 * created by the service.
 */
Mono<Response<KeyVaultRestoreOperation>> restoreWithResponse(String backupFolderUrl, String sasToken,
                                                             Context context) {
    // The backup folder URL ends with the folder name; everything before it is the blob container URL.
    String[] segments = backupFolderUrl.split("/");
    String folderName = segments[segments.length - 1];
    String containerUrl = backupFolderUrl.substring(0, backupFolderUrl.length() - folderName.length());

    SASTokenParameter sasTokenParameter = new SASTokenParameter()
        .setStorageResourceUri(containerUrl)
        .setToken(sasToken);
    RestoreOperationParameters restoreOperationParameters = new RestoreOperationParameters()
        .setSasTokenParameters(sasTokenParameter)
        .setFolderToRestore(folderName);

    return clientImpl.fullRestoreOperationWithResponseAsync(vaultUrl, restoreOperationParameters,
        context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
        // Log the original backup folder URL directly instead of re-concatenating containerUrl + folderName.
        .doOnRequest(ignored -> logger.info("Restoring from location - {}", backupFolderUrl))
        .doOnSuccess(response -> logger.info("Restored from location - {}", backupFolderUrl))
        .doOnError(error -> logger.warning("Failed to restore from location - {}", backupFolderUrl, error))
        .map(restoreOperationResponse -> new SimpleResponse<>(restoreOperationResponse.getRequest(),
            restoreOperationResponse.getStatusCode(), restoreOperationResponse.getHeaders(),
            (KeyVaultRestoreOperation) transformToLongRunningOperation(restoreOperationResponse.getValue())));
}
.doOnRequest(ignored -> logger.info("Restoring from location - {}{}", containerUrl, folderName))
new SASTokenParameter() .setStorageResourceUri(blobStorageUrl) .setToken(sasToken); return clientImpl.fullBackupWithResponseAsync(vaultUrl, sasTokenParameter, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE)) .doOnRequest(ignored -> logger.info("Backing up at URL - {}
class KeyVaultBackupAsyncClient { private static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault"; private static final Duration DEFAULT_POLLING_INTERVAL = Duration.ofSeconds(1); /** * The logger to be used. */ private final ClientLogger logger = new ClientLogger(KeyVaultBackupAsyncClient.class); /** * The underlying AutoRest client used to interact with the Key Vault service. */ private final KeyVaultBackupClientImpl clientImpl; /** * The Kay Vault URL this client is associated to. */ private final String vaultUrl; Duration getDefaultPollingInterval() { return DEFAULT_POLLING_INTERVAL; } /** * Package private constructor to be used by {@link KeyVaultBackupClientBuilder}. */ KeyVaultBackupAsyncClient(URL vaultUrl, HttpPipeline httpPipeline) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); clientImpl = new KeyVaultBackupClientImplBuilder() .pipeline(httpPipeline) .buildClient(); } /** * Gets the URL for the Key Vault this client is associated with. * * @return The Key Vault URL. */ public String getVaultUrl() { return this.vaultUrl; } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws NullPointerException if the {@code blobStorageUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> beginBackup(String blobStorageUrl, String sasToken) { return beginBackup(blobStorageUrl, sasToken, getDefaultPollingInterval()); } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. 
* @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws NullPointerException if the {@code blobStorageUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> beginBackup(String blobStorageUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(blobStorageUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'blobStorageUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, backupActivationOperation(blobStorageUrl, sasToken), backupPollOperation(), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), backupFetchOperation()); } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. 
*/ Mono<Response<KeyVaultBackupOperation>> backupWithResponse(String blobStorageUrl, String sasToken, Context context) { SASTokenParameter sasTokenParameter = ", blobStorageUrl)) .doOnSuccess(response -> logger.info("Backed up at URL - {}", response.getValue().getAzureStorageBlobContainerUri())) .doOnError(error -> logger.warning("Failed to backup at URL - {}", blobStorageUrl, error)) .map(backupOperationResponse -> new SimpleResponse<>(backupOperationResponse.getRequest(), backupOperationResponse.getStatusCode(), backupOperationResponse.getHeaders(), (KeyVaultBackupOperation) transformToLongRunningOperation(backupOperationResponse.getValue()))); } private Function<PollingContext<KeyVaultBackupOperation>, Mono<KeyVaultBackupOperation>> backupActivationOperation(String blobStorageUrl, String sasToken) { return (pollingContext) -> { try { return withContext(context -> backupWithResponse(blobStorageUrl, sasToken, context)) .flatMap(backupResponse -> Mono.just(backupResponse.getValue())); } catch (RuntimeException e) { return monoError(logger, e); } }; } private Function<PollingContext<KeyVaultBackupOperation>, Mono<PollResponse<KeyVaultBackupOperation>>> backupPollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultBackupOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultBackupOperation keyVaultBackupOperation = pollResponse.getValue(); if (keyVaultBackupOperation == null) { logger.warning("Backup operation does not exist. 
Activation operation failed."); return Mono.just(new PollResponse<KeyVaultBackupOperation>( LongRunningOperationStatus.fromString("BACKUP_START_FAILED", true), null)); } final String jobId = keyVaultBackupOperation.getJobId(); return withContext(context -> clientImpl.fullBackupStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultBackupOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processBackupOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Function<PollingContext<KeyVaultBackupOperation>, Mono<String>> backupFetchOperation() { return (pollingContext) -> { String blobContainerUri = pollingContext.getLatestResponse().getValue().getAzureStorageBlobContainerUri(); if (blobContainerUri == null) { return Mono.empty(); } else { return Mono.just(blobContainerUri); } }; } private static Mono<PollResponse<KeyVaultBackupOperation>> processBackupOperationResponse(Response<KeyVaultBackupOperation> response) { String operationStatus = response.getValue().getStatus().toLowerCase(Locale.US); return Mono.just(new PollResponse<>( toLongRunningOperationStatus(operationStatus.toLowerCase(Locale.US)), response.getValue())); } private static LongRunningOperationStatus toLongRunningOperationStatus(String operationStatus) { switch (operationStatus) { case "inprogress": return LongRunningOperationStatus.IN_PROGRESS; case "succeeded": return LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; case "failed": return LongRunningOperationStatus.FAILED; default: return LongRunningOperationStatus.fromString("POLLING_FAILED", true); } } /** * Gets a pending {@link KeyVaultBackupOperation backup operation} from the Key Vault. * * @param jobId The operation identifier. 
* @throws NullPointerException if the {@code jobId} is null. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> getBackupOperation(String jobId) { Objects.requireNonNull(jobId, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'jobId'")); return new PollerFlux<>(Duration.ofSeconds(1), (pollingContext) -> Mono.empty(), backupStatusPollOperation(jobId), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), backupFetchOperation()); } private Function<PollingContext<KeyVaultBackupOperation>, Mono<PollResponse<KeyVaultBackupOperation>>> backupStatusPollOperation(String jobId) { return (pollingContext) -> withContext(context -> clientImpl.fullBackupStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultBackupOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processBackupOperationResponse); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code backupFolderUrl} or {@code sasToken} are {@code null}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginRestore(String backupFolderUrl, String sasToken) { return beginRestore(backupFolderUrl, sasToken, getDefaultPollingInterval()); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code backupFolderUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginRestore(String backupFolderUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(backupFolderUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'backupFolderUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, restoreActivationOperation(backupFolderUrl, sasToken), restorePollOperation(), (pollingContext, firstResponse) -> Mono.empty(), (pollingContext) -> Mono.empty()); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. 
An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. */ Mono<Response<KeyVaultRestoreOperation>> restoreWithResponse(String backupFolderUrl, String sasToken, Context context) { String[] segments = backupFolderUrl.split("/"); String folderName = segments[segments.length - 1]; String containerUrl = backupFolderUrl.substring(0, backupFolderUrl.length() - folderName.length()); SASTokenParameter sasTokenParameter = new SASTokenParameter() .setStorageResourceUri(containerUrl) .setToken(sasToken); RestoreOperationParameters restoreOperationParameters = new RestoreOperationParameters() .setSasTokenParameters(sasTokenParameter) .setFolderToRestore(folderName); return clientImpl.fullRestoreOperationWithResponseAsync(vaultUrl, restoreOperationParameters, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE)) .doOnRequest(ignored -> logger.info("Restoring from location - {}{}", containerUrl, folderName)) .doOnSuccess(response -> logger.info("Restored from location - {}{}", containerUrl, folderName)) .doOnError(error -> logger.warning("Failed to restore from location - {}{}", containerUrl, folderName, error)) .map(restoreOperationResponse -> new SimpleResponse<>(restoreOperationResponse.getRequest(), restoreOperationResponse.getStatusCode(), restoreOperationResponse.getHeaders(), (KeyVaultRestoreOperation) transformToLongRunningOperation(restoreOperationResponse.getValue()))); } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<KeyVaultRestoreOperation>> restoreActivationOperation(String backupFolderUrl, String sasToken) { return (pollingContext) -> { try { return 
withContext(context -> restoreWithResponse(backupFolderUrl, sasToken, context)) .flatMap(restoreResponse -> Mono.just(restoreResponse.getValue())); } catch (RuntimeException e) { return monoError(logger, e); } }; } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> restorePollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultRestoreOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultRestoreOperation keyVaultRestoreOperation = pollResponse.getValue(); if (keyVaultRestoreOperation == null) { logger.warning("Restore operation does not exist. Activation operation failed."); return Mono.just(new PollResponse<KeyVaultRestoreOperation>( LongRunningOperationStatus.fromString("RESTORE_START_FAILED", true), null)); } final String jobId = keyVaultRestoreOperation.getJobId(); return withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private static Mono<PollResponse<KeyVaultRestoreOperation>> processRestoreOperationResponse(Response<KeyVaultRestoreOperation> response) { String operationStatus = response.getValue().getStatus().toLowerCase(Locale.US); return Mono.just(new PollResponse<>( toLongRunningOperationStatus(operationStatus.toLowerCase(Locale.US)), response.getValue())); } /** * Gets a pending {@link KeyVaultRestoreOperation full or selective 
restore operation} from the Key Vault. * * @param jobId The operation identifier. * @throws NullPointerException if the {@code jobId} is null. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation restore operation} status. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> getRestoreOperation(String jobId) { Objects.requireNonNull(jobId, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'jobId'")); return new PollerFlux<>(Duration.ofSeconds(1), (pollingContext) -> Mono.empty(), restoreStatusPollOperation(jobId), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), (pollingContext) -> Mono.empty()); } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> restoreStatusPollOperation(String jobId) { return (pollingContext) -> withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. 
* @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code keyName}, {@code backupFolderUrl} or {@code sasToken} are {@code * null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginSelectiveRestore(String keyName, String backupFolderUrl, String sasToken) { return beginSelectiveRestore(keyName, backupFolderUrl, sasToken, getDefaultPollingInterval()); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code keyName}, {@code backupFolderUrl} or {@code sasToken} are {@code * null}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginSelectiveRestore(String keyName, String backupFolderUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(keyName, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'keyName'")); Objects.requireNonNull(backupFolderUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'backupFolderUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, selectiveRestoreActivationOperation(keyName, backupFolderUrl, sasToken), selectiveRestorePollOperation(), (pollingContext, firstResponse) -> Mono.empty(), (pollingContext) -> Mono.empty()); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. 
*/
Mono<Response<KeyVaultRestoreOperation>> selectiveRestoreWithResponse(String keyName, String backupFolderUrl,
                                                                      String sasToken, Context context) {
    // The backup folder URL has the form "<containerUrl>/<folderName>"; the service API takes the two parts
    // separately, so split them out here.
    String[] segments = backupFolderUrl.split("/");
    String folderName = segments[segments.length - 1];
    String containerUrl = backupFolderUrl.substring(0, backupFolderUrl.length() - folderName.length());

    SASTokenParameter sasTokenParameter = new SASTokenParameter()
        .setStorageResourceUri(containerUrl)
        .setToken(sasToken);
    SelectiveKeyRestoreOperationParameters selectiveKeyRestoreOperationParameters =
        new SelectiveKeyRestoreOperationParameters()
            .setSasTokenParameters(sasTokenParameter)
            .setFolder(folderName);

    return clientImpl.selectiveKeyRestoreOperationWithResponseAsync(vaultUrl, keyName,
        selectiveKeyRestoreOperationParameters,
        context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))
        .doOnRequest(ignored -> logger.info("Restoring key \"{}\" from location - {}{}", keyName, containerUrl,
            folderName))
        .doOnSuccess(response -> logger.info("Restored key \"{}\" from location - {}{}", keyName, containerUrl,
            folderName))
        .doOnError(error -> logger.warning("Failed to restore key \"{}\" from location - {}{}", keyName,
            containerUrl, folderName, error))
        // Wrap the generated response, converting its value to the public long-running operation type.
        .map(restoreOperationResponse -> new SimpleResponse<>(restoreOperationResponse.getRequest(),
            restoreOperationResponse.getStatusCode(), restoreOperationResponse.getHeaders(),
            (KeyVaultRestoreOperation) transformToLongRunningOperation(restoreOperationResponse.getValue())));
}

/**
 * Builds the activation operation for the selective-restore poller: performs the initial service call and
 * yields the resulting {@link KeyVaultRestoreOperation}.
 */
private Function<PollingContext<KeyVaultRestoreOperation>, Mono<KeyVaultRestoreOperation>>
    selectiveRestoreActivationOperation(String keyName, String backupFolderUrl, String sasToken) {
    return (pollingContext) -> {
        try {
            return withContext(context ->
                selectiveRestoreWithResponse(keyName, backupFolderUrl, sasToken, context))
                .flatMap(selectiveKeyRestoreResponse -> Mono.just(selectiveKeyRestoreResponse.getValue()));
        } catch (RuntimeException e) {
            // Surface synchronous failures through the returned Mono rather than throwing to the caller.
            return monoError(logger, e);
        }
    };
}

private
Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> selectiveRestorePollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultRestoreOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultRestoreOperation keyVaultRestoreOperation = pollResponse.getValue(); if (keyVaultRestoreOperation == null) { logger.warning("Restore operation does not exist. Activation operation failed."); return Mono.just(new PollResponse<KeyVaultRestoreOperation>( LongRunningOperationStatus.fromString("SELECTIVE_RESTORE_START_FAILED", true), null)); } final String jobId = keyVaultRestoreOperation.getJobId(); return withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private static <O> KeyVaultLongRunningOperation transformToLongRunningOperation(O operation) { if (operation instanceof RestoreOperation) { RestoreOperation restoreOperation = (RestoreOperation) operation; return new KeyVaultRestoreOperation(restoreOperation.getStatus(), restoreOperation.getStatusDetails(), createKeyVaultErrorFromError(restoreOperation.getError()), restoreOperation.getJobId(), restoreOperation.getStartTime(), restoreOperation.getEndTime()); } else if (operation instanceof SelectiveKeyRestoreOperation) { SelectiveKeyRestoreOperation selectiveKeyRestoreOperation = (SelectiveKeyRestoreOperation) 
operation; return new KeyVaultRestoreOperation(selectiveKeyRestoreOperation.getStatus(),
    selectiveKeyRestoreOperation.getStatusDetails(),
    createKeyVaultErrorFromError(selectiveKeyRestoreOperation.getError()),
    selectiveKeyRestoreOperation.getJobId(), selectiveKeyRestoreOperation.getStartTime(),
    selectiveKeyRestoreOperation.getEndTime());
} else if (operation instanceof FullBackupOperation) {
    // Map the generated FullBackupOperation model to the public KeyVaultBackupOperation type.
    FullBackupOperation fullBackupOperation = (FullBackupOperation) operation;
    return new KeyVaultBackupOperation(fullBackupOperation.getStatus(), fullBackupOperation.getStatusDetails(),
        createKeyVaultErrorFromError(fullBackupOperation.getError()), fullBackupOperation.getJobId(),
        fullBackupOperation.getStartTime(), fullBackupOperation.getEndTime(),
        fullBackupOperation.getAzureStorageBlobContainerUri());
} else {
    // Unrecognized generated operation type; there is no public equivalent to convert to.
    throw new UnsupportedOperationException("Operation type not supported");
}
}

/**
 * Recursively converts a generated {@link Error} model into the public {@link KeyVaultError} type.
 *
 * @param error The generated error model. May be {@code null}.
 * @return The equivalent {@link KeyVaultError}, or {@code null} if {@code error} was {@code null}.
 */
private static KeyVaultError createKeyVaultErrorFromError(Error error) {
    if (error == null) {
        return null;
    }

    // Inner errors are converted recursively; the recursion terminates at a null inner error.
    return new KeyVaultError(error.getCode(), error.getMessage(),
        createKeyVaultErrorFromError(error.getInnerError()));
}
}
class KeyVaultBackupAsyncClient { private static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault"; private static final Duration DEFAULT_POLLING_INTERVAL = Duration.ofSeconds(1); /** * The logger to be used. */ private final ClientLogger logger = new ClientLogger(KeyVaultBackupAsyncClient.class); /** * The underlying AutoRest client used to interact with the Key Vault service. */ private final KeyVaultBackupClientImpl clientImpl; /** * The Kay Vault URL this client is associated to. */ private final String vaultUrl; Duration getDefaultPollingInterval() { return DEFAULT_POLLING_INTERVAL; } /** * Package private constructor to be used by {@link KeyVaultBackupClientBuilder}. */ KeyVaultBackupAsyncClient(URL vaultUrl, HttpPipeline httpPipeline) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); clientImpl = new KeyVaultBackupClientImplBuilder() .pipeline(httpPipeline) .buildClient(); } /** * Gets the URL for the Key Vault this client is associated with. * * @return The Key Vault URL. */ public String getVaultUrl() { return this.vaultUrl; } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws NullPointerException if the {@code blobStorageUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> beginBackup(String blobStorageUrl, String sasToken) { return beginBackup(blobStorageUrl, sasToken, getDefaultPollingInterval()); } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. 
* @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws NullPointerException if the {@code blobStorageUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> beginBackup(String blobStorageUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(blobStorageUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'blobStorageUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, backupActivationOperation(blobStorageUrl, sasToken), backupPollOperation(), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), backupFetchOperation()); } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. 
*/ Mono<Response<KeyVaultBackupOperation>> backupWithResponse(String blobStorageUrl, String sasToken, Context context) { SASTokenParameter sasTokenParameter = ", blobStorageUrl)) .doOnSuccess(response -> logger.info("Backed up at URL - {}", response.getValue().getAzureStorageBlobContainerUri())) .doOnError(error -> logger.warning("Failed to backup at URL - {}", blobStorageUrl, error)) .map(backupOperationResponse -> new SimpleResponse<>(backupOperationResponse.getRequest(), backupOperationResponse.getStatusCode(), backupOperationResponse.getHeaders(), (KeyVaultBackupOperation) transformToLongRunningOperation(backupOperationResponse.getValue()))); } private Function<PollingContext<KeyVaultBackupOperation>, Mono<KeyVaultBackupOperation>> backupActivationOperation(String blobStorageUrl, String sasToken) { return (pollingContext) -> { try { return withContext(context -> backupWithResponse(blobStorageUrl, sasToken, context)) .flatMap(backupResponse -> Mono.just(backupResponse.getValue())); } catch (RuntimeException e) { return monoError(logger, e); } }; } private Function<PollingContext<KeyVaultBackupOperation>, Mono<PollResponse<KeyVaultBackupOperation>>> backupPollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultBackupOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultBackupOperation keyVaultBackupOperation = pollResponse.getValue(); if (keyVaultBackupOperation == null) { logger.warning("Backup operation does not exist. 
Activation operation failed."); return Mono.just(new PollResponse<KeyVaultBackupOperation>( LongRunningOperationStatus.fromString("BACKUP_START_FAILED", true), null)); } final String jobId = keyVaultBackupOperation.getJobId(); return withContext(context -> clientImpl.fullBackupStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultBackupOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processBackupOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Function<PollingContext<KeyVaultBackupOperation>, Mono<String>> backupFetchOperation() { return (pollingContext) -> { String blobContainerUri = pollingContext.getLatestResponse().getValue().getAzureStorageBlobContainerUri(); if (blobContainerUri == null) { return Mono.empty(); } else { return Mono.just(blobContainerUri); } }; } private static Mono<PollResponse<KeyVaultBackupOperation>> processBackupOperationResponse(Response<KeyVaultBackupOperation> response) { String operationStatus = response.getValue().getStatus().toLowerCase(Locale.US); return Mono.just(new PollResponse<>( toLongRunningOperationStatus(operationStatus.toLowerCase(Locale.US)), response.getValue())); } private static LongRunningOperationStatus toLongRunningOperationStatus(String operationStatus) { switch (operationStatus) { case "inprogress": return LongRunningOperationStatus.IN_PROGRESS; case "succeeded": return LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; case "failed": return LongRunningOperationStatus.FAILED; default: return LongRunningOperationStatus.fromString("POLLING_FAILED", true); } } /** * Gets a pending {@link KeyVaultBackupOperation backup operation} from the Key Vault. * * @param jobId The operation identifier. 
* @throws NullPointerException if the {@code jobId} is null. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> getBackupOperation(String jobId) { Objects.requireNonNull(jobId, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'jobId'")); return new PollerFlux<>(Duration.ofSeconds(1), (pollingContext) -> Mono.empty(), backupStatusPollOperation(jobId), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), backupFetchOperation()); } private Function<PollingContext<KeyVaultBackupOperation>, Mono<PollResponse<KeyVaultBackupOperation>>> backupStatusPollOperation(String jobId) { return (pollingContext) -> withContext(context -> clientImpl.fullBackupStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultBackupOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processBackupOperationResponse); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code backupFolderUrl} or {@code sasToken} are {@code null}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginRestore(String backupFolderUrl, String sasToken) { return beginRestore(backupFolderUrl, sasToken, getDefaultPollingInterval()); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code backupFolderUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginRestore(String backupFolderUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(backupFolderUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'backupFolderUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, restoreActivationOperation(backupFolderUrl, sasToken), restorePollOperation(), (pollingContext, firstResponse) -> Mono.empty(), (pollingContext) -> Mono.empty()); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. 
An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. */ Mono<Response<KeyVaultRestoreOperation>> restoreWithResponse(String backupFolderUrl, String sasToken, Context context) { String[] segments = backupFolderUrl.split("/"); String folderName = segments[segments.length - 1]; String containerUrl = backupFolderUrl.substring(0, backupFolderUrl.length() - folderName.length()); SASTokenParameter sasTokenParameter = new SASTokenParameter() .setStorageResourceUri(containerUrl) .setToken(sasToken); RestoreOperationParameters restoreOperationParameters = new RestoreOperationParameters() .setSasTokenParameters(sasTokenParameter) .setFolderToRestore(folderName); return clientImpl.fullRestoreOperationWithResponseAsync(vaultUrl, restoreOperationParameters, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE)) .doOnRequest(ignored -> logger.info("Restoring from location - {}", backupFolderUrl)) .doOnSuccess(response -> logger.info("Restored from location - {}", backupFolderUrl)) .doOnError(error -> logger.warning("Failed to restore from location - {}", backupFolderUrl, error)) .map(restoreOperationResponse -> new SimpleResponse<>(restoreOperationResponse.getRequest(), restoreOperationResponse.getStatusCode(), restoreOperationResponse.getHeaders(), (KeyVaultRestoreOperation) transformToLongRunningOperation(restoreOperationResponse.getValue()))); } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<KeyVaultRestoreOperation>> restoreActivationOperation(String backupFolderUrl, String sasToken) { return (pollingContext) -> { try { return withContext(context -> 
restoreWithResponse(backupFolderUrl, sasToken, context)) .flatMap(restoreResponse -> Mono.just(restoreResponse.getValue())); } catch (RuntimeException e) { return monoError(logger, e); } }; } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> restorePollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultRestoreOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultRestoreOperation keyVaultRestoreOperation = pollResponse.getValue(); if (keyVaultRestoreOperation == null) { logger.warning("Restore operation does not exist. Activation operation failed."); return Mono.just(new PollResponse<KeyVaultRestoreOperation>( LongRunningOperationStatus.fromString("RESTORE_START_FAILED", true), null)); } final String jobId = keyVaultRestoreOperation.getJobId(); return withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private static Mono<PollResponse<KeyVaultRestoreOperation>> processRestoreOperationResponse(Response<KeyVaultRestoreOperation> response) { String operationStatus = response.getValue().getStatus().toLowerCase(Locale.US); return Mono.just(new PollResponse<>( toLongRunningOperationStatus(operationStatus.toLowerCase(Locale.US)), response.getValue())); } /** * Gets a pending {@link KeyVaultRestoreOperation full or selective restore operation} 
from the Key Vault. * * @param jobId The operation identifier. * @throws NullPointerException if the {@code jobId} is null. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation restore operation} status. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> getRestoreOperation(String jobId) { Objects.requireNonNull(jobId, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'jobId'")); return new PollerFlux<>(Duration.ofSeconds(1), (pollingContext) -> Mono.empty(), restoreStatusPollOperation(jobId), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), (pollingContext) -> Mono.empty()); } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> restoreStatusPollOperation(String jobId) { return (pollingContext) -> withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. 
* @throws NullPointerException if the {@code keyName}, {@code backupFolderUrl} or {@code sasToken} are {@code * null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginSelectiveRestore(String keyName, String backupFolderUrl, String sasToken) { return beginSelectiveRestore(keyName, backupFolderUrl, sasToken, getDefaultPollingInterval()); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code keyName}, {@code backupFolderUrl} or {@code sasToken} are {@code * null}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginSelectiveRestore(String keyName, String backupFolderUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(keyName, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'keyName'")); Objects.requireNonNull(backupFolderUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'backupFolderUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, selectiveRestoreActivationOperation(keyName, backupFolderUrl, sasToken), selectiveRestorePollOperation(), (pollingContext, firstResponse) -> Mono.empty(), (pollingContext) -> Mono.empty()); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. 
*/ Mono<Response<KeyVaultRestoreOperation>> selectiveRestoreWithResponse(String keyName, String backupFolderUrl, String sasToken, Context context) { String[] segments = backupFolderUrl.split("/"); String folderName = segments[segments.length - 1]; String containerUrl = backupFolderUrl.substring(0, backupFolderUrl.length() - folderName.length()); SASTokenParameter sasTokenParameter = new SASTokenParameter() .setStorageResourceUri(containerUrl) .setToken(sasToken); SelectiveKeyRestoreOperationParameters selectiveKeyRestoreOperationParameters = new SelectiveKeyRestoreOperationParameters() .setSasTokenParameters(sasTokenParameter) .setFolder(folderName); return clientImpl.selectiveKeyRestoreOperationWithResponseAsync(vaultUrl, keyName, selectiveKeyRestoreOperationParameters, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE)) .doOnRequest(ignored -> logger.info("Restoring key \"{}\" from location - {}", keyName, backupFolderUrl)) .doOnSuccess(response -> logger.info("Restored key \"{}\" from location - {}", keyName, backupFolderUrl)) .doOnError(error -> logger.warning("Failed to restore key \"{}\" from location - {}", keyName, backupFolderUrl, error)) .map(restoreOperationResponse -> new SimpleResponse<>(restoreOperationResponse.getRequest(), restoreOperationResponse.getStatusCode(), restoreOperationResponse.getHeaders(), (KeyVaultRestoreOperation) transformToLongRunningOperation(restoreOperationResponse.getValue()))); } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<KeyVaultRestoreOperation>> selectiveRestoreActivationOperation(String keyName, String backupFolderUrl, String sasToken) { return (pollingContext) -> { try { return withContext(context -> selectiveRestoreWithResponse(keyName, backupFolderUrl, sasToken, context)) .flatMap(selectiveKeyRestoreResponse -> Mono.just(selectiveKeyRestoreResponse.getValue())); } catch (RuntimeException e) { return monoError(logger, e); } }; } private 
Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> selectiveRestorePollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultRestoreOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultRestoreOperation keyVaultRestoreOperation = pollResponse.getValue(); if (keyVaultRestoreOperation == null) { logger.warning("Restore operation does not exist. Activation operation failed."); return Mono.just(new PollResponse<KeyVaultRestoreOperation>( LongRunningOperationStatus.fromString("SELECTIVE_RESTORE_START_FAILED", true), null)); } final String jobId = keyVaultRestoreOperation.getJobId(); return withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private static <O> KeyVaultLongRunningOperation transformToLongRunningOperation(O operation) { if (operation instanceof RestoreOperation) { RestoreOperation restoreOperation = (RestoreOperation) operation; return new KeyVaultRestoreOperation(restoreOperation.getStatus(), restoreOperation.getStatusDetails(), createKeyVaultErrorFromError(restoreOperation.getError()), restoreOperation.getJobId(), restoreOperation.getStartTime(), restoreOperation.getEndTime()); } else if (operation instanceof SelectiveKeyRestoreOperation) { SelectiveKeyRestoreOperation selectiveKeyRestoreOperation = (SelectiveKeyRestoreOperation) 
operation; return new KeyVaultRestoreOperation(selectiveKeyRestoreOperation.getStatus(), selectiveKeyRestoreOperation.getStatusDetails(), createKeyVaultErrorFromError(selectiveKeyRestoreOperation.getError()), selectiveKeyRestoreOperation.getJobId(), selectiveKeyRestoreOperation.getStartTime(), selectiveKeyRestoreOperation.getEndTime()); } else if (operation instanceof FullBackupOperation) { FullBackupOperation fullBackupOperation = (FullBackupOperation) operation; return new KeyVaultBackupOperation(fullBackupOperation.getStatus(), fullBackupOperation.getStatusDetails(), createKeyVaultErrorFromError(fullBackupOperation.getError()), fullBackupOperation.getJobId(), fullBackupOperation.getStartTime(), fullBackupOperation.getEndTime(), fullBackupOperation.getAzureStorageBlobContainerUri()); } else { throw new UnsupportedOperationException(); } } private static KeyVaultError createKeyVaultErrorFromError(Error error) { if (error == null) { return null; } return new KeyVaultError(error.getCode(), error.getMessage(), createKeyVaultErrorFromError(error.getInnerError())); } }
same comments as above.
Mono<Response<KeyVaultRestoreOperation>> selectiveRestoreWithResponse(String keyName, String backupFolderUrl, String sasToken, Context context) { String[] segments = backupFolderUrl.split("/"); String folderName = segments[segments.length - 1]; String containerUrl = backupFolderUrl.substring(0, backupFolderUrl.length() - folderName.length()); SASTokenParameter sasTokenParameter = new SASTokenParameter() .setStorageResourceUri(containerUrl) .setToken(sasToken); SelectiveKeyRestoreOperationParameters selectiveKeyRestoreOperationParameters = new SelectiveKeyRestoreOperationParameters() .setSasTokenParameters(sasTokenParameter) .setFolder(folderName); return clientImpl.selectiveKeyRestoreOperationWithResponseAsync(vaultUrl, keyName, selectiveKeyRestoreOperationParameters, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE)) .doOnRequest(ignored -> logger.info("Restoring key \"{}\" from location - {}{}", keyName, containerUrl, folderName)) .doOnSuccess(response -> logger.info("Restored key \"{}\" from location - {}{}", keyName, containerUrl, folderName)) .doOnError(error -> logger.warning("Failed to restore key \"{}\" from location - {}{}", keyName, containerUrl, folderName, error)) .map(restoreOperationResponse -> new SimpleResponse<>(restoreOperationResponse.getRequest(), restoreOperationResponse.getStatusCode(), restoreOperationResponse.getHeaders(), (KeyVaultRestoreOperation) transformToLongRunningOperation(restoreOperationResponse.getValue()))); }
error))
new SASTokenParameter() .setStorageResourceUri(blobStorageUrl) .setToken(sasToken); return clientImpl.fullBackupWithResponseAsync(vaultUrl, sasTokenParameter, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE)) .doOnRequest(ignored -> logger.info("Backing up at URL - {}
class KeyVaultBackupAsyncClient { private static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault"; private static final Duration DEFAULT_POLLING_INTERVAL = Duration.ofSeconds(1); /** * The logger to be used. */ private final ClientLogger logger = new ClientLogger(KeyVaultBackupAsyncClient.class); /** * The underlying AutoRest client used to interact with the Key Vault service. */ private final KeyVaultBackupClientImpl clientImpl; /** * The Kay Vault URL this client is associated to. */ private final String vaultUrl; Duration getDefaultPollingInterval() { return DEFAULT_POLLING_INTERVAL; } /** * Package private constructor to be used by {@link KeyVaultBackupClientBuilder}. */ KeyVaultBackupAsyncClient(URL vaultUrl, HttpPipeline httpPipeline) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); clientImpl = new KeyVaultBackupClientImplBuilder() .pipeline(httpPipeline) .buildClient(); } /** * Gets the URL for the Key Vault this client is associated with. * * @return The Key Vault URL. */ public String getVaultUrl() { return this.vaultUrl; } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws NullPointerException if the {@code blobStorageUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> beginBackup(String blobStorageUrl, String sasToken) { return beginBackup(blobStorageUrl, sasToken, getDefaultPollingInterval()); } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. 
* @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws NullPointerException if the {@code blobStorageUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> beginBackup(String blobStorageUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(blobStorageUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'blobStorageUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, backupActivationOperation(blobStorageUrl, sasToken), backupPollOperation(), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), backupFetchOperation()); } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. 
*/ Mono<Response<KeyVaultBackupOperation>> backupWithResponse(String blobStorageUrl, String sasToken, Context context) { SASTokenParameter sasTokenParameter = ", blobStorageUrl)) .doOnSuccess(response -> logger.info("Backed up at URL - {}", response.getValue().getAzureStorageBlobContainerUri())) .doOnError(error -> logger.warning("Failed to backup at URL - {}", blobStorageUrl, error)) .map(backupOperationResponse -> new SimpleResponse<>(backupOperationResponse.getRequest(), backupOperationResponse.getStatusCode(), backupOperationResponse.getHeaders(), (KeyVaultBackupOperation) transformToLongRunningOperation(backupOperationResponse.getValue()))); } private Function<PollingContext<KeyVaultBackupOperation>, Mono<KeyVaultBackupOperation>> backupActivationOperation(String blobStorageUrl, String sasToken) { return (pollingContext) -> { try { return withContext(context -> backupWithResponse(blobStorageUrl, sasToken, context)) .flatMap(backupResponse -> Mono.just(backupResponse.getValue())); } catch (RuntimeException e) { return monoError(logger, e); } }; } private Function<PollingContext<KeyVaultBackupOperation>, Mono<PollResponse<KeyVaultBackupOperation>>> backupPollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultBackupOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultBackupOperation keyVaultBackupOperation = pollResponse.getValue(); if (keyVaultBackupOperation == null) { logger.warning("Backup operation does not exist. 
Activation operation failed."); return Mono.just(new PollResponse<KeyVaultBackupOperation>( LongRunningOperationStatus.fromString("BACKUP_START_FAILED", true), null)); } final String jobId = keyVaultBackupOperation.getJobId(); return withContext(context -> clientImpl.fullBackupStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultBackupOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processBackupOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Function<PollingContext<KeyVaultBackupOperation>, Mono<String>> backupFetchOperation() { return (pollingContext) -> { String blobContainerUri = pollingContext.getLatestResponse().getValue().getAzureStorageBlobContainerUri(); if (blobContainerUri == null) { return Mono.empty(); } else { return Mono.just(blobContainerUri); } }; } private static Mono<PollResponse<KeyVaultBackupOperation>> processBackupOperationResponse(Response<KeyVaultBackupOperation> response) { String operationStatus = response.getValue().getStatus().toLowerCase(Locale.US); return Mono.just(new PollResponse<>( toLongRunningOperationStatus(operationStatus.toLowerCase(Locale.US)), response.getValue())); } private static LongRunningOperationStatus toLongRunningOperationStatus(String operationStatus) { switch (operationStatus) { case "inprogress": return LongRunningOperationStatus.IN_PROGRESS; case "succeeded": return LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; case "failed": return LongRunningOperationStatus.FAILED; default: return LongRunningOperationStatus.fromString("POLLING_FAILED", true); } } /** * Gets a pending {@link KeyVaultBackupOperation backup operation} from the Key Vault. * * @param jobId The operation identifier. 
* @throws NullPointerException if the {@code jobId} is null. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> getBackupOperation(String jobId) { Objects.requireNonNull(jobId, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'jobId'")); return new PollerFlux<>(Duration.ofSeconds(1), (pollingContext) -> Mono.empty(), backupStatusPollOperation(jobId), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), backupFetchOperation()); } private Function<PollingContext<KeyVaultBackupOperation>, Mono<PollResponse<KeyVaultBackupOperation>>> backupStatusPollOperation(String jobId) { return (pollingContext) -> withContext(context -> clientImpl.fullBackupStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultBackupOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processBackupOperationResponse); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code backupFolderUrl} or {@code sasToken} are {@code null}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginRestore(String backupFolderUrl, String sasToken) { return beginRestore(backupFolderUrl, sasToken, getDefaultPollingInterval()); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code backupFolderUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginRestore(String backupFolderUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(backupFolderUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'backupFolderUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, restoreActivationOperation(backupFolderUrl, sasToken), restorePollOperation(), (pollingContext, firstResponse) -> Mono.empty(), (pollingContext) -> Mono.empty()); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. 
An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. */ Mono<Response<KeyVaultRestoreOperation>> restoreWithResponse(String backupFolderUrl, String sasToken, Context context) { String[] segments = backupFolderUrl.split("/"); String folderName = segments[segments.length - 1]; String containerUrl = backupFolderUrl.substring(0, backupFolderUrl.length() - folderName.length()); SASTokenParameter sasTokenParameter = new SASTokenParameter() .setStorageResourceUri(containerUrl) .setToken(sasToken); RestoreOperationParameters restoreOperationParameters = new RestoreOperationParameters() .setSasTokenParameters(sasTokenParameter) .setFolderToRestore(folderName); return clientImpl.fullRestoreOperationWithResponseAsync(vaultUrl, restoreOperationParameters, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE)) .doOnRequest(ignored -> logger.info("Restoring from location - {}{}", containerUrl, folderName)) .doOnSuccess(response -> logger.info("Restored from location - {}{}", containerUrl, folderName)) .doOnError(error -> logger.warning("Failed to restore from location - {}{}", containerUrl, folderName, error)) .map(restoreOperationResponse -> new SimpleResponse<>(restoreOperationResponse.getRequest(), restoreOperationResponse.getStatusCode(), restoreOperationResponse.getHeaders(), (KeyVaultRestoreOperation) transformToLongRunningOperation(restoreOperationResponse.getValue()))); } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<KeyVaultRestoreOperation>> restoreActivationOperation(String backupFolderUrl, String sasToken) { return (pollingContext) -> { try { return 
withContext(context -> restoreWithResponse(backupFolderUrl, sasToken, context)) .flatMap(restoreResponse -> Mono.just(restoreResponse.getValue())); } catch (RuntimeException e) { return monoError(logger, e); } }; } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> restorePollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultRestoreOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultRestoreOperation keyVaultRestoreOperation = pollResponse.getValue(); if (keyVaultRestoreOperation == null) { logger.warning("Restore operation does not exist. Activation operation failed."); return Mono.just(new PollResponse<KeyVaultRestoreOperation>( LongRunningOperationStatus.fromString("RESTORE_START_FAILED", true), null)); } final String jobId = keyVaultRestoreOperation.getJobId(); return withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private static Mono<PollResponse<KeyVaultRestoreOperation>> processRestoreOperationResponse(Response<KeyVaultRestoreOperation> response) { String operationStatus = response.getValue().getStatus().toLowerCase(Locale.US); return Mono.just(new PollResponse<>( toLongRunningOperationStatus(operationStatus.toLowerCase(Locale.US)), response.getValue())); } /** * Gets a pending {@link KeyVaultRestoreOperation full or selective 
restore operation} from the Key Vault. * * @param jobId The operation identifier. * @throws NullPointerException if the {@code jobId} is null. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation restore operation} status. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> getRestoreOperation(String jobId) { Objects.requireNonNull(jobId, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'jobId'")); return new PollerFlux<>(Duration.ofSeconds(1), (pollingContext) -> Mono.empty(), restoreStatusPollOperation(jobId), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), (pollingContext) -> Mono.empty()); } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> restoreStatusPollOperation(String jobId) { return (pollingContext) -> withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. 
* @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code keyName}, {@code backupFolderUrl} or {@code sasToken} are {@code * null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginSelectiveRestore(String keyName, String backupFolderUrl, String sasToken) { return beginSelectiveRestore(keyName, backupFolderUrl, sasToken, getDefaultPollingInterval()); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code keyName}, {@code backupFolderUrl} or {@code sasToken} are {@code * null}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginSelectiveRestore(String keyName, String backupFolderUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(keyName, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'keyName'")); Objects.requireNonNull(backupFolderUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'backupFolderUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, selectiveRestoreActivationOperation(keyName, backupFolderUrl, sasToken), selectiveRestorePollOperation(), (pollingContext, firstResponse) -> Mono.empty(), (pollingContext) -> Mono.empty()); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. 
*/ Mono<Response<KeyVaultRestoreOperation>> selectiveRestoreWithResponse(String keyName, String backupFolderUrl, String sasToken, Context context) { String[] segments = backupFolderUrl.split("/"); String folderName = segments[segments.length - 1]; String containerUrl = backupFolderUrl.substring(0, backupFolderUrl.length() - folderName.length()); SASTokenParameter sasTokenParameter = new SASTokenParameter() .setStorageResourceUri(containerUrl) .setToken(sasToken); SelectiveKeyRestoreOperationParameters selectiveKeyRestoreOperationParameters = new SelectiveKeyRestoreOperationParameters() .setSasTokenParameters(sasTokenParameter) .setFolder(folderName); return clientImpl.selectiveKeyRestoreOperationWithResponseAsync(vaultUrl, keyName, selectiveKeyRestoreOperationParameters, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE)) .doOnRequest(ignored -> logger.info("Restoring key \"{}\" from location - {}{}", keyName, containerUrl, folderName)) .doOnSuccess(response -> logger.info("Restored key \"{}\" from location - {}{}", keyName, containerUrl, folderName)) .doOnError(error -> logger.warning("Failed to restore key \"{}\" from location - {}{}", keyName, containerUrl, folderName, error)) .map(restoreOperationResponse -> new SimpleResponse<>(restoreOperationResponse.getRequest(), restoreOperationResponse.getStatusCode(), restoreOperationResponse.getHeaders(), (KeyVaultRestoreOperation) transformToLongRunningOperation(restoreOperationResponse.getValue()))); } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<KeyVaultRestoreOperation>> selectiveRestoreActivationOperation(String keyName, String backupFolderUrl, String sasToken) { return (pollingContext) -> { try { return withContext(context -> selectiveRestoreWithResponse(keyName, backupFolderUrl, sasToken, context)) .flatMap(selectiveKeyRestoreResponse -> Mono.just(selectiveKeyRestoreResponse.getValue())); } catch (RuntimeException e) { return monoError(logger, e); } }; } private 
Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> selectiveRestorePollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultRestoreOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultRestoreOperation keyVaultRestoreOperation = pollResponse.getValue(); if (keyVaultRestoreOperation == null) { logger.warning("Restore operation does not exist. Activation operation failed."); return Mono.just(new PollResponse<KeyVaultRestoreOperation>( LongRunningOperationStatus.fromString("SELECTIVE_RESTORE_START_FAILED", true), null)); } final String jobId = keyVaultRestoreOperation.getJobId(); return withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private static <O> KeyVaultLongRunningOperation transformToLongRunningOperation(O operation) { if (operation instanceof RestoreOperation) { RestoreOperation restoreOperation = (RestoreOperation) operation; return new KeyVaultRestoreOperation(restoreOperation.getStatus(), restoreOperation.getStatusDetails(), createKeyVaultErrorFromError(restoreOperation.getError()), restoreOperation.getJobId(), restoreOperation.getStartTime(), restoreOperation.getEndTime()); } else if (operation instanceof SelectiveKeyRestoreOperation) { SelectiveKeyRestoreOperation selectiveKeyRestoreOperation = (SelectiveKeyRestoreOperation) 
operation; return new KeyVaultRestoreOperation(selectiveKeyRestoreOperation.getStatus(), selectiveKeyRestoreOperation.getStatusDetails(), createKeyVaultErrorFromError(selectiveKeyRestoreOperation.getError()), selectiveKeyRestoreOperation.getJobId(), selectiveKeyRestoreOperation.getStartTime(), selectiveKeyRestoreOperation.getEndTime()); } else if (operation instanceof FullBackupOperation) { FullBackupOperation fullBackupOperation = (FullBackupOperation) operation; return new KeyVaultBackupOperation(fullBackupOperation.getStatus(), fullBackupOperation.getStatusDetails(), createKeyVaultErrorFromError(fullBackupOperation.getError()), fullBackupOperation.getJobId(), fullBackupOperation.getStartTime(), fullBackupOperation.getEndTime(), fullBackupOperation.getAzureStorageBlobContainerUri()); } else { throw new UnsupportedOperationException("Operation type not supported"); } } private static KeyVaultError createKeyVaultErrorFromError(Error error) { if (error == null) { return null; } return new KeyVaultError(error.getCode(), error.getMessage(), createKeyVaultErrorFromError(error.getInnerError())); } }
class KeyVaultBackupAsyncClient { private static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault"; private static final Duration DEFAULT_POLLING_INTERVAL = Duration.ofSeconds(1); /** * The logger to be used. */ private final ClientLogger logger = new ClientLogger(KeyVaultBackupAsyncClient.class); /** * The underlying AutoRest client used to interact with the Key Vault service. */ private final KeyVaultBackupClientImpl clientImpl; /** * The Kay Vault URL this client is associated to. */ private final String vaultUrl; Duration getDefaultPollingInterval() { return DEFAULT_POLLING_INTERVAL; } /** * Package private constructor to be used by {@link KeyVaultBackupClientBuilder}. */ KeyVaultBackupAsyncClient(URL vaultUrl, HttpPipeline httpPipeline) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); clientImpl = new KeyVaultBackupClientImplBuilder() .pipeline(httpPipeline) .buildClient(); } /** * Gets the URL for the Key Vault this client is associated with. * * @return The Key Vault URL. */ public String getVaultUrl() { return this.vaultUrl; } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws NullPointerException if the {@code blobStorageUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> beginBackup(String blobStorageUrl, String sasToken) { return beginBackup(blobStorageUrl, sasToken, getDefaultPollingInterval()); } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. 
* @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws NullPointerException if the {@code blobStorageUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> beginBackup(String blobStorageUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(blobStorageUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'blobStorageUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, backupActivationOperation(blobStorageUrl, sasToken), backupPollOperation(), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), backupFetchOperation()); } /** * Initiates a full backup of the Key Vault. * * @param blobStorageUrl The URL for the Blob Storage resource where the backup will be located. * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultBackupOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. 
*/ Mono<Response<KeyVaultBackupOperation>> backupWithResponse(String blobStorageUrl, String sasToken, Context context) { SASTokenParameter sasTokenParameter = ", blobStorageUrl)) .doOnSuccess(response -> logger.info("Backed up at URL - {}", response.getValue().getAzureStorageBlobContainerUri())) .doOnError(error -> logger.warning("Failed to backup at URL - {}", blobStorageUrl, error)) .map(backupOperationResponse -> new SimpleResponse<>(backupOperationResponse.getRequest(), backupOperationResponse.getStatusCode(), backupOperationResponse.getHeaders(), (KeyVaultBackupOperation) transformToLongRunningOperation(backupOperationResponse.getValue()))); } private Function<PollingContext<KeyVaultBackupOperation>, Mono<KeyVaultBackupOperation>> backupActivationOperation(String blobStorageUrl, String sasToken) { return (pollingContext) -> { try { return withContext(context -> backupWithResponse(blobStorageUrl, sasToken, context)) .flatMap(backupResponse -> Mono.just(backupResponse.getValue())); } catch (RuntimeException e) { return monoError(logger, e); } }; } private Function<PollingContext<KeyVaultBackupOperation>, Mono<PollResponse<KeyVaultBackupOperation>>> backupPollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultBackupOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultBackupOperation keyVaultBackupOperation = pollResponse.getValue(); if (keyVaultBackupOperation == null) { logger.warning("Backup operation does not exist. 
Activation operation failed."); return Mono.just(new PollResponse<KeyVaultBackupOperation>( LongRunningOperationStatus.fromString("BACKUP_START_FAILED", true), null)); } final String jobId = keyVaultBackupOperation.getJobId(); return withContext(context -> clientImpl.fullBackupStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultBackupOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processBackupOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Function<PollingContext<KeyVaultBackupOperation>, Mono<String>> backupFetchOperation() { return (pollingContext) -> { String blobContainerUri = pollingContext.getLatestResponse().getValue().getAzureStorageBlobContainerUri(); if (blobContainerUri == null) { return Mono.empty(); } else { return Mono.just(blobContainerUri); } }; } private static Mono<PollResponse<KeyVaultBackupOperation>> processBackupOperationResponse(Response<KeyVaultBackupOperation> response) { String operationStatus = response.getValue().getStatus().toLowerCase(Locale.US); return Mono.just(new PollResponse<>( toLongRunningOperationStatus(operationStatus.toLowerCase(Locale.US)), response.getValue())); } private static LongRunningOperationStatus toLongRunningOperationStatus(String operationStatus) { switch (operationStatus) { case "inprogress": return LongRunningOperationStatus.IN_PROGRESS; case "succeeded": return LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; case "failed": return LongRunningOperationStatus.FAILED; default: return LongRunningOperationStatus.fromString("POLLING_FAILED", true); } } /** * Gets a pending {@link KeyVaultBackupOperation backup operation} from the Key Vault. * * @param jobId The operation identifier. 
* @throws NullPointerException if the {@code jobId} is null. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultBackupOperation, String> getBackupOperation(String jobId) { Objects.requireNonNull(jobId, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'jobId'")); return new PollerFlux<>(Duration.ofSeconds(1), (pollingContext) -> Mono.empty(), backupStatusPollOperation(jobId), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), backupFetchOperation()); } private Function<PollingContext<KeyVaultBackupOperation>, Mono<PollResponse<KeyVaultBackupOperation>>> backupStatusPollOperation(String jobId) { return (pollingContext) -> withContext(context -> clientImpl.fullBackupStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultBackupOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processBackupOperationResponse); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code backupFolderUrl} or {@code sasToken} are {@code null}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginRestore(String backupFolderUrl, String sasToken) { return beginRestore(backupFolderUrl, sasToken, getDefaultPollingInterval()); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code backupFolderUrl} or {@code sasToken} are {@code null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginRestore(String backupFolderUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(backupFolderUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'backupFolderUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, restoreActivationOperation(backupFolderUrl, sasToken), restorePollOperation(), (pollingContext, firstResponse) -> Mono.empty(), (pollingContext) -> Mono.empty()); } /** * Initiates a full restore of the Key Vault. * * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. 
An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. */ Mono<Response<KeyVaultRestoreOperation>> restoreWithResponse(String backupFolderUrl, String sasToken, Context context) { String[] segments = backupFolderUrl.split("/"); String folderName = segments[segments.length - 1]; String containerUrl = backupFolderUrl.substring(0, backupFolderUrl.length() - folderName.length()); SASTokenParameter sasTokenParameter = new SASTokenParameter() .setStorageResourceUri(containerUrl) .setToken(sasToken); RestoreOperationParameters restoreOperationParameters = new RestoreOperationParameters() .setSasTokenParameters(sasTokenParameter) .setFolderToRestore(folderName); return clientImpl.fullRestoreOperationWithResponseAsync(vaultUrl, restoreOperationParameters, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE)) .doOnRequest(ignored -> logger.info("Restoring from location - {}", backupFolderUrl)) .doOnSuccess(response -> logger.info("Restored from location - {}", backupFolderUrl)) .doOnError(error -> logger.warning("Failed to restore from location - {}", backupFolderUrl, error)) .map(restoreOperationResponse -> new SimpleResponse<>(restoreOperationResponse.getRequest(), restoreOperationResponse.getStatusCode(), restoreOperationResponse.getHeaders(), (KeyVaultRestoreOperation) transformToLongRunningOperation(restoreOperationResponse.getValue()))); } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<KeyVaultRestoreOperation>> restoreActivationOperation(String backupFolderUrl, String sasToken) { return (pollingContext) -> { try { return withContext(context -> 
restoreWithResponse(backupFolderUrl, sasToken, context)) .flatMap(restoreResponse -> Mono.just(restoreResponse.getValue())); } catch (RuntimeException e) { return monoError(logger, e); } }; } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> restorePollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultRestoreOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultRestoreOperation keyVaultRestoreOperation = pollResponse.getValue(); if (keyVaultRestoreOperation == null) { logger.warning("Restore operation does not exist. Activation operation failed."); return Mono.just(new PollResponse<KeyVaultRestoreOperation>( LongRunningOperationStatus.fromString("RESTORE_START_FAILED", true), null)); } final String jobId = keyVaultRestoreOperation.getJobId(); return withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private static Mono<PollResponse<KeyVaultRestoreOperation>> processRestoreOperationResponse(Response<KeyVaultRestoreOperation> response) { String operationStatus = response.getValue().getStatus().toLowerCase(Locale.US); return Mono.just(new PollResponse<>( toLongRunningOperationStatus(operationStatus.toLowerCase(Locale.US)), response.getValue())); } /** * Gets a pending {@link KeyVaultRestoreOperation full or selective restore operation} 
from the Key Vault. * * @param jobId The operation identifier. * @throws NullPointerException if the {@code jobId} is null. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation restore operation} status. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> getRestoreOperation(String jobId) { Objects.requireNonNull(jobId, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'jobId'")); return new PollerFlux<>(Duration.ofSeconds(1), (pollingContext) -> Mono.empty(), restoreStatusPollOperation(jobId), (pollingContext, firstResponse) -> Mono.error(new RuntimeException("Cancellation is not supported")), (pollingContext) -> Mono.empty()); } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> restoreStatusPollOperation(String jobId) { return (pollingContext) -> withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. 
* @throws NullPointerException if the {@code keyName}, {@code backupFolderUrl} or {@code sasToken} are {@code * null}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginSelectiveRestore(String keyName, String backupFolderUrl, String sasToken) { return beginSelectiveRestore(keyName, backupFolderUrl, sasToken, getDefaultPollingInterval()); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param pollingInterval The interval at which the operation status will be polled for. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws NullPointerException if the {@code keyName}, {@code backupFolderUrl} or {@code sasToken} are {@code * null}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<KeyVaultRestoreOperation, Void> beginSelectiveRestore(String keyName, String backupFolderUrl, String sasToken, Duration pollingInterval) { Objects.requireNonNull(keyName, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'keyName'")); Objects.requireNonNull(backupFolderUrl, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'backupFolderUrl'")); Objects.requireNonNull(sasToken, String.format(KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.PARAMETER_REQUIRED), "'sasToken'")); return new PollerFlux<>(pollingInterval, selectiveRestoreActivationOperation(keyName, backupFolderUrl, sasToken), selectiveRestorePollOperation(), (pollingContext, firstResponse) -> Mono.empty(), (pollingContext) -> Mono.empty()); } /** * Restores all versions of a given key using the supplied SAS token pointing to a previously stored Azure Blob * storage backup folder. * * @param keyName The name of the key to be restored. * @param backupFolderUrl The URL for the Blob Storage resource where the backup is located, including the path to * the blob container where the backup resides. This would be the exact value that is returned as the result of a * backup operation. An example of such a URL may look like the following: https: * @param sasToken A Shared Access Signature (SAS) token to authorize access to the blob. * @param context Additional context that is passed through the HTTP pipeline during the service call. * @return A {@link PollerFlux} polling on the {@link KeyVaultRestoreOperation backup operation} status. * @throws KeyVaultErrorException if the operation is unsuccessful. 
*/ Mono<Response<KeyVaultRestoreOperation>> selectiveRestoreWithResponse(String keyName, String backupFolderUrl, String sasToken, Context context) { String[] segments = backupFolderUrl.split("/"); String folderName = segments[segments.length - 1]; String containerUrl = backupFolderUrl.substring(0, backupFolderUrl.length() - folderName.length()); SASTokenParameter sasTokenParameter = new SASTokenParameter() .setStorageResourceUri(containerUrl) .setToken(sasToken); SelectiveKeyRestoreOperationParameters selectiveKeyRestoreOperationParameters = new SelectiveKeyRestoreOperationParameters() .setSasTokenParameters(sasTokenParameter) .setFolder(folderName); return clientImpl.selectiveKeyRestoreOperationWithResponseAsync(vaultUrl, keyName, selectiveKeyRestoreOperationParameters, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE)) .doOnRequest(ignored -> logger.info("Restoring key \"{}\" from location - {}", keyName, backupFolderUrl)) .doOnSuccess(response -> logger.info("Restored key \"{}\" from location - {}", keyName, backupFolderUrl)) .doOnError(error -> logger.warning("Failed to restore key \"{}\" from location - {}", keyName, backupFolderUrl, error)) .map(restoreOperationResponse -> new SimpleResponse<>(restoreOperationResponse.getRequest(), restoreOperationResponse.getStatusCode(), restoreOperationResponse.getHeaders(), (KeyVaultRestoreOperation) transformToLongRunningOperation(restoreOperationResponse.getValue()))); } private Function<PollingContext<KeyVaultRestoreOperation>, Mono<KeyVaultRestoreOperation>> selectiveRestoreActivationOperation(String keyName, String backupFolderUrl, String sasToken) { return (pollingContext) -> { try { return withContext(context -> selectiveRestoreWithResponse(keyName, backupFolderUrl, sasToken, context)) .flatMap(selectiveKeyRestoreResponse -> Mono.just(selectiveKeyRestoreResponse.getValue())); } catch (RuntimeException e) { return monoError(logger, e); } }; } private 
Function<PollingContext<KeyVaultRestoreOperation>, Mono<PollResponse<KeyVaultRestoreOperation>>> selectiveRestorePollOperation() { return (pollingContext) -> { try { PollResponse<KeyVaultRestoreOperation> pollResponse = pollingContext.getLatestResponse(); if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED || pollResponse.getStatus() == LongRunningOperationStatus.FAILED) { return Mono.just(pollResponse); } final KeyVaultRestoreOperation keyVaultRestoreOperation = pollResponse.getValue(); if (keyVaultRestoreOperation == null) { logger.warning("Restore operation does not exist. Activation operation failed."); return Mono.just(new PollResponse<KeyVaultRestoreOperation>( LongRunningOperationStatus.fromString("SELECTIVE_RESTORE_START_FAILED", true), null)); } final String jobId = keyVaultRestoreOperation.getJobId(); return withContext(context -> clientImpl.restoreStatusWithResponseAsync(vaultUrl, jobId, context.addData(AZ_TRACING_NAMESPACE_KEY, KEYVAULT_TRACING_NAMESPACE_VALUE))) .map(response -> new SimpleResponse<>(response, (KeyVaultRestoreOperation) transformToLongRunningOperation(response.getValue()))) .flatMap(KeyVaultBackupAsyncClient::processRestoreOperationResponse); } catch (HttpResponseException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private static <O> KeyVaultLongRunningOperation transformToLongRunningOperation(O operation) { if (operation instanceof RestoreOperation) { RestoreOperation restoreOperation = (RestoreOperation) operation; return new KeyVaultRestoreOperation(restoreOperation.getStatus(), restoreOperation.getStatusDetails(), createKeyVaultErrorFromError(restoreOperation.getError()), restoreOperation.getJobId(), restoreOperation.getStartTime(), restoreOperation.getEndTime()); } else if (operation instanceof SelectiveKeyRestoreOperation) { SelectiveKeyRestoreOperation selectiveKeyRestoreOperation = (SelectiveKeyRestoreOperation) 
operation; return new KeyVaultRestoreOperation(selectiveKeyRestoreOperation.getStatus(), selectiveKeyRestoreOperation.getStatusDetails(), createKeyVaultErrorFromError(selectiveKeyRestoreOperation.getError()), selectiveKeyRestoreOperation.getJobId(), selectiveKeyRestoreOperation.getStartTime(), selectiveKeyRestoreOperation.getEndTime()); } else if (operation instanceof FullBackupOperation) { FullBackupOperation fullBackupOperation = (FullBackupOperation) operation; return new KeyVaultBackupOperation(fullBackupOperation.getStatus(), fullBackupOperation.getStatusDetails(), createKeyVaultErrorFromError(fullBackupOperation.getError()), fullBackupOperation.getJobId(), fullBackupOperation.getStartTime(), fullBackupOperation.getEndTime(), fullBackupOperation.getAzureStorageBlobContainerUri()); } else { throw new UnsupportedOperationException(); } } private static KeyVaultError createKeyVaultErrorFromError(Error error) { if (error == null) { return null; } return new KeyVaultError(error.getCode(), error.getMessage(), createKeyVaultErrorFromError(error.getInnerError())); } }
same null check ?
public ServiceBusSessionProcessorClientBuilder processError(Consumer<Throwable> processError) { this.processError = processError; return this; }
this.processError = processError;
public ServiceBusSessionProcessorClientBuilder processError(Consumer<Throwable> processError) { this.processError = processError; return this; }
class ServiceBusSessionProcessorClientBuilder { private final ServiceBusProcessorClientOptions processorClientOptions; private final ServiceBusSessionReceiverClientBuilder sessionReceiverClientBuilder; private Consumer<ServiceBusProcessorMessageContext> processMessage; private Consumer<Throwable> processError; private ServiceBusSessionProcessorClientBuilder() { sessionReceiverClientBuilder = new ServiceBusSessionReceiverClientBuilder(); processorClientOptions = new ServiceBusProcessorClientOptions(); sessionReceiverClientBuilder.maxConcurrentSessions(1); processorClientOptions.setMaxConcurrentCalls(1); } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. */ public ServiceBusSessionProcessorClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("'maxConcurrentSessions' cannot be less than 1")); } sessionReceiverClientBuilder.maxConcurrentSessions(maxConcurrentSessions); return this; } /** * Sets the prefetch count of the processor. For both {@link ReceiveMode * ReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application starts the processor. * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off. * * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusProcessorClientBuilder} object. 
*/ public ServiceBusSessionProcessorClientBuilder prefetchCount(int prefetchCount) { sessionReceiverClientBuilder.prefetchCount(prefetchCount); return this; } /** * Sets the name of the queue to create a processor for. * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object. */ public ServiceBusSessionProcessorClientBuilder queueName(String queueName) { sessionReceiverClientBuilder.queueName(queueName); return this; } /** * Sets the receive mode for the processor. * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object. */ public ServiceBusSessionProcessorClientBuilder receiveMode(ReceiveMode receiveMode) { sessionReceiverClientBuilder.receiveMode(receiveMode); return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object. * @see */ public ServiceBusSessionProcessorClientBuilder subscriptionName(String subscriptionName) { sessionReceiverClientBuilder.subscriptionName(subscriptionName); return this; } /** * Sets the name of the topic. <b>{@link * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object. * @see */ public ServiceBusSessionProcessorClientBuilder topicName(String topicName) { sessionReceiverClientBuilder.topicName(topicName); return this; } /** * The message processing callback for the processor that will be executed when a message is received. * @param processMessage The message processing consumer that will be executed when a message is received. * * @return The updated {@link ServiceBusProcessorClientBuilder} object. 
*/ public ServiceBusSessionProcessorClientBuilder processMessage( Consumer<ServiceBusProcessorMessageContext> processMessage) { this.processMessage = processMessage; return this; } /** * The error handler for the processor which will be invoked in the event of an error while receiving messages. * @param processError The error handler which will be executed when an error occurs. * * @return The updated {@link ServiceBusProcessorClientBuilder} object */ /** * Max concurrent messages that this processor should process. * * @param maxConcurrentCalls max concurrent messages that this processor should process. * * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object. */ public ServiceBusSessionProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) { if (maxConcurrentCalls < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1")); } processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls); return this; } /** * Creates a <b>session-aware</b> Service Bus processor responsible for reading * {@link ServiceBusReceivedMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusProcessorClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link * @throws NullPointerException if the {@link * callbacks are not set. */ public ServiceBusProcessorClient buildProcessorClient() { return new ServiceBusProcessorClient(sessionReceiverClientBuilder, Objects.requireNonNull(processMessage, "'processMessage' cannot be null"), Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions); } }
class ServiceBusSessionProcessorClientBuilder { private final ServiceBusProcessorClientOptions processorClientOptions; private final ServiceBusSessionReceiverClientBuilder sessionReceiverClientBuilder; private Consumer<ServiceBusProcessorMessageContext> processMessage; private Consumer<Throwable> processError; private ServiceBusSessionProcessorClientBuilder() { sessionReceiverClientBuilder = new ServiceBusSessionReceiverClientBuilder(); processorClientOptions = new ServiceBusProcessorClientOptions(); sessionReceiverClientBuilder.maxConcurrentSessions(1); processorClientOptions.setMaxConcurrentCalls(1); } /** * Enables session processing roll-over by processing at most {@code maxConcurrentSessions}. * * @param maxConcurrentSessions Maximum number of concurrent sessions to process at any given time. * * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object. * @throws IllegalArgumentException if {@code maxConcurrentSessions} is less than 1. */ public ServiceBusSessionProcessorClientBuilder maxConcurrentSessions(int maxConcurrentSessions) { if (maxConcurrentSessions < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("'maxConcurrentSessions' cannot be less than 1")); } sessionReceiverClientBuilder.maxConcurrentSessions(maxConcurrentSessions); return this; } /** * Sets the prefetch count of the processor. For both {@link ReceiveMode * ReceiveMode * * Prefetch speeds up the message flow by aiming to have a message readily available for local retrieval when * and before the application starts the processor. * Setting a non-zero value will prefetch that number of messages. Setting the value to zero turns prefetch off. * * @param prefetchCount The prefetch count. * * @return The modified {@link ServiceBusProcessorClientBuilder} object. 
*/ public ServiceBusSessionProcessorClientBuilder prefetchCount(int prefetchCount) { sessionReceiverClientBuilder.prefetchCount(prefetchCount); return this; } /** * Sets the name of the queue to create a processor for. * @param queueName Name of the queue. * * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object. */ public ServiceBusSessionProcessorClientBuilder queueName(String queueName) { sessionReceiverClientBuilder.queueName(queueName); return this; } /** * Sets the receive mode for the processor. * @param receiveMode Mode for receiving messages. * * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object. */ public ServiceBusSessionProcessorClientBuilder receiveMode(ReceiveMode receiveMode) { sessionReceiverClientBuilder.receiveMode(receiveMode); return this; } /** * Sets the name of the subscription in the topic to listen to. <b>{@link * </b> * @param subscriptionName Name of the subscription. * * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object. * @see */ public ServiceBusSessionProcessorClientBuilder subscriptionName(String subscriptionName) { sessionReceiverClientBuilder.subscriptionName(subscriptionName); return this; } /** * Sets the name of the topic. <b>{@link * @param topicName Name of the topic. * * @return The modified {@link ServiceBusSessionProcessorClientBuilder} object. * @see */ public ServiceBusSessionProcessorClientBuilder topicName(String topicName) { sessionReceiverClientBuilder.topicName(topicName); return this; } /** * The message processing callback for the processor that will be executed when a message is received. * @param processMessage The message processing consumer that will be executed when a message is received. * * @return The updated {@link ServiceBusProcessorClientBuilder} object. 
*/ public ServiceBusSessionProcessorClientBuilder processMessage( Consumer<ServiceBusProcessorMessageContext> processMessage) { this.processMessage = processMessage; return this; } /** * The error handler for the processor which will be invoked in the event of an error while receiving messages. * @param processError The error handler which will be executed when an error occurs. * * @return The updated {@link ServiceBusProcessorClientBuilder} object */ /** * Max concurrent messages that this processor should process. * * @param maxConcurrentCalls max concurrent messages that this processor should process. * * @return The updated {@link ServiceBusSessionProcessorClientBuilder} object. */ public ServiceBusSessionProcessorClientBuilder maxConcurrentCalls(int maxConcurrentCalls) { if (maxConcurrentCalls < 1) { throw logger.logExceptionAsError( new IllegalArgumentException("'maxConcurrentCalls' cannot be less than 1")); } processorClientOptions.setMaxConcurrentCalls(maxConcurrentCalls); return this; } /** * Creates a <b>session-aware</b> Service Bus processor responsible for reading * {@link ServiceBusReceivedMessage messages} from a specific queue or topic. * * @return An new {@link ServiceBusProcessorClient} that receives messages from a queue or topic. * @throws IllegalStateException if {@link * topicName} are not set or, both of these fields are set. It is also thrown if the Service Bus {@link * * {@link * * @throws IllegalArgumentException Queue or topic name are not set via {@link * queueName()} or {@link * @throws NullPointerException if the {@link * callbacks are not set. */ public ServiceBusProcessorClient buildProcessorClient() { return new ServiceBusProcessorClient(sessionReceiverClientBuilder, Objects.requireNonNull(processMessage, "'processMessage' cannot be null"), Objects.requireNonNull(processError, "'processError' cannot be null"), processorClientOptions); } }
If the user's `processHandler` throws an error when they want to abandon the message, are we abandoning the message?
public void onNext(ServiceBusReceivedMessageContext serviceBusReceivedMessageContext) { if (serviceBusReceivedMessageContext.hasError()) { handleError(serviceBusReceivedMessageContext.getThrowable()); } else { try { ServiceBusProcessorMessageContext serviceBusProcessorMessageContext = new ServiceBusProcessorMessageContext(receiverClient, serviceBusReceivedMessageContext); processMessage.accept(serviceBusProcessorMessageContext); } catch (Exception exception) { handleError(exception); } } if (isRunning.get()) { logger.verbose("Requesting 1 more message from upstream"); receiverSubscription.get().request(1); } }
} catch (Exception exception) {
public void onNext(ServiceBusReceivedMessageContext serviceBusReceivedMessageContext) { if (serviceBusReceivedMessageContext.hasError()) { handleError(serviceBusReceivedMessageContext.getThrowable()); } else { try { ServiceBusProcessorMessageContext serviceBusProcessorMessageContext = new ServiceBusProcessorMessageContext(receiverClient, serviceBusReceivedMessageContext); processMessage.accept(serviceBusProcessorMessageContext); } catch (Exception ex) { handleError(ex); logger.warning("Error when processing message. Abandoning message.", ex); abandonMessage(serviceBusReceivedMessageContext, receiverClient); } } if (isRunning.get()) { logger.verbose("Requesting 1 more message from upstream"); receiverSubscription.get().request(1); } }
class ServiceBusProcessorClient { private static final int SCHEDULER_INTERVAL_IN_SECONDS = 10; private final ClientLogger logger = new ClientLogger(ServiceBusProcessorClient.class); private final ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder; private final ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder; private final Consumer<ServiceBusProcessorMessageContext> processMessage; private final Consumer<Throwable> processError; private final ServiceBusProcessorClientOptions processorOptions; private final AtomicReference<Subscription> receiverSubscription = new AtomicReference<>(); private final AtomicReference<ServiceBusReceiverAsyncClient> asyncClient = new AtomicReference<>(); private final AtomicBoolean isRunning = new AtomicBoolean(); private ScheduledExecutorService scheduledExecutor; /** * Constructor to create a sessions-enabled processor. * * @param sessionReceiverBuilder The session processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. 
*/ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.sessionReceiverBuilder = Objects.requireNonNull(sessionReceiverBuilder, "'sessionReceiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(sessionReceiverBuilder.buildAsyncClientForProcessor()); this.receiverBuilder = null; } /** * Constructor to create a processor. * * @param receiverBuilder The processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. */ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.receiverBuilder = Objects.requireNonNull(receiverBuilder, "'receiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(receiverBuilder.buildAsyncClient()); this.sessionReceiverBuilder = null; } /** * Starts the processor in the background. 
When this method is called, the processor will initiate a message * receiver that will invoke the message handler when new messages are available. This method is idempotent i.e * calling {@link * after calling {@link * sessions. Calling {@link * a new set of sessions will be processed. */ public synchronized void start() { if (isRunning.getAndSet(true)) { logger.info("Processor is already running"); return; } receiveMessages(); this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(); scheduledExecutor.scheduleWithFixedDelay(() -> { if (this.asyncClient.get().isConnectionClosed()) { restartMessageReceiver(); } }, SCHEDULER_INTERVAL_IN_SECONDS, SCHEDULER_INTERVAL_IN_SECONDS, TimeUnit.SECONDS); } /** * Stops the message processing for this processor. The receiving links and sessions are kept active and this * processor can resume processing messages by calling {@link */ public synchronized void stop() { isRunning.set(false); } /** * Stops message processing and closes the processor. The receiving links and sessions are closed and calling * {@link */ public synchronized void close() { isRunning.set(false); if (receiverSubscription.get() != null) { receiverSubscription.get().cancel(); } asyncClient.get().close(); scheduledExecutor.shutdown(); } /** * Returns {@code true} if the processor is running. If the processor is stopped or closed, this method returns * {@code false}. * * @return {@code true} if the processor is running. 
*/ public synchronized boolean isRunning() { return isRunning.get(); } private synchronized void receiveMessages() { if (receiverSubscription.get() != null) { receiverSubscription.get().request(1); return; } ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.receiveMessages() .parallel(processorOptions.getMaxConcurrentCalls()) .runOn(Schedulers.boundedElastic()) .subscribe(new Subscriber<ServiceBusReceivedMessageContext>() { @Override public void onSubscribe(Subscription subscription) { receiverSubscription.set(subscription); receiverSubscription.get().request(1); } @Override @Override public void onError(Throwable throwable) { logger.info("Error receiving messages.", throwable); handleError(throwable); if (isRunning.get()) { restartMessageReceiver(); } } @Override public void onComplete() { logger.info("Completed receiving messages."); if (isRunning.get()) { restartMessageReceiver(); } } }); } private void handleError(Throwable throwable) { try { processError.accept(throwable); } catch (Exception ex) { logger.verbose("Error from error handler. Ignoring error.", ex); } } private void restartMessageReceiver() { receiverSubscription.set(null); ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.close(); ServiceBusReceiverAsyncClient newReceiverClient = this.receiverBuilder == null ? this.sessionReceiverBuilder.buildAsyncClientForProcessor() : this.receiverBuilder.buildAsyncClient(); asyncClient.set(newReceiverClient); receiveMessages(); } }
class ServiceBusProcessorClient implements AutoCloseable { private static final int SCHEDULER_INTERVAL_IN_SECONDS = 10; private final ClientLogger logger = new ClientLogger(ServiceBusProcessorClient.class); private final ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder; private final ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder; private final Consumer<ServiceBusProcessorMessageContext> processMessage; private final Consumer<Throwable> processError; private final ServiceBusProcessorClientOptions processorOptions; private final AtomicReference<Subscription> receiverSubscription = new AtomicReference<>(); private final AtomicReference<ServiceBusReceiverAsyncClient> asyncClient = new AtomicReference<>(); private final AtomicBoolean isRunning = new AtomicBoolean(); private ScheduledExecutorService scheduledExecutor; /** * Constructor to create a sessions-enabled processor. * * @param sessionReceiverBuilder The session processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. 
*/ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.sessionReceiverBuilder = Objects.requireNonNull(sessionReceiverBuilder, "'sessionReceiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(sessionReceiverBuilder.buildAsyncClientForProcessor()); this.receiverBuilder = null; } /** * Constructor to create a processor. * * @param receiverBuilder The processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. */ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.receiverBuilder = Objects.requireNonNull(receiverBuilder, "'receiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(receiverBuilder.buildAsyncClient()); this.sessionReceiverBuilder = null; } /** * Starts the processor in the background. 
When this method is called, the processor will initiate a message * receiver that will invoke the message handler when new messages are available. This method is idempotent i.e * calling {@link * after calling {@link * sessions. Calling {@link * a new set of sessions will be processed. */ public synchronized void start() { if (isRunning.getAndSet(true)) { logger.info("Processor is already running"); return; } receiveMessages(); this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(); scheduledExecutor.scheduleWithFixedDelay(() -> { if (this.asyncClient.get().isConnectionClosed()) { restartMessageReceiver(); } }, SCHEDULER_INTERVAL_IN_SECONDS, SCHEDULER_INTERVAL_IN_SECONDS, TimeUnit.SECONDS); } /** * Stops the message processing for this processor. The receiving links and sessions are kept active and this * processor can resume processing messages by calling {@link */ public synchronized void stop() { isRunning.set(false); } /** * Stops message processing and closes the processor. The receiving links and sessions are closed and calling * {@link */ @Override public synchronized void close() { isRunning.set(false); if (receiverSubscription.get() != null) { receiverSubscription.get().cancel(); } asyncClient.get().close(); scheduledExecutor.shutdown(); } /** * Returns {@code true} if the processor is running. If the processor is stopped or closed, this method returns * {@code false}. * * @return {@code true} if the processor is running. 
*/ public synchronized boolean isRunning() { return isRunning.get(); } private synchronized void receiveMessages() { if (receiverSubscription.get() != null) { receiverSubscription.get().request(1); return; } ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.receiveMessages() .parallel(processorOptions.getMaxConcurrentCalls()) .runOn(Schedulers.boundedElastic()) .subscribe(new Subscriber<ServiceBusReceivedMessageContext>() { @Override public void onSubscribe(Subscription subscription) { receiverSubscription.set(subscription); receiverSubscription.get().request(1); } @Override @Override public void onError(Throwable throwable) { logger.info("Error receiving messages.", throwable); handleError(throwable); if (isRunning.get()) { restartMessageReceiver(); } } @Override public void onComplete() { logger.info("Completed receiving messages."); if (isRunning.get()) { restartMessageReceiver(); } } }); } private void abandonMessage(ServiceBusReceivedMessageContext serviceBusReceivedMessageContext, ServiceBusReceiverAsyncClient receiverClient) { try { receiverClient.abandon(serviceBusReceivedMessageContext.getMessage()).block(); } catch (Exception exception) { logger.verbose("Failed to abandon message", exception); } } private void handleError(Throwable throwable) { try { processError.accept(throwable); } catch (Exception ex) { logger.verbose("Error from error handler. Ignoring error.", ex); } } private void restartMessageReceiver() { receiverSubscription.set(null); ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.close(); ServiceBusReceiverAsyncClient newReceiverClient = this.receiverBuilder == null ? this.sessionReceiverBuilder.buildAsyncClientForProcessor() : this.receiverBuilder.buildAsyncClient(); asyncClient.set(newReceiverClient); receiveMessages(); } }
I wrote a code snippet to test it. It doesn't. But it does throw a StackOverflowError if I comment out two lines (parallel and runOn). You're safe because you use multiple threads. ```java import reactor.core.publisher.Flux; import reactor.core.scheduler.Schedulers; import java.io.IOException; public class Main { public static void foo() { Flux.just(1, 2) .parallel(2) .runOn(Schedulers.boundedElastic()) .subscribe( System.out::println, System.out::println, Main::foo2 ); } public static void foo2() { foo(); } public static void main(String[] args) throws IOException { foo(); System.in.read(); } } ```
public void onComplete() { logger.info("Completed receiving messages."); if (isRunning.get()) { restartMessageReceiver(); } }
restartMessageReceiver();
public void onComplete() { logger.info("Completed receiving messages."); if (isRunning.get()) { restartMessageReceiver(); } }
class ServiceBusProcessorClient { private static final int SCHEDULER_INTERVAL_IN_SECONDS = 10; private final ClientLogger logger = new ClientLogger(ServiceBusProcessorClient.class); private final ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder; private final ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder; private final Consumer<ServiceBusProcessorMessageContext> processMessage; private final Consumer<Throwable> processError; private final ServiceBusProcessorClientOptions processorOptions; private final AtomicReference<Subscription> receiverSubscription = new AtomicReference<>(); private final AtomicReference<ServiceBusReceiverAsyncClient> asyncClient = new AtomicReference<>(); private final AtomicBoolean isRunning = new AtomicBoolean(); private ScheduledExecutorService scheduledExecutor; /** * Constructor to create a sessions-enabled processor. * * @param sessionReceiverBuilder The session processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. 
*/ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.sessionReceiverBuilder = Objects.requireNonNull(sessionReceiverBuilder, "'sessionReceiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(sessionReceiverBuilder.buildAsyncClientForProcessor()); this.receiverBuilder = null; } /** * Constructor to create a processor. * * @param receiverBuilder The processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. */ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.receiverBuilder = Objects.requireNonNull(receiverBuilder, "'receiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(receiverBuilder.buildAsyncClient()); this.sessionReceiverBuilder = null; } /** * Starts the processor in the background. 
When this method is called, the processor will initiate a message * receiver that will invoke the message handler when new messages are available. This method is idempotent i.e * calling {@link * after calling {@link * sessions. Calling {@link * a new set of sessions will be processed. */ public synchronized void start() { if (isRunning.getAndSet(true)) { logger.info("Processor is already running"); return; } receiveMessages(); this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(); scheduledExecutor.scheduleWithFixedDelay(() -> { if (this.asyncClient.get().isConnectionClosed()) { restartMessageReceiver(); } }, SCHEDULER_INTERVAL_IN_SECONDS, SCHEDULER_INTERVAL_IN_SECONDS, TimeUnit.SECONDS); } /** * Stops the message processing for this processor. The receiving links and sessions are kept active and this * processor can resume processing messages by calling {@link */ public synchronized void stop() { isRunning.set(false); } /** * Stops message processing and closes the processor. The receiving links and sessions are closed and calling * {@link */ public synchronized void close() { isRunning.set(false); if (receiverSubscription.get() != null) { receiverSubscription.get().cancel(); } asyncClient.get().close(); scheduledExecutor.shutdown(); } /** * Returns {@code true} if the processor is running. If the processor is stopped or closed, this method returns * {@code false}. * * @return {@code true} if the processor is running. 
*/ public synchronized boolean isRunning() { return isRunning.get(); } private synchronized void receiveMessages() { if (receiverSubscription.get() != null) { receiverSubscription.get().request(1); return; } ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.receiveMessages() .parallel(processorOptions.getMaxConcurrentCalls()) .runOn(Schedulers.boundedElastic()) .subscribe(new Subscriber<ServiceBusReceivedMessageContext>() { @Override public void onSubscribe(Subscription subscription) { receiverSubscription.set(subscription); receiverSubscription.get().request(1); } @Override public void onNext(ServiceBusReceivedMessageContext serviceBusReceivedMessageContext) { if (serviceBusReceivedMessageContext.hasError()) { handleError(serviceBusReceivedMessageContext.getThrowable()); } else { try { ServiceBusProcessorMessageContext serviceBusProcessorMessageContext = new ServiceBusProcessorMessageContext(receiverClient, serviceBusReceivedMessageContext); processMessage.accept(serviceBusProcessorMessageContext); } catch (Exception exception) { handleError(exception); } } if (isRunning.get()) { logger.verbose("Requesting 1 more message from upstream"); receiverSubscription.get().request(1); } } @Override public void onError(Throwable throwable) { logger.info("Error receiving messages.", throwable); handleError(throwable); if (isRunning.get()) { restartMessageReceiver(); } } @Override }); } private void handleError(Throwable throwable) { try { processError.accept(throwable); } catch (Exception ex) { logger.verbose("Error from error handler. Ignoring error.", ex); } } private void restartMessageReceiver() { receiverSubscription.set(null); ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.close(); ServiceBusReceiverAsyncClient newReceiverClient = this.receiverBuilder == null ? 
this.sessionReceiverBuilder.buildAsyncClientForProcessor() : this.receiverBuilder.buildAsyncClient(); asyncClient.set(newReceiverClient); receiveMessages(); } }
class ServiceBusProcessorClient implements AutoCloseable { private static final int SCHEDULER_INTERVAL_IN_SECONDS = 10; private final ClientLogger logger = new ClientLogger(ServiceBusProcessorClient.class); private final ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder; private final ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder; private final Consumer<ServiceBusProcessorMessageContext> processMessage; private final Consumer<Throwable> processError; private final ServiceBusProcessorClientOptions processorOptions; private final AtomicReference<Subscription> receiverSubscription = new AtomicReference<>(); private final AtomicReference<ServiceBusReceiverAsyncClient> asyncClient = new AtomicReference<>(); private final AtomicBoolean isRunning = new AtomicBoolean(); private ScheduledExecutorService scheduledExecutor; /** * Constructor to create a sessions-enabled processor. * * @param sessionReceiverBuilder The session processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. 
*/ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.sessionReceiverBuilder = Objects.requireNonNull(sessionReceiverBuilder, "'sessionReceiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(sessionReceiverBuilder.buildAsyncClientForProcessor()); this.receiverBuilder = null; } /** * Constructor to create a processor. * * @param receiverBuilder The processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. */ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.receiverBuilder = Objects.requireNonNull(receiverBuilder, "'receiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(receiverBuilder.buildAsyncClient()); this.sessionReceiverBuilder = null; } /** * Starts the processor in the background. 
When this method is called, the processor will initiate a message * receiver that will invoke the message handler when new messages are available. This method is idempotent i.e * calling {@link * after calling {@link * sessions. Calling {@link * a new set of sessions will be processed. */ public synchronized void start() { if (isRunning.getAndSet(true)) { logger.info("Processor is already running"); return; } receiveMessages(); this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(); scheduledExecutor.scheduleWithFixedDelay(() -> { if (this.asyncClient.get().isConnectionClosed()) { restartMessageReceiver(); } }, SCHEDULER_INTERVAL_IN_SECONDS, SCHEDULER_INTERVAL_IN_SECONDS, TimeUnit.SECONDS); } /** * Stops the message processing for this processor. The receiving links and sessions are kept active and this * processor can resume processing messages by calling {@link */ public synchronized void stop() { isRunning.set(false); } /** * Stops message processing and closes the processor. The receiving links and sessions are closed and calling * {@link */ @Override public synchronized void close() { isRunning.set(false); if (receiverSubscription.get() != null) { receiverSubscription.get().cancel(); } asyncClient.get().close(); scheduledExecutor.shutdown(); } /** * Returns {@code true} if the processor is running. If the processor is stopped or closed, this method returns * {@code false}. * * @return {@code true} if the processor is running. 
*/ public synchronized boolean isRunning() { return isRunning.get(); } private synchronized void receiveMessages() { if (receiverSubscription.get() != null) { receiverSubscription.get().request(1); return; } ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.receiveMessages() .parallel(processorOptions.getMaxConcurrentCalls()) .runOn(Schedulers.boundedElastic()) .subscribe(new Subscriber<ServiceBusReceivedMessageContext>() { @Override public void onSubscribe(Subscription subscription) { receiverSubscription.set(subscription); receiverSubscription.get().request(1); } @Override public void onNext(ServiceBusReceivedMessageContext serviceBusReceivedMessageContext) { if (serviceBusReceivedMessageContext.hasError()) { handleError(serviceBusReceivedMessageContext.getThrowable()); } else { try { ServiceBusProcessorMessageContext serviceBusProcessorMessageContext = new ServiceBusProcessorMessageContext(receiverClient, serviceBusReceivedMessageContext); processMessage.accept(serviceBusProcessorMessageContext); } catch (Exception ex) { handleError(ex); logger.warning("Error when processing message. 
Abandoning message.", ex); abandonMessage(serviceBusReceivedMessageContext, receiverClient); } } if (isRunning.get()) { logger.verbose("Requesting 1 more message from upstream"); receiverSubscription.get().request(1); } } @Override public void onError(Throwable throwable) { logger.info("Error receiving messages.", throwable); handleError(throwable); if (isRunning.get()) { restartMessageReceiver(); } } @Override }); } private void abandonMessage(ServiceBusReceivedMessageContext serviceBusReceivedMessageContext, ServiceBusReceiverAsyncClient receiverClient) { try { receiverClient.abandon(serviceBusReceivedMessageContext.getMessage()).block(); } catch (Exception exception) { logger.verbose("Failed to abandon message", exception); } } private void handleError(Throwable throwable) { try { processError.accept(throwable); } catch (Exception ex) { logger.verbose("Error from error handler. Ignoring error.", ex); } } private void restartMessageReceiver() { receiverSubscription.set(null); ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.close(); ServiceBusReceiverAsyncClient newReceiverClient = this.receiverBuilder == null ? this.sessionReceiverBuilder.buildAsyncClientForProcessor() : this.receiverBuilder.buildAsyncClient(); asyncClient.set(newReceiverClient); receiveMessages(); } }
Yeah, this will run on a different thread pool and shouldn't cause a stack overflow.
public void onComplete() { logger.info("Completed receiving messages."); if (isRunning.get()) { restartMessageReceiver(); } }
restartMessageReceiver();
public void onComplete() { logger.info("Completed receiving messages."); if (isRunning.get()) { restartMessageReceiver(); } }
class ServiceBusProcessorClient { private static final int SCHEDULER_INTERVAL_IN_SECONDS = 10; private final ClientLogger logger = new ClientLogger(ServiceBusProcessorClient.class); private final ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder; private final ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder; private final Consumer<ServiceBusProcessorMessageContext> processMessage; private final Consumer<Throwable> processError; private final ServiceBusProcessorClientOptions processorOptions; private final AtomicReference<Subscription> receiverSubscription = new AtomicReference<>(); private final AtomicReference<ServiceBusReceiverAsyncClient> asyncClient = new AtomicReference<>(); private final AtomicBoolean isRunning = new AtomicBoolean(); private ScheduledExecutorService scheduledExecutor; /** * Constructor to create a sessions-enabled processor. * * @param sessionReceiverBuilder The session processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. 
*/ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.sessionReceiverBuilder = Objects.requireNonNull(sessionReceiverBuilder, "'sessionReceiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(sessionReceiverBuilder.buildAsyncClientForProcessor()); this.receiverBuilder = null; } /** * Constructor to create a processor. * * @param receiverBuilder The processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. */ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.receiverBuilder = Objects.requireNonNull(receiverBuilder, "'receiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(receiverBuilder.buildAsyncClient()); this.sessionReceiverBuilder = null; } /** * Starts the processor in the background. 
When this method is called, the processor will initiate a message * receiver that will invoke the message handler when new messages are available. This method is idempotent i.e * calling {@link * after calling {@link * sessions. Calling {@link * a new set of sessions will be processed. */ public synchronized void start() { if (isRunning.getAndSet(true)) { logger.info("Processor is already running"); return; } receiveMessages(); this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(); scheduledExecutor.scheduleWithFixedDelay(() -> { if (this.asyncClient.get().isConnectionClosed()) { restartMessageReceiver(); } }, SCHEDULER_INTERVAL_IN_SECONDS, SCHEDULER_INTERVAL_IN_SECONDS, TimeUnit.SECONDS); } /** * Stops the message processing for this processor. The receiving links and sessions are kept active and this * processor can resume processing messages by calling {@link */ public synchronized void stop() { isRunning.set(false); } /** * Stops message processing and closes the processor. The receiving links and sessions are closed and calling * {@link */ public synchronized void close() { isRunning.set(false); if (receiverSubscription.get() != null) { receiverSubscription.get().cancel(); } asyncClient.get().close(); scheduledExecutor.shutdown(); } /** * Returns {@code true} if the processor is running. If the processor is stopped or closed, this method returns * {@code false}. * * @return {@code true} if the processor is running. 
*/ public synchronized boolean isRunning() { return isRunning.get(); } private synchronized void receiveMessages() { if (receiverSubscription.get() != null) { receiverSubscription.get().request(1); return; } ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.receiveMessages() .parallel(processorOptions.getMaxConcurrentCalls()) .runOn(Schedulers.boundedElastic()) .subscribe(new Subscriber<ServiceBusReceivedMessageContext>() { @Override public void onSubscribe(Subscription subscription) { receiverSubscription.set(subscription); receiverSubscription.get().request(1); } @Override public void onNext(ServiceBusReceivedMessageContext serviceBusReceivedMessageContext) { if (serviceBusReceivedMessageContext.hasError()) { handleError(serviceBusReceivedMessageContext.getThrowable()); } else { try { ServiceBusProcessorMessageContext serviceBusProcessorMessageContext = new ServiceBusProcessorMessageContext(receiverClient, serviceBusReceivedMessageContext); processMessage.accept(serviceBusProcessorMessageContext); } catch (Exception exception) { handleError(exception); } } if (isRunning.get()) { logger.verbose("Requesting 1 more message from upstream"); receiverSubscription.get().request(1); } } @Override public void onError(Throwable throwable) { logger.info("Error receiving messages.", throwable); handleError(throwable); if (isRunning.get()) { restartMessageReceiver(); } } @Override }); } private void handleError(Throwable throwable) { try { processError.accept(throwable); } catch (Exception ex) { logger.verbose("Error from error handler. Ignoring error.", ex); } } private void restartMessageReceiver() { receiverSubscription.set(null); ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.close(); ServiceBusReceiverAsyncClient newReceiverClient = this.receiverBuilder == null ? 
this.sessionReceiverBuilder.buildAsyncClientForProcessor() : this.receiverBuilder.buildAsyncClient(); asyncClient.set(newReceiverClient); receiveMessages(); } }
class ServiceBusProcessorClient implements AutoCloseable { private static final int SCHEDULER_INTERVAL_IN_SECONDS = 10; private final ClientLogger logger = new ClientLogger(ServiceBusProcessorClient.class); private final ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder; private final ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder; private final Consumer<ServiceBusProcessorMessageContext> processMessage; private final Consumer<Throwable> processError; private final ServiceBusProcessorClientOptions processorOptions; private final AtomicReference<Subscription> receiverSubscription = new AtomicReference<>(); private final AtomicReference<ServiceBusReceiverAsyncClient> asyncClient = new AtomicReference<>(); private final AtomicBoolean isRunning = new AtomicBoolean(); private ScheduledExecutorService scheduledExecutor; /** * Constructor to create a sessions-enabled processor. * * @param sessionReceiverBuilder The session processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. 
*/ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.sessionReceiverBuilder = Objects.requireNonNull(sessionReceiverBuilder, "'sessionReceiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(sessionReceiverBuilder.buildAsyncClientForProcessor()); this.receiverBuilder = null; } /** * Constructor to create a processor. * * @param receiverBuilder The processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. */ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.receiverBuilder = Objects.requireNonNull(receiverBuilder, "'receiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(receiverBuilder.buildAsyncClient()); this.sessionReceiverBuilder = null; } /** * Starts the processor in the background. 
When this method is called, the processor will initiate a message * receiver that will invoke the message handler when new messages are available. This method is idempotent i.e * calling {@link * after calling {@link * sessions. Calling {@link * a new set of sessions will be processed. */ public synchronized void start() { if (isRunning.getAndSet(true)) { logger.info("Processor is already running"); return; } receiveMessages(); this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(); scheduledExecutor.scheduleWithFixedDelay(() -> { if (this.asyncClient.get().isConnectionClosed()) { restartMessageReceiver(); } }, SCHEDULER_INTERVAL_IN_SECONDS, SCHEDULER_INTERVAL_IN_SECONDS, TimeUnit.SECONDS); } /** * Stops the message processing for this processor. The receiving links and sessions are kept active and this * processor can resume processing messages by calling {@link */ public synchronized void stop() { isRunning.set(false); } /** * Stops message processing and closes the processor. The receiving links and sessions are closed and calling * {@link */ @Override public synchronized void close() { isRunning.set(false); if (receiverSubscription.get() != null) { receiverSubscription.get().cancel(); } asyncClient.get().close(); scheduledExecutor.shutdown(); } /** * Returns {@code true} if the processor is running. If the processor is stopped or closed, this method returns * {@code false}. * * @return {@code true} if the processor is running. 
*/ public synchronized boolean isRunning() { return isRunning.get(); } private synchronized void receiveMessages() { if (receiverSubscription.get() != null) { receiverSubscription.get().request(1); return; } ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.receiveMessages() .parallel(processorOptions.getMaxConcurrentCalls()) .runOn(Schedulers.boundedElastic()) .subscribe(new Subscriber<ServiceBusReceivedMessageContext>() { @Override public void onSubscribe(Subscription subscription) { receiverSubscription.set(subscription); receiverSubscription.get().request(1); } @Override public void onNext(ServiceBusReceivedMessageContext serviceBusReceivedMessageContext) { if (serviceBusReceivedMessageContext.hasError()) { handleError(serviceBusReceivedMessageContext.getThrowable()); } else { try { ServiceBusProcessorMessageContext serviceBusProcessorMessageContext = new ServiceBusProcessorMessageContext(receiverClient, serviceBusReceivedMessageContext); processMessage.accept(serviceBusProcessorMessageContext); } catch (Exception ex) { handleError(ex); logger.warning("Error when processing message. 
Abandoning message.", ex); abandonMessage(serviceBusReceivedMessageContext, receiverClient); } } if (isRunning.get()) { logger.verbose("Requesting 1 more message from upstream"); receiverSubscription.get().request(1); } } @Override public void onError(Throwable throwable) { logger.info("Error receiving messages.", throwable); handleError(throwable); if (isRunning.get()) { restartMessageReceiver(); } } @Override }); } private void abandonMessage(ServiceBusReceivedMessageContext serviceBusReceivedMessageContext, ServiceBusReceiverAsyncClient receiverClient) { try { receiverClient.abandon(serviceBusReceivedMessageContext.getMessage()).block(); } catch (Exception exception) { logger.verbose("Failed to abandon message", exception); } } private void handleError(Throwable throwable) { try { processError.accept(throwable); } catch (Exception ex) { logger.verbose("Error from error handler. Ignoring error.", ex); } } private void restartMessageReceiver() { receiverSubscription.set(null); ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.close(); ServiceBusReceiverAsyncClient newReceiverClient = this.receiverBuilder == null ? this.sessionReceiverBuilder.buildAsyncClientForProcessor() : this.receiverBuilder.buildAsyncClient(); asyncClient.set(newReceiverClient); receiveMessages(); } }
Yes, we should abandon the message if user's message handler throws an exception. Updated to abandon the message.
public void onNext(ServiceBusReceivedMessageContext serviceBusReceivedMessageContext) { if (serviceBusReceivedMessageContext.hasError()) { handleError(serviceBusReceivedMessageContext.getThrowable()); } else { try { ServiceBusProcessorMessageContext serviceBusProcessorMessageContext = new ServiceBusProcessorMessageContext(receiverClient, serviceBusReceivedMessageContext); processMessage.accept(serviceBusProcessorMessageContext); } catch (Exception exception) { handleError(exception); } } if (isRunning.get()) { logger.verbose("Requesting 1 more message from upstream"); receiverSubscription.get().request(1); } }
} catch (Exception exception) {
public void onNext(ServiceBusReceivedMessageContext serviceBusReceivedMessageContext) { if (serviceBusReceivedMessageContext.hasError()) { handleError(serviceBusReceivedMessageContext.getThrowable()); } else { try { ServiceBusProcessorMessageContext serviceBusProcessorMessageContext = new ServiceBusProcessorMessageContext(receiverClient, serviceBusReceivedMessageContext); processMessage.accept(serviceBusProcessorMessageContext); } catch (Exception ex) { handleError(ex); logger.warning("Error when processing message. Abandoning message.", ex); abandonMessage(serviceBusReceivedMessageContext, receiverClient); } } if (isRunning.get()) { logger.verbose("Requesting 1 more message from upstream"); receiverSubscription.get().request(1); } }
class ServiceBusProcessorClient { private static final int SCHEDULER_INTERVAL_IN_SECONDS = 10; private final ClientLogger logger = new ClientLogger(ServiceBusProcessorClient.class); private final ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder; private final ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder; private final Consumer<ServiceBusProcessorMessageContext> processMessage; private final Consumer<Throwable> processError; private final ServiceBusProcessorClientOptions processorOptions; private final AtomicReference<Subscription> receiverSubscription = new AtomicReference<>(); private final AtomicReference<ServiceBusReceiverAsyncClient> asyncClient = new AtomicReference<>(); private final AtomicBoolean isRunning = new AtomicBoolean(); private ScheduledExecutorService scheduledExecutor; /** * Constructor to create a sessions-enabled processor. * * @param sessionReceiverBuilder The session processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. 
*/ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.sessionReceiverBuilder = Objects.requireNonNull(sessionReceiverBuilder, "'sessionReceiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(sessionReceiverBuilder.buildAsyncClientForProcessor()); this.receiverBuilder = null; } /** * Constructor to create a processor. * * @param receiverBuilder The processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. */ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.receiverBuilder = Objects.requireNonNull(receiverBuilder, "'receiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(receiverBuilder.buildAsyncClient()); this.sessionReceiverBuilder = null; } /** * Starts the processor in the background. 
When this method is called, the processor will initiate a message * receiver that will invoke the message handler when new messages are available. This method is idempotent i.e * calling {@link * after calling {@link * sessions. Calling {@link * a new set of sessions will be processed. */ public synchronized void start() { if (isRunning.getAndSet(true)) { logger.info("Processor is already running"); return; } receiveMessages(); this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(); scheduledExecutor.scheduleWithFixedDelay(() -> { if (this.asyncClient.get().isConnectionClosed()) { restartMessageReceiver(); } }, SCHEDULER_INTERVAL_IN_SECONDS, SCHEDULER_INTERVAL_IN_SECONDS, TimeUnit.SECONDS); } /** * Stops the message processing for this processor. The receiving links and sessions are kept active and this * processor can resume processing messages by calling {@link */ public synchronized void stop() { isRunning.set(false); } /** * Stops message processing and closes the processor. The receiving links and sessions are closed and calling * {@link */ public synchronized void close() { isRunning.set(false); if (receiverSubscription.get() != null) { receiverSubscription.get().cancel(); } asyncClient.get().close(); scheduledExecutor.shutdown(); } /** * Returns {@code true} if the processor is running. If the processor is stopped or closed, this method returns * {@code false}. * * @return {@code true} if the processor is running. 
*/ public synchronized boolean isRunning() { return isRunning.get(); } private synchronized void receiveMessages() { if (receiverSubscription.get() != null) { receiverSubscription.get().request(1); return; } ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.receiveMessages() .parallel(processorOptions.getMaxConcurrentCalls()) .runOn(Schedulers.boundedElastic()) .subscribe(new Subscriber<ServiceBusReceivedMessageContext>() { @Override public void onSubscribe(Subscription subscription) { receiverSubscription.set(subscription); receiverSubscription.get().request(1); } @Override @Override public void onError(Throwable throwable) { logger.info("Error receiving messages.", throwable); handleError(throwable); if (isRunning.get()) { restartMessageReceiver(); } } @Override public void onComplete() { logger.info("Completed receiving messages."); if (isRunning.get()) { restartMessageReceiver(); } } }); } private void handleError(Throwable throwable) { try { processError.accept(throwable); } catch (Exception ex) { logger.verbose("Error from error handler. Ignoring error.", ex); } } private void restartMessageReceiver() { receiverSubscription.set(null); ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.close(); ServiceBusReceiverAsyncClient newReceiverClient = this.receiverBuilder == null ? this.sessionReceiverBuilder.buildAsyncClientForProcessor() : this.receiverBuilder.buildAsyncClient(); asyncClient.set(newReceiverClient); receiveMessages(); } }
class ServiceBusProcessorClient implements AutoCloseable { private static final int SCHEDULER_INTERVAL_IN_SECONDS = 10; private final ClientLogger logger = new ClientLogger(ServiceBusProcessorClient.class); private final ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder; private final ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder; private final Consumer<ServiceBusProcessorMessageContext> processMessage; private final Consumer<Throwable> processError; private final ServiceBusProcessorClientOptions processorOptions; private final AtomicReference<Subscription> receiverSubscription = new AtomicReference<>(); private final AtomicReference<ServiceBusReceiverAsyncClient> asyncClient = new AtomicReference<>(); private final AtomicBoolean isRunning = new AtomicBoolean(); private ScheduledExecutorService scheduledExecutor; /** * Constructor to create a sessions-enabled processor. * * @param sessionReceiverBuilder The session processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. 
*/ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder sessionReceiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.sessionReceiverBuilder = Objects.requireNonNull(sessionReceiverBuilder, "'sessionReceiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(sessionReceiverBuilder.buildAsyncClientForProcessor()); this.receiverBuilder = null; } /** * Constructor to create a processor. * * @param receiverBuilder The processor builder to create new instances of async clients. * @param processMessage The message processing callback. * @param processError The error handler. * @param processorOptions Options to configure this instance of the processor. */ ServiceBusProcessorClient(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder, Consumer<ServiceBusProcessorMessageContext> processMessage, Consumer<Throwable> processError, ServiceBusProcessorClientOptions processorOptions) { this.receiverBuilder = Objects.requireNonNull(receiverBuilder, "'receiverBuilder' cannot be null"); this.processMessage = Objects.requireNonNull(processMessage, "'processMessage' cannot be null"); this.processError = Objects.requireNonNull(processError, "'processError' cannot be null"); this.processorOptions = Objects.requireNonNull(processorOptions, "'processorOptions' cannot be null"); this.asyncClient.set(receiverBuilder.buildAsyncClient()); this.sessionReceiverBuilder = null; } /** * Starts the processor in the background. 
When this method is called, the processor will initiate a message * receiver that will invoke the message handler when new messages are available. This method is idempotent i.e * calling {@link * after calling {@link * sessions. Calling {@link * a new set of sessions will be processed. */ public synchronized void start() { if (isRunning.getAndSet(true)) { logger.info("Processor is already running"); return; } receiveMessages(); this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(); scheduledExecutor.scheduleWithFixedDelay(() -> { if (this.asyncClient.get().isConnectionClosed()) { restartMessageReceiver(); } }, SCHEDULER_INTERVAL_IN_SECONDS, SCHEDULER_INTERVAL_IN_SECONDS, TimeUnit.SECONDS); } /** * Stops the message processing for this processor. The receiving links and sessions are kept active and this * processor can resume processing messages by calling {@link */ public synchronized void stop() { isRunning.set(false); } /** * Stops message processing and closes the processor. The receiving links and sessions are closed and calling * {@link */ @Override public synchronized void close() { isRunning.set(false); if (receiverSubscription.get() != null) { receiverSubscription.get().cancel(); } asyncClient.get().close(); scheduledExecutor.shutdown(); } /** * Returns {@code true} if the processor is running. If the processor is stopped or closed, this method returns * {@code false}. * * @return {@code true} if the processor is running. 
*/ public synchronized boolean isRunning() { return isRunning.get(); } private synchronized void receiveMessages() { if (receiverSubscription.get() != null) { receiverSubscription.get().request(1); return; } ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.receiveMessages() .parallel(processorOptions.getMaxConcurrentCalls()) .runOn(Schedulers.boundedElastic()) .subscribe(new Subscriber<ServiceBusReceivedMessageContext>() { @Override public void onSubscribe(Subscription subscription) { receiverSubscription.set(subscription); receiverSubscription.get().request(1); } @Override @Override public void onError(Throwable throwable) { logger.info("Error receiving messages.", throwable); handleError(throwable); if (isRunning.get()) { restartMessageReceiver(); } } @Override public void onComplete() { logger.info("Completed receiving messages."); if (isRunning.get()) { restartMessageReceiver(); } } }); } private void abandonMessage(ServiceBusReceivedMessageContext serviceBusReceivedMessageContext, ServiceBusReceiverAsyncClient receiverClient) { try { receiverClient.abandon(serviceBusReceivedMessageContext.getMessage()).block(); } catch (Exception exception) { logger.verbose("Failed to abandon message", exception); } } private void handleError(Throwable throwable) { try { processError.accept(throwable); } catch (Exception ex) { logger.verbose("Error from error handler. Ignoring error.", ex); } } private void restartMessageReceiver() { receiverSubscription.set(null); ServiceBusReceiverAsyncClient receiverClient = asyncClient.get(); receiverClient.close(); ServiceBusReceiverAsyncClient newReceiverClient = this.receiverBuilder == null ? this.sessionReceiverBuilder.buildAsyncClientForProcessor() : this.receiverBuilder.buildAsyncClient(); asyncClient.set(newReceiverClient); receiveMessages(); } }
formatting differences between this and the previous call on line 189 in terms of line breaks and tabbing.
public void patchComponentSucceedsIfETagMatches(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String wifiComponentName = "wifiAccessPoint"; String roomWithWifiTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_WITH_WIFI_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomWithWifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_WITH_WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String wifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String modelWifi = TestAssetsHelper.getWifiModelPayload(wifiModelId); String modelRoomWithWifi = TestAssetsHelper.getRoomWithWifiModelPayload(roomWithWifiModelId, wifiModelId, wifiComponentName); String roomWithWifiTwin = TestAssetsHelper.getRoomWithWifiTwinPayload(roomWithWifiModelId, wifiComponentName); List<String> modelsList = new ArrayList<>(Arrays.asList(modelWifi, modelRoomWithWifi)); try { StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete(); StepVerifier.create(asyncClient.createOrReplaceDigitalTwinWithResponse( roomWithWifiTwinId, deserializeJsonString(roomWithWifiTwin, BasicDigitalTwin.class), BasicDigitalTwin.class, null)) .assertNext(createdTwinResponse -> { logger.info("Updated the component successfully"); }) .verifyComplete(); AtomicReference<String> upToDateETag = new AtomicReference<>(); StepVerifier.create(asyncClient.updateComponentWithResponse(roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentUpdatePayload(), null)) .assertNext(updateResponse -> { upToDateETag.set(updateResponse.getDeserializedHeaders().getETag()); logger.info("Updated the component successfully"); }) .verifyComplete(); StepVerifier.create( 
asyncClient.updateComponentWithResponse( roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentSecondUpdatePayload(), new UpdateComponentOptions().setIfMatch(upToDateETag.get()))) .assertNext(response -> { /* don't care as long as it is a success status code */ }) .verifyComplete(); } finally { try { if (roomWithWifiTwinId != null) { asyncClient.deleteDigitalTwin(roomWithWifiTwinId).block(); } if (roomWithWifiModelId != null) { asyncClient.deleteModel(roomWithWifiModelId).block(); } if (wifiModelId != null) { asyncClient.deleteModel(wifiModelId).block(); } } catch (Exception ex) { fail("Test cleanup failed", ex); } } }
StepVerifier.create(asyncClient.createOrReplaceDigitalTwinWithResponse(
public void patchComponentSucceedsIfETagMatches(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String wifiComponentName = "wifiAccessPoint"; String roomWithWifiTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_WITH_WIFI_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomWithWifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_WITH_WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String wifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String modelWifi = TestAssetsHelper.getWifiModelPayload(wifiModelId); String modelRoomWithWifi = TestAssetsHelper.getRoomWithWifiModelPayload(roomWithWifiModelId, wifiModelId, wifiComponentName); String roomWithWifiTwin = TestAssetsHelper.getRoomWithWifiTwinPayload(roomWithWifiModelId, wifiComponentName); List<String> modelsList = new ArrayList<>(Arrays.asList(modelWifi, modelRoomWithWifi)); try { StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete(); StepVerifier .create(asyncClient.createOrReplaceDigitalTwinWithResponse( roomWithWifiTwinId, deserializeJsonString(roomWithWifiTwin, BasicDigitalTwin.class), BasicDigitalTwin.class, null)) .assertNext(createdTwinResponse -> { logger.info("Updated the component successfully"); }) .verifyComplete(); AtomicReference<String> upToDateETag = new AtomicReference<>(); StepVerifier .create( asyncClient.updateComponentWithResponse( roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentUpdatePayload(), null)) .assertNext(updateResponse -> { upToDateETag.set(updateResponse.getDeserializedHeaders().getETag()); logger.info("Updated the component successfully"); }) .verifyComplete(); StepVerifier.create( 
asyncClient.updateComponentWithResponse( roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentSecondUpdatePayload(), new UpdateComponentOptions().setIfMatch(upToDateETag.get()))) .assertNext(response -> { /* don't care as long as it is a success status code */ }) .verifyComplete(); } finally { try { if (roomWithWifiTwinId != null) { asyncClient.deleteDigitalTwin(roomWithWifiTwinId).block(); } if (roomWithWifiModelId != null) { asyncClient.deleteModel(roomWithWifiModelId).block(); } if (wifiModelId != null) { asyncClient.deleteModel(wifiModelId).block(); } } catch (Exception ex) { fail("Test cleanup failed", ex); } } }
class ComponentsAsyncTests extends ComponentsTestBase { private final ClientLogger logger = new ClientLogger(ComponentsAsyncTests.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void componentLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String wifiComponentName = "wifiAccessPoint"; String roomWithWifiTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_WITH_WIFI_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomWithWifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_WITH_WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String wifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String modelWifi = TestAssetsHelper.getWifiModelPayload(wifiModelId); String modelRoomWithWifi = TestAssetsHelper.getRoomWithWifiModelPayload(roomWithWifiModelId, wifiModelId, wifiComponentName); String roomWithWifiTwin = TestAssetsHelper.getRoomWithWifiTwinPayload(roomWithWifiModelId, wifiComponentName); List<String> modelsList = new ArrayList<>(Arrays.asList(modelWifi, modelRoomWithWifi)); try { StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete(); StepVerifier.create(asyncClient.createOrReplaceDigitalTwin(roomWithWifiTwinId, deserializeJsonString(roomWithWifiTwin, BasicDigitalTwin.class), BasicDigitalTwin.class)) .assertNext(createdTwin -> { assertEquals(createdTwin.getId(), roomWithWifiTwinId); logger.info("Created {} twin successfully", createdTwin.getId()); }) .verifyComplete(); StepVerifier.create(asyncClient.getComponentWithResponse(roomWithWifiTwinId, wifiComponentName, String.class, null)) 
.assertNext(createResponse -> { assertEquals(createResponse.getStatusCode(), HttpURLConnection.HTTP_OK); logger.info("Got component successfully"); }) .verifyComplete(); StepVerifier.create(asyncClient.updateComponentWithResponse(roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentUpdatePayload(), null)) .assertNext(updateResponse -> { assertEquals(updateResponse.getStatusCode(), HttpURLConnection.HTTP_NO_CONTENT); logger.info("Updated the component successfully"); }) .verifyComplete(); } finally { try { if (roomWithWifiTwinId != null) { asyncClient.deleteDigitalTwin(roomWithWifiTwinId).block(); } if (roomWithWifiModelId != null) { asyncClient.deleteModel(roomWithWifiModelId).block(); } if (wifiModelId != null) { asyncClient.deleteModel(wifiModelId).block(); } } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void patchComponentFailsIfETagDoesNotMatch(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String wifiComponentName = "wifiAccessPoint"; String roomWithWifiTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_WITH_WIFI_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomWithWifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_WITH_WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String wifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String modelWifi = TestAssetsHelper.getWifiModelPayload(wifiModelId); String modelRoomWithWifi = TestAssetsHelper.getRoomWithWifiModelPayload(roomWithWifiModelId, wifiModelId, wifiComponentName); String roomWithWifiTwin = TestAssetsHelper.getRoomWithWifiTwinPayload(roomWithWifiModelId, 
wifiComponentName); List<String> modelsList = new ArrayList<>(Arrays.asList(modelWifi, modelRoomWithWifi)); try { StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete(); AtomicReference<String> etagBeforeUpdate = new AtomicReference<>(); StepVerifier.create(asyncClient.createOrReplaceDigitalTwinWithResponse( roomWithWifiTwinId, deserializeJsonString(roomWithWifiTwin, BasicDigitalTwin.class), BasicDigitalTwin.class, null)) .assertNext(createdTwinResponse -> { etagBeforeUpdate.set(createdTwinResponse.getDeserializedHeaders().getETag()); }) .verifyComplete(); StepVerifier.create(asyncClient.updateComponentWithResponse(roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentUpdatePayload(), null)) .assertNext(updateResponse -> { logger.info("Updated the component successfully"); }) .verifyComplete(); StepVerifier.create( asyncClient.updateComponentWithResponse( roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentSecondUpdatePayload(), new UpdateComponentOptions().setIfMatch(etagBeforeUpdate.get()))) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpURLConnection.HTTP_PRECON_FAILED)); } finally { try { if (roomWithWifiTwinId != null) { asyncClient.deleteDigitalTwin(roomWithWifiTwinId).block(); } if (roomWithWifiModelId != null) { asyncClient.deleteModel(roomWithWifiModelId).block(); } if (wifiModelId != null) { asyncClient.deleteModel(wifiModelId).block(); } } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override }
class ComponentsAsyncTests extends ComponentsTestBase { private final ClientLogger logger = new ClientLogger(ComponentsAsyncTests.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void componentLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String wifiComponentName = "wifiAccessPoint"; String roomWithWifiTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_WITH_WIFI_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomWithWifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_WITH_WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String wifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String modelWifi = TestAssetsHelper.getWifiModelPayload(wifiModelId); String modelRoomWithWifi = TestAssetsHelper.getRoomWithWifiModelPayload(roomWithWifiModelId, wifiModelId, wifiComponentName); String roomWithWifiTwin = TestAssetsHelper.getRoomWithWifiTwinPayload(roomWithWifiModelId, wifiComponentName); List<String> modelsList = new ArrayList<>(Arrays.asList(modelWifi, modelRoomWithWifi)); try { StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete(); StepVerifier.create(asyncClient.createOrReplaceDigitalTwin(roomWithWifiTwinId, deserializeJsonString(roomWithWifiTwin, BasicDigitalTwin.class), BasicDigitalTwin.class)) .assertNext(createdTwin -> { assertEquals(createdTwin.getId(), roomWithWifiTwinId); logger.info("Created {} twin successfully", createdTwin.getId()); }) .verifyComplete(); StepVerifier.create(asyncClient.getComponentWithResponse(roomWithWifiTwinId, wifiComponentName, String.class, null)) 
.assertNext(createResponse -> { assertEquals(createResponse.getStatusCode(), HttpURLConnection.HTTP_OK); logger.info("Got component successfully"); }) .verifyComplete(); StepVerifier.create(asyncClient.updateComponentWithResponse(roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentUpdatePayload(), null)) .assertNext(updateResponse -> { assertEquals(updateResponse.getStatusCode(), HttpURLConnection.HTTP_NO_CONTENT); logger.info("Updated the component successfully"); }) .verifyComplete(); } finally { try { if (roomWithWifiTwinId != null) { asyncClient.deleteDigitalTwin(roomWithWifiTwinId).block(); } if (roomWithWifiModelId != null) { asyncClient.deleteModel(roomWithWifiModelId).block(); } if (wifiModelId != null) { asyncClient.deleteModel(wifiModelId).block(); } } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void patchComponentFailsIfETagDoesNotMatch(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String wifiComponentName = "wifiAccessPoint"; String roomWithWifiTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_WITH_WIFI_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomWithWifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_WITH_WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String wifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String modelWifi = TestAssetsHelper.getWifiModelPayload(wifiModelId); String modelRoomWithWifi = TestAssetsHelper.getRoomWithWifiModelPayload(roomWithWifiModelId, wifiModelId, wifiComponentName); String roomWithWifiTwin = TestAssetsHelper.getRoomWithWifiTwinPayload(roomWithWifiModelId, 
wifiComponentName); List<String> modelsList = new ArrayList<>(Arrays.asList(modelWifi, modelRoomWithWifi)); try { StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete(); AtomicReference<String> etagBeforeUpdate = new AtomicReference<>(); StepVerifier.create(asyncClient.createOrReplaceDigitalTwinWithResponse( roomWithWifiTwinId, deserializeJsonString(roomWithWifiTwin, BasicDigitalTwin.class), BasicDigitalTwin.class, null)) .assertNext(createdTwinResponse -> { etagBeforeUpdate.set(createdTwinResponse.getDeserializedHeaders().getETag()); }) .verifyComplete(); StepVerifier.create(asyncClient.updateComponentWithResponse(roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentUpdatePayload(), null)) .assertNext(updateResponse -> { logger.info("Updated the component successfully"); }) .verifyComplete(); StepVerifier.create( asyncClient.updateComponentWithResponse( roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentSecondUpdatePayload(), new UpdateComponentOptions().setIfMatch(etagBeforeUpdate.get()))) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpURLConnection.HTTP_PRECON_FAILED)); } finally { try { if (roomWithWifiTwinId != null) { asyncClient.deleteDigitalTwin(roomWithWifiTwinId).block(); } if (roomWithWifiModelId != null) { asyncClient.deleteModel(roomWithWifiModelId).block(); } if (wifiModelId != null) { asyncClient.deleteModel(wifiModelId).block(); } } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override }
in Java, shouldn't brace formatting be like `} finally {`, `try {`, and `} catch {`?
public void patchComponentSucceedsIfETagMatches(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String wifiComponentName = "wifiAccessPoint"; String roomWithWifiTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_WITH_WIFI_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomWithWifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_WITH_WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String wifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String modelWifi = TestAssetsHelper.getWifiModelPayload(wifiModelId); String modelRoomWithWifi = TestAssetsHelper.getRoomWithWifiModelPayload(roomWithWifiModelId, wifiModelId, wifiComponentName); String roomWithWifiTwin = TestAssetsHelper.getRoomWithWifiTwinPayload(roomWithWifiModelId, wifiComponentName); List<String> modelsList = new ArrayList<>(Arrays.asList(modelWifi, modelRoomWithWifi)); try { StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete(); StepVerifier.create(asyncClient.createOrReplaceDigitalTwinWithResponse( roomWithWifiTwinId, deserializeJsonString(roomWithWifiTwin, BasicDigitalTwin.class), BasicDigitalTwin.class, null)) .assertNext(createdTwinResponse -> { logger.info("Updated the component successfully"); }) .verifyComplete(); AtomicReference<String> upToDateETag = new AtomicReference<>(); StepVerifier.create(asyncClient.updateComponentWithResponse(roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentUpdatePayload(), null)) .assertNext(updateResponse -> { upToDateETag.set(updateResponse.getDeserializedHeaders().getETag()); logger.info("Updated the component successfully"); }) .verifyComplete(); StepVerifier.create( 
asyncClient.updateComponentWithResponse( roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentSecondUpdatePayload(), new UpdateComponentOptions().setIfMatch(upToDateETag.get()))) .assertNext(response -> { /* don't care as long as it is a success status code */ }) .verifyComplete(); } finally { try { if (roomWithWifiTwinId != null) { asyncClient.deleteDigitalTwin(roomWithWifiTwinId).block(); } if (roomWithWifiModelId != null) { asyncClient.deleteModel(roomWithWifiModelId).block(); } if (wifiModelId != null) { asyncClient.deleteModel(wifiModelId).block(); } } catch (Exception ex) { fail("Test cleanup failed", ex); } } }
}
public void patchComponentSucceedsIfETagMatches(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String wifiComponentName = "wifiAccessPoint"; String roomWithWifiTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_WITH_WIFI_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomWithWifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_WITH_WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String wifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String modelWifi = TestAssetsHelper.getWifiModelPayload(wifiModelId); String modelRoomWithWifi = TestAssetsHelper.getRoomWithWifiModelPayload(roomWithWifiModelId, wifiModelId, wifiComponentName); String roomWithWifiTwin = TestAssetsHelper.getRoomWithWifiTwinPayload(roomWithWifiModelId, wifiComponentName); List<String> modelsList = new ArrayList<>(Arrays.asList(modelWifi, modelRoomWithWifi)); try { StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete(); StepVerifier .create(asyncClient.createOrReplaceDigitalTwinWithResponse( roomWithWifiTwinId, deserializeJsonString(roomWithWifiTwin, BasicDigitalTwin.class), BasicDigitalTwin.class, null)) .assertNext(createdTwinResponse -> { logger.info("Updated the component successfully"); }) .verifyComplete(); AtomicReference<String> upToDateETag = new AtomicReference<>(); StepVerifier .create( asyncClient.updateComponentWithResponse( roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentUpdatePayload(), null)) .assertNext(updateResponse -> { upToDateETag.set(updateResponse.getDeserializedHeaders().getETag()); logger.info("Updated the component successfully"); }) .verifyComplete(); StepVerifier.create( 
asyncClient.updateComponentWithResponse( roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentSecondUpdatePayload(), new UpdateComponentOptions().setIfMatch(upToDateETag.get()))) .assertNext(response -> { /* don't care as long as it is a success status code */ }) .verifyComplete(); } finally { try { if (roomWithWifiTwinId != null) { asyncClient.deleteDigitalTwin(roomWithWifiTwinId).block(); } if (roomWithWifiModelId != null) { asyncClient.deleteModel(roomWithWifiModelId).block(); } if (wifiModelId != null) { asyncClient.deleteModel(wifiModelId).block(); } } catch (Exception ex) { fail("Test cleanup failed", ex); } } }
class ComponentsAsyncTests extends ComponentsTestBase { private final ClientLogger logger = new ClientLogger(ComponentsAsyncTests.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void componentLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String wifiComponentName = "wifiAccessPoint"; String roomWithWifiTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_WITH_WIFI_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomWithWifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_WITH_WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String wifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String modelWifi = TestAssetsHelper.getWifiModelPayload(wifiModelId); String modelRoomWithWifi = TestAssetsHelper.getRoomWithWifiModelPayload(roomWithWifiModelId, wifiModelId, wifiComponentName); String roomWithWifiTwin = TestAssetsHelper.getRoomWithWifiTwinPayload(roomWithWifiModelId, wifiComponentName); List<String> modelsList = new ArrayList<>(Arrays.asList(modelWifi, modelRoomWithWifi)); try { StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete(); StepVerifier.create(asyncClient.createOrReplaceDigitalTwin(roomWithWifiTwinId, deserializeJsonString(roomWithWifiTwin, BasicDigitalTwin.class), BasicDigitalTwin.class)) .assertNext(createdTwin -> { assertEquals(createdTwin.getId(), roomWithWifiTwinId); logger.info("Created {} twin successfully", createdTwin.getId()); }) .verifyComplete(); StepVerifier.create(asyncClient.getComponentWithResponse(roomWithWifiTwinId, wifiComponentName, String.class, null)) 
.assertNext(createResponse -> { assertEquals(createResponse.getStatusCode(), HttpURLConnection.HTTP_OK); logger.info("Got component successfully"); }) .verifyComplete(); StepVerifier.create(asyncClient.updateComponentWithResponse(roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentUpdatePayload(), null)) .assertNext(updateResponse -> { assertEquals(updateResponse.getStatusCode(), HttpURLConnection.HTTP_NO_CONTENT); logger.info("Updated the component successfully"); }) .verifyComplete(); } finally { try { if (roomWithWifiTwinId != null) { asyncClient.deleteDigitalTwin(roomWithWifiTwinId).block(); } if (roomWithWifiModelId != null) { asyncClient.deleteModel(roomWithWifiModelId).block(); } if (wifiModelId != null) { asyncClient.deleteModel(wifiModelId).block(); } } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void patchComponentFailsIfETagDoesNotMatch(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String wifiComponentName = "wifiAccessPoint"; String roomWithWifiTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_WITH_WIFI_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomWithWifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_WITH_WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String wifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String modelWifi = TestAssetsHelper.getWifiModelPayload(wifiModelId); String modelRoomWithWifi = TestAssetsHelper.getRoomWithWifiModelPayload(roomWithWifiModelId, wifiModelId, wifiComponentName); String roomWithWifiTwin = TestAssetsHelper.getRoomWithWifiTwinPayload(roomWithWifiModelId, 
wifiComponentName); List<String> modelsList = new ArrayList<>(Arrays.asList(modelWifi, modelRoomWithWifi)); try { StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete(); AtomicReference<String> etagBeforeUpdate = new AtomicReference<>(); StepVerifier.create(asyncClient.createOrReplaceDigitalTwinWithResponse( roomWithWifiTwinId, deserializeJsonString(roomWithWifiTwin, BasicDigitalTwin.class), BasicDigitalTwin.class, null)) .assertNext(createdTwinResponse -> { etagBeforeUpdate.set(createdTwinResponse.getDeserializedHeaders().getETag()); }) .verifyComplete(); StepVerifier.create(asyncClient.updateComponentWithResponse(roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentUpdatePayload(), null)) .assertNext(updateResponse -> { logger.info("Updated the component successfully"); }) .verifyComplete(); StepVerifier.create( asyncClient.updateComponentWithResponse( roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentSecondUpdatePayload(), new UpdateComponentOptions().setIfMatch(etagBeforeUpdate.get()))) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpURLConnection.HTTP_PRECON_FAILED)); } finally { try { if (roomWithWifiTwinId != null) { asyncClient.deleteDigitalTwin(roomWithWifiTwinId).block(); } if (roomWithWifiModelId != null) { asyncClient.deleteModel(roomWithWifiModelId).block(); } if (wifiModelId != null) { asyncClient.deleteModel(wifiModelId).block(); } } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override }
class ComponentsAsyncTests extends ComponentsTestBase { private final ClientLogger logger = new ClientLogger(ComponentsAsyncTests.class); @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void componentLifecycleTest(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String wifiComponentName = "wifiAccessPoint"; String roomWithWifiTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_WITH_WIFI_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomWithWifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_WITH_WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String wifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String modelWifi = TestAssetsHelper.getWifiModelPayload(wifiModelId); String modelRoomWithWifi = TestAssetsHelper.getRoomWithWifiModelPayload(roomWithWifiModelId, wifiModelId, wifiComponentName); String roomWithWifiTwin = TestAssetsHelper.getRoomWithWifiTwinPayload(roomWithWifiModelId, wifiComponentName); List<String> modelsList = new ArrayList<>(Arrays.asList(modelWifi, modelRoomWithWifi)); try { StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete(); StepVerifier.create(asyncClient.createOrReplaceDigitalTwin(roomWithWifiTwinId, deserializeJsonString(roomWithWifiTwin, BasicDigitalTwin.class), BasicDigitalTwin.class)) .assertNext(createdTwin -> { assertEquals(createdTwin.getId(), roomWithWifiTwinId); logger.info("Created {} twin successfully", createdTwin.getId()); }) .verifyComplete(); StepVerifier.create(asyncClient.getComponentWithResponse(roomWithWifiTwinId, wifiComponentName, String.class, null)) 
.assertNext(createResponse -> { assertEquals(createResponse.getStatusCode(), HttpURLConnection.HTTP_OK); logger.info("Got component successfully"); }) .verifyComplete(); StepVerifier.create(asyncClient.updateComponentWithResponse(roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentUpdatePayload(), null)) .assertNext(updateResponse -> { assertEquals(updateResponse.getStatusCode(), HttpURLConnection.HTTP_NO_CONTENT); logger.info("Updated the component successfully"); }) .verifyComplete(); } finally { try { if (roomWithWifiTwinId != null) { asyncClient.deleteDigitalTwin(roomWithWifiTwinId).block(); } if (roomWithWifiModelId != null) { asyncClient.deleteModel(roomWithWifiModelId).block(); } if (wifiModelId != null) { asyncClient.deleteModel(wifiModelId).block(); } } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override public void patchComponentFailsIfETagDoesNotMatch(HttpClient httpClient, DigitalTwinsServiceVersion serviceVersion) throws JsonProcessingException { DigitalTwinsAsyncClient asyncClient = getAsyncClient(httpClient, serviceVersion); String wifiComponentName = "wifiAccessPoint"; String roomWithWifiTwinId = UniqueIdHelper.getUniqueDigitalTwinId(TestAssetDefaults.ROOM_WITH_WIFI_TWIN_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String roomWithWifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.ROOM_WITH_WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String wifiModelId = UniqueIdHelper.getUniqueModelId(TestAssetDefaults.WIFI_MODEL_ID_PREFIX, asyncClient, randomIntegerStringGenerator); String modelWifi = TestAssetsHelper.getWifiModelPayload(wifiModelId); String modelRoomWithWifi = TestAssetsHelper.getRoomWithWifiModelPayload(roomWithWifiModelId, wifiModelId, wifiComponentName); String roomWithWifiTwin = TestAssetsHelper.getRoomWithWifiTwinPayload(roomWithWifiModelId, 
wifiComponentName); List<String> modelsList = new ArrayList<>(Arrays.asList(modelWifi, modelRoomWithWifi)); try { StepVerifier .create(asyncClient.createModels(modelsList)) .assertNext(createResponseList -> logger.info("Created models successfully")) .verifyComplete(); AtomicReference<String> etagBeforeUpdate = new AtomicReference<>(); StepVerifier.create(asyncClient.createOrReplaceDigitalTwinWithResponse( roomWithWifiTwinId, deserializeJsonString(roomWithWifiTwin, BasicDigitalTwin.class), BasicDigitalTwin.class, null)) .assertNext(createdTwinResponse -> { etagBeforeUpdate.set(createdTwinResponse.getDeserializedHeaders().getETag()); }) .verifyComplete(); StepVerifier.create(asyncClient.updateComponentWithResponse(roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentUpdatePayload(), null)) .assertNext(updateResponse -> { logger.info("Updated the component successfully"); }) .verifyComplete(); StepVerifier.create( asyncClient.updateComponentWithResponse( roomWithWifiTwinId, wifiComponentName, TestAssetsHelper.getWifiComponentSecondUpdatePayload(), new UpdateComponentOptions().setIfMatch(etagBeforeUpdate.get()))) .verifyErrorSatisfies(ex -> assertRestException(ex, HttpURLConnection.HTTP_PRECON_FAILED)); } finally { try { if (roomWithWifiTwinId != null) { asyncClient.deleteDigitalTwin(roomWithWifiTwinId).block(); } if (roomWithWifiModelId != null) { asyncClient.deleteModel(roomWithWifiModelId).block(); } if (wifiModelId != null) { asyncClient.deleteModel(wifiModelId).block(); } } catch (Exception ex) { fail("Test cleanup failed", ex); } } } @ParameterizedTest(name = DISPLAY_NAME_WITH_ARGUMENTS) @MethodSource("com.azure.digitaltwins.core.TestHelper @Override }
Why are we not wrapping AmqpException?
private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) { if (!(throwable instanceof AmqpException) && !(throwable instanceof ServiceBusReceiverException)) { return new ServiceBusReceiverException(throwable, errorSource); } return throwable; }
if (!(throwable instanceof AmqpException) && !(throwable instanceof ServiceBusReceiverException)) {
private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) { if (!(throwable instanceof ServiceBusReceiverException)) { return new ServiceBusReceiverException(throwable, errorSource); } return throwable; }
class ServiceBusReceiverAsyncClient implements AutoCloseable {
    private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
    private static final String TRANSACTION_LINK_NAME = "coordinator";

    // In-flight lock-renewal operations; the container's cleanup callback closes expired entries.
    private final LockContainer<LockRenewalOperation> renewalContainer;
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Lock expiration times for messages obtained through the management node, keyed by lock token.
    private final LockContainer<OffsetDateTime> managementNodeLocks;
    private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class);
    private final String fullyQualifiedNamespace;
    private final String entityPath;
    private final MessagingEntityType entityType;
    private final ReceiverOptions receiverOptions;
    private final ServiceBusConnectionProcessor connectionProcessor;
    private final TracerProvider tracerProvider;
    private final MessageSerializer messageSerializer;
    private final Runnable onClientClose;
    // Null for non-session receivers (first constructor); required non-null by the second constructor.
    private final ServiceBusSessionManager sessionManager;
    // Serializes settlement calls performed by the auto-complete flux (see receiveMessagesWithContext).
    private final Semaphore completionLock = new Semaphore(1);

    // Highest sequence number observed while peeking; -1 until the first peek completes. Successive
    // peeks start at lastPeekedSequenceNumber + 1 so they advance through the entity.
    private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
    private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();

    /**
     * Creates a receiver that listens to a Service Bus resource.
     *
     * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
     * @param entityPath The name of the topic or queue.
     * @param entityType The type of the Service Bus resource.
     * @param receiverOptions Options when receiving messages.
     * @param connectionProcessor The AMQP connection to the Service Bus resource.
     * @param tracerProvider Tracer for telemetry.
     * @param messageSerializer Serializes and deserializes Service Bus messages.
     * @param onClientClose Operation to run when the client completes.
     */
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");

        this.managementNodeLocks = new LockContainer<>(cleanupInterval);
        // Close expired renewal operations so their renewal timers stop.
        this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
            logger.verbose("Closing expired renewal operation. lockToken[{}]. status[{}]. throwable[{}].",
                renewal.getLockToken(), renewal.getStatus(), renewal.getThrowable());
            renewal.close();
        });
        // Not a session receiver.
        this.sessionManager = null;
    }

    /**
     * Creates a session-aware receiver. Identical to the other constructor except that a non-null
     * {@code sessionManager} is required and expired-renewal logging is keyed by session id.
     */
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose,
        ServiceBusSessionManager sessionManager) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
        this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null.");

        this.managementNodeLocks = new LockContainer<>(cleanupInterval);
        this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
            logger.info("Closing expired renewal operation. sessionId[{}]. status[{}]. throwable[{}]",
                renewal.getSessionId(), renewal.getStatus(), renewal.getThrowable());
            renewal.close();
        });
    }

    /**
     * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely
     * similar to {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Service Bus namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }

    /**
     * Gets the Service Bus resource this client interacts with.
     *
     * @return The Service Bus resource this client interacts with.
     */
    public String getEntityPath() {
        return entityPath;
    }

    /**
     * Abandon a {@link ServiceBusReceivedMessage message}. This will make the message available again for
     * processing. Abandoning a message will increase the delivery count on the message.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     *
     * @return A {@link Mono} that completes when the Service Bus abandon operation completes.
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
     *     mode.
     */
    public Mono<Void> abandon(ServiceBusReceivedMessage message) {
        // No modified properties and no transaction.
        return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null);
    }

    /**
     * Abandon a {@link ServiceBusReceivedMessage message} and update the message's properties. This makes the
     * message available again for processing and increases its delivery count.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options to abandon the message; may carry properties to modify and/or a
     *     {@link ServiceBusTransactionContext} created by {@code createTransaction}.
     *
     * @return A {@link Mono} that completes when the Service Bus operation finishes.
     * @throws NullPointerException if {@code message} or {@code options} is null, or if
     *     {@code options.transactionContext.transactionId} is null when a transaction context is specified.
     * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
     *     mode.
     */
    public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) {
        if (Objects.isNull(options)) {
            return monoError(logger, new NullPointerException("'settlementOptions' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(logger, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }
        return updateDisposition(message, DispositionStatus.ABANDONED, null, null,
            options.getPropertiesToModify(), options.getTransactionContext());
    }

    /**
     * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     *
     * @return A {@link Mono} that finishes when the message is completed on Service Bus.
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
     *     mode.
     */
    public Mono<Void> complete(ServiceBusReceivedMessage message) {
        return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null);
    }

    /**
     * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the
     * service.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options to complete the message. The {@code transactionContext} can be set; the transaction
     *     should be created first by {@code createTransaction}.
     *
     * @return A {@link Mono} that finishes when the message is completed on Service Bus.
     * @throws NullPointerException if {@code message} or {@code options} is null. Also if
     *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
     * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
     *     mode.
     */
    public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) {
        if (Objects.isNull(options)) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(logger, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }
        return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null,
            options.getTransactionContext());
    }

    /**
     * Defers a {@link ServiceBusReceivedMessage message}. This will move the message into the deferred
     * subqueue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     *
     * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
     *     mode.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
     */
    public Mono<Void> defer(ServiceBusReceivedMessage message) {
        return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null);
    }

    /**
     * Defers a {@link ServiceBusReceivedMessage message} with modified message properties. This will move the
     * message into the deferred subqueue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options to defer the message; may carry properties to modify and/or a
     *     {@link ServiceBusTransactionContext} created by {@code createTransaction}.
     *
     * @return A {@link Mono} that completes when the defer operation finishes.
     * @throws NullPointerException if {@code message} or {@code options} is null, or if
     *     {@code options.transactionContext.transactionId} is null when a transaction context is specified.
     * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
     *     mode.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
     */
    public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) {
        if (Objects.isNull(options)) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(logger, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }
        return updateDisposition(message, DispositionStatus.DEFERRED, null, null,
            options.getPropertiesToModify(), options.getTransactionContext());
    }

    /**
     * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     *
     * @return A {@link Mono} that completes when the dead letter operation finishes.
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
     *     mode.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead letter
     *     queues</a>
     */
    public Mono<Void> deadLetter(ServiceBusReceivedMessage message) {
        // Delegate with the shared default options instance (no reason/description/properties).
        return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS);
    }

    /**
     * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options to deadLetter the message; may carry a reason, description, properties to modify and/or a
     *     {@link ServiceBusTransactionContext} created by {@code createTransaction}.
     * @return A {@link Mono} that completes when the dead letter operation finishes.
     * @throws NullPointerException if {@code message} or {@code options} is null. Also if
     *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
     * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
     *     mode.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead letter
     *     queues</a>
     */
    public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) {
        if (Objects.isNull(options)) {
            return monoError(logger, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(logger, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }
        // Dead-lettering is expressed as the SUSPENDED disposition at the AMQP level.
        return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(),
            options.getDeadLetterErrorDescription(), options.getPropertiesToModify(),
            options.getTransactionContext());
    }

    /**
     * Gets the state of the session if this receiver is a session receiver.
     *
     * @return The session state or an empty Mono if there is no state set for the session.
     * @throws IllegalStateException if the receiver is a non-session receiver.
     */
    public Mono<byte[]> getSessionState() {
        return getSessionState(receiverOptions.getSessionId());
    }

    /**
     * Reads the next active message without changing the state of the receiver or the message source. The first
     * call to {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the
     * subsequent message in the entity.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Mono<ServiceBusReceivedMessage> peekMessage() {
        return peekMessage(receiverOptions.getSessionId());
    }

    /**
     * Reads the next active message without changing the state of the receiver or the message source.
     *
     * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @throws IllegalStateException if the receiver is disposed.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek")));
        }
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(channel -> {
                // Resume from one past the highest sequence number seen so far.
                final long sequence = lastPeekedSequenceNumber.get() + 1;
                logger.verbose("Peek message from sequence number: {}", sequence);
                return channel.peek(sequence, sessionId, getLinkName(sessionId));
            })
            .handle((message, sink) -> {
                // Advance the cursor monotonically so concurrent peeks never move it backwards.
                final long current = lastPeekedSequenceNumber
                    .updateAndGet(value -> Math.max(value, message.getSequenceNumber()));
                logger.verbose("Updating last peeked sequence number: {}", current);
                sink.next(message);
            });
    }

    /**
     * Starting from the given sequence number, reads the next active message without changing the state of the
     * receiver or the message source.
     *
     * @param sequenceNumber The sequence number from where to read the message.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Mono<ServiceBusReceivedMessage> peekMessageAt(long sequenceNumber) {
        return peekMessageAt(sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Starting from the given sequence number, reads next the active message without changing the state of the
     * receiver or the message source.
     *
     * @param sequenceNumber The sequence number from where to read the message.
     * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    Mono<ServiceBusReceivedMessage> peekMessageAt(long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt")));
        }
        // Peeking at an explicit sequence number does not advance lastPeekedSequenceNumber.
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId)));
    }

    /**
     * Reads the next batch of active messages without changing the state of the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     *
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) {
        return peekMessages(maxMessages, receiverOptions.getSessionId());
    }

    /**
     * Reads the next batch of active messages without changing the state of the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
        }
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> {
                final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
                logger.verbose("Peek batch from sequence number: {}", nextSequenceNumber);

                final Flux<ServiceBusReceivedMessage> messages = node.peek(nextSequenceNumber, sessionId,
                    getLinkName(sessionId), maxMessages);

                // Side channel that advances the peek cursor to the last sequence number in the batch.
                // When the batch is empty, a placeholder message carrying the current cursor keeps
                // last() from failing; the handle() completes without emitting, so the placeholder is
                // never surfaced to the caller.
                final Mono<ServiceBusReceivedMessage> handle = messages
                    .switchIfEmpty(Mono.fromCallable(() -> {
                        ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(BinaryData
                            .fromBytes(new byte[0]));
                        emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                        return emptyMessage;
                    }))
                    .last()
                    .handle((last, sink) -> {
                        final long current = lastPeekedSequenceNumber
                            .updateAndGet(value -> Math.max(value, last.getSequenceNumber()));
                        logger.verbose("Last peeked sequence number in batch: {}", current);
                        sink.complete();
                    });

                return Flux.merge(messages, handle);
            });
    }

    /**
     * Starting from the given sequence number, reads the next batch of active messages without changing the
     * state of the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sequenceNumber The sequence number from where to start reading messages.
     *
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Flux<ServiceBusReceivedMessage> peekMessagesAt(int maxMessages, long sequenceNumber) {
        return peekMessagesAt(maxMessages, sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Starting from the given sequence number, reads the next batch of active messages without changing the
     * state of the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sequenceNumber The sequence number from where to start reading messages.
     * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    Flux<ServiceBusReceivedMessage> peekMessagesAt(int maxMessages, long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt")));
        }
        // Peeking at an explicit sequence number does not advance lastPeekedSequenceNumber.
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages));
    }

    /**
     * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
     * This Flux continuously receives messages from a Service Bus entity until either:
     *
     * <ul>
     * <li>The receiver is closed.</li>
     * <li>The subscription to the Flux is disposed.</li>
     * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux
     * {@link Flux
     * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
     * </ul>
     *
     * @return An <b>infinite</b> stream of messages from the Service Bus entity.
     */
    public Flux<ServiceBusReceivedMessage> receiveMessages() {
        // Surface only the payload; contexts that carry an error terminate the stream with it.
        return receiveMessagesWithContext()
            .handle((serviceBusMessageContext, sink) -> {
                if (serviceBusMessageContext.hasError()) {
                    sink.error(serviceBusMessageContext.getThrowable());
                    return;
                }
                sink.next(serviceBusMessageContext.getMessage());
            });
    }

    /**
     * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages}, wrapped in
     * {@link ServiceBusMessageContext}, from the Service Bus entity. The stream ends when the receiver is
     * closed, the subscription is disposed, a terminal signal propagates upstream, or an {@link AmqpException}
     * stops the receive link.
     *
     * @return An <b>infinite</b> stream of message contexts from the Service Bus entity.
     */
    Flux<ServiceBusMessageContext> receiveMessagesWithContext() {
        // Session receivers get messages from the session manager; others from the shared consumer.
        final Flux<ServiceBusMessageContext> messageFlux = sessionManager != null
            ? sessionManager.receive()
            : getOrCreateConsumer().receive().map(ServiceBusMessageContext::new);

        // Optionally layer automatic lock renewal on top of the raw message stream.
        final Flux<ServiceBusMessageContext> withAutoLockRenewal;
        if (receiverOptions.isAutoLockRenewEnabled()) {
            withAutoLockRenewal = new FluxAutoLockRenew(messageFlux, receiverOptions.getMaxLockRenewDuration(),
                renewalContainer, this::renewMessageLock);
        } else {
            withAutoLockRenewal = messageFlux;
        }

        // Optionally layer auto-complete (complete on success, abandon on error) on top of that.
        final Flux<ServiceBusMessageContext> withAutoComplete;
        if (receiverOptions.isEnableAutoComplete()) {
            withAutoComplete = new FluxAutoComplete(withAutoLockRenewal, completionLock,
                context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(),
                context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty());
        } else {
            withAutoComplete = withAutoLockRenewal;
        }

        return withAutoComplete
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by
     * using sequence number.
     *
     * @param sequenceNumber The sequence number of the deferred message.
     *
     * @return A deferred message with the matching {@code sequenceNumber}.
     */
    public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
        return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by
     * using sequence number.
     *
     * @param sequenceNumber The sequence number of the deferred message.
     * @param sessionId Session id of the deferred message. {@code null} if there is no session.
     *
     * @return A deferred message with the matching {@code sequenceNumber}.
     */
    Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) {
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId,
                getLinkName(sessionId), Collections.singleton(sequenceNumber)).last())
            .map(receivedMessage -> {
                if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                    return receivedMessage;
                }
                // In PEEK_LOCK mode, track the lock so settlement via the management node works.
                if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) {
                    receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                        receivedMessage.getLockedUntil(),
                        receivedMessage.getLockedUntil()));
                }
                return receivedMessage;
            });
    }

    /**
     * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be
     * received by using sequence number.
     *
     * @param sequenceNumbers The sequence numbers of the deferred messages.
     *
     * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
     */
    public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) {
        return receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId());
    }

    /**
     * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be
     * received by using sequence number.
     *
     * @param sequenceNumbers The sequence numbers of the deferred messages.
     * @param sessionId Session id of the deferred messages. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}.
     */
    Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch")));
        }
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId,
                getLinkName(sessionId), sequenceNumbers))
            .map(receivedMessage -> {
                if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                    return receivedMessage;
                }
                // In PEEK_LOCK mode, track the lock so settlement via the management node works.
                if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) {
                    receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                        receivedMessage.getLockedUntil(),
                        receivedMessage.getLockedUntil()));
                }
                return receivedMessage;
            });
    }

    /**
     * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on
     * the entity. When a message is received in {@link ReceiveMode#PEEK_LOCK} mode, the message is locked on
     * this receiver instance for a duration as specified during the entity creation (LockDuration). If
     * processing of the message requires longer than this duration, the lock needs to be renewed. For each
     * renewal, the lock is reset
     * to the entity's LockDuration value.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal.
     *
     * @return The new expiration time for the message.
     * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null.
     * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
     *     mode.
     * @throws IllegalStateException if the receiver is a session receiver.
     * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
     */
    public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
        } else if (Objects.isNull(message)) {
            return monoError(logger, new NullPointerException("'message' cannot be null."));
        } else if (Objects.isNull(message.getLockToken())) {
            return monoError(logger, new NullPointerException("'message.getLockToken()' cannot be null."));
        } else if (message.getLockToken().isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
        } else if (receiverOptions.isSessionReceiver()) {
            // Session receivers renew the session lock instead; see renewSessionLock().
            return monoError(logger, new IllegalStateException(
                String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken())));
        }
        return renewMessageLock(message.getLockToken())
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
    }

    /**
     * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on
     * the entity.
     *
     * @param lockToken to be renewed.
     *
     * @return The new expiration time for the message.
     */
    Mono<OffsetDateTime> renewMessageLock(String lockToken) {
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(serviceBusManagementNode ->
                serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null)))
            // Record the refreshed expiration so the lock container stays accurate.
            .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime,
                offsetDateTime));
    }

    /**
     * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param maxLockRenewalDuration Maximum duration to keep renewing the lock token.
     *
     * @return A lock renewal operation for the message.
     * @throws NullPointerException if {@code message}, {@code message.getLockToken()} or
     *     {@code maxLockRenewalDuration} is null.
     * @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed.
     * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
     */
    public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock")));
        } else if (Objects.isNull(message)) {
            return monoError(logger, new NullPointerException("'message' cannot be null."));
        } else if (Objects.isNull(message.getLockToken())) {
            return monoError(logger, new NullPointerException("'message.getLockToken()' cannot be null."));
        } else if (message.getLockToken().isEmpty()) {
            return monoError(logger, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
        } else if (receiverOptions.isSessionReceiver()) {
            return monoError(logger, new IllegalStateException(
                String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken())));
        } else if (maxLockRenewalDuration == null) {
            return monoError(logger, new NullPointerException("'maxLockRenewalDuration' cannot be null."));
        } else if (maxLockRenewalDuration.isNegative()) {
            return monoError(logger, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative."));
        }
        final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(),
            maxLockRenewalDuration, false, ignored -> renewMessageLock(message));
        // Register the operation so it is closed when the renewal window elapses.
        renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration),
            operation);
        return operation.getCompletionOperation()
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
    }

    /**
     * Renews the session lock if this receiver is a session receiver.
     *
     * @return The next expiration time for the session lock.
     * @throws IllegalStateException if the receiver is a non-session receiver.
     */
    public Mono<OffsetDateTime> renewSessionLock() {
        return renewSessionLock(receiverOptions.getSessionId());
    }

    /**
     * Starts the auto lock renewal for the session this receiver works for.
     *
     * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock.
     *
     * @return A lock renewal operation for the message.
     * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null.
     * @throws IllegalArgumentException if {@code sessionId} is an empty string.
     * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed.
     */
    public Mono<Void> renewSessionLock(Duration maxLockRenewalDuration) {
        return this.renewSessionLock(receiverOptions.getSessionId(), maxLockRenewalDuration);
    }

    /**
     * Sets the state of the session this receiver works for.
     *
     * @param sessionState State to set on the session.
     *
     * @return A Mono that completes when the session is set
     * @throws IllegalStateException if the receiver is a non-session receiver.
     */
    public Mono<Void> setSessionState(byte[] sessionState) {
        return this.setSessionState(receiverOptions.getSessionId(), sessionState);
    }

    /**
     * Starts a new service side transaction.
The {@link ServiceBusTransactionContext} should be passed to all * operations that needs to be in this transaction. * * <p><strong>Create a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.createTransaction} * * @return The {@link Mono} that finishes this operation on service bus resource. */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())); } /** * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * <p><strong>Commit a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.commitTransaction} * * @param transactionContext to be committed. * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is * null. 
*/ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * <p><strong>Rollback a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.rollbackTransaction} * * @param transactionContext to be rollbacked. * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is * null. 
*/ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Disposes of the consumer by closing the underlying connection to the service. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } try { completionLock.acquire(); } catch (InterruptedException e) { logger.info("Unable to obtain completion lock.", e); } logger.info("Removing receiver links."); final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null); if (disposed != null) { disposed.close(); } if (sessionManager != null) { sessionManager.close(); } onClientClose.run(); } /** * @return receiver options set by user; */ ReceiverOptions getReceiverOptions() { return receiverOptions; } /** * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are * held by the management node when they are received from the management node or management operations are * performed using that {@code lockToken}. * * @param lockToken Lock token to check for. * * @return {@code true} if the management node contains the lock token and false otherwise. 
*/ private boolean isManagementToken(String lockToken) { return managementNodeLocks.containsUnexpired(lockToken); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue()))); } else if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } final String lockToken = message.getLockToken(); final String sessionId = message.getSessionId(); if (receiverOptions.getReceiveMode() != ReceiveMode.PEEK_LOCK) { return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format( "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus)))); } final String sessionIdToUse; if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) { sessionIdToUse = receiverOptions.getSessionId(); } else { sessionIdToUse = sessionId; } logger.verbose("{}: Update started. Disposition: {}. Lock: {}. SessionId: {}.", entityPath, dispositionStatus, lockToken, sessionIdToUse); final Mono<Void> performOnManagement = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext)) .then(Mono.fromRunnable(() -> { logger.info("{}: Management node Update completed. Disposition: {}. 
Lock: {}.", entityPath, dispositionStatus, lockToken); managementNodeLocks.remove(lockToken); renewalContainer.remove(lockToken); })); Mono<Void> updateDispositionOperation; if (sessionManager != null) { updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus, propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext) .flatMap(isSuccess -> { if (isSuccess) { renewalContainer.remove(lockToken); return Mono.empty(); } logger.info("Could not perform on session manger. Performing on management node."); return performOnManagement; }); } else { final ServiceBusAsyncConsumer existingConsumer = consumer.get(); if (isManagementToken(lockToken) || existingConsumer == null) { updateDispositionOperation = performOnManagement; } else { updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext) .then(Mono.fromRunnable(() -> { logger.verbose("{}: Update completed. Disposition: {}. 
Lock: {}.", entityPath, dispositionStatus, lockToken); renewalContainer.remove(lockToken); })); } } return updateDispositionOperation .onErrorMap(throwable -> { if (receiverOptions.isEnableAutoComplete() && throwable instanceof AmqpException) { switch (dispositionStatus) { case COMPLETED: return new ServiceBusReceiverException((AmqpException) throwable, ServiceBusErrorSource.COMPLETE); case ABANDONED: return new ServiceBusReceiverException((AmqpException) throwable, ServiceBusErrorSource.ABANDONED); default: } } return throwable; }); } private ServiceBusAsyncConsumer getOrCreateConsumer() { final ServiceBusAsyncConsumer existing = consumer.get(); if (existing != null) { return existing; } final String linkName = StringUtil.getRandomString(entityPath); logger.info("{}: Creating consumer for link '{}'", entityPath, linkName); final Flux<ServiceBusReceiveLink> receiveLink = connectionProcessor.flatMap(connection -> { if (receiverOptions.isSessionReceiver()) { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, receiverOptions.getSessionId()); } else { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType); } }) .doOnNext(next -> { final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]" + " sessionEnabled? 
{} transferEntityPath: [{}], entityType: [{}]"; logger.verbose(format, next.getEntityPath(), receiverOptions.getReceiveMode(), CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()), "N/A", entityType); }) .repeat(); final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions()); final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith( new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy, receiverOptions.getReceiveMode())); final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor, messageSerializer, receiverOptions); if (consumer.compareAndSet(null, newConsumer)) { return newConsumer; } else { newConsumer.close(); return consumer.get(); } } /** * If the receiver has not connected via {@link * through the management node. * * @return The name of the receive link, or null of it has not connected via a receive link. */ private String getLinkName(String sessionId) { if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) { return sessionManager.getLinkName(sessionId); } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) { return null; } else { final ServiceBusAsyncConsumer existing = consumer.get(); return existing != null ? existing.getLinkName() : null; } } Mono<OffsetDateTime> renewSessionLock(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot renew session lock on a non-session receiver.")); } final String linkName = sessionManager != null ? 
sessionManager.getLinkName(sessionId) : null; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.renewSessionLock(sessionId, linkName)) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( "Cannot renew session lock on a non-session receiver.")); } else if (maxLockRenewalDuration == null) { return monoError(logger, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(logger, new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } else if (Objects.isNull(sessionId)) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty.")); } final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true, this::renewSessionLock); renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return operation.getCompletionOperation() .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> setSessionState(String sessionId, byte[] sessionState) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot set session state on a non-session receiver.")); } final String linkName 
= sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName)); } Mono<byte[]> getSessionState(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot get session state on a non-session receiver.")); } if (sessionManager != null) { return sessionManager.getSessionState(sessionId); } else { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId))); } } /** * Map the error to {@link ServiceBusReceiverException} */ boolean isConnectionClosed() { return this.connectionProcessor.isChannelClosed(); } }
class ServiceBusReceiverAsyncClient implements AutoCloseable { private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions(); private static final String TRANSACTION_LINK_NAME = "coordinator"; private final LockContainer<LockRenewalOperation> renewalContainer; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final LockContainer<OffsetDateTime> managementNodeLocks; private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class); private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager sessionManager; private final Semaphore completionLock = new Semaphore(1); private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1); private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>(); /** * Creates a receiver that listens to a Service Bus resource. * * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource. * @param entityPath The name of the topic or queue. * @param entityType The type of the Service Bus resource. * @param receiverOptions Options when receiving messages. * @param connectionProcessor The AMQP connection to the Service Bus resource. * @param tracerProvider Tracer for telemetry. * @param messageSerializer Serializes and deserializes Service Bus messages. * @param onClientClose Operation to run when the client completes. 
*/ ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.managementNodeLocks = new LockContainer<>(cleanupInterval); this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> { logger.verbose("Closing expired renewal operation. lockToken[{}]. status[{}]. 
throwable[{}].", renewal.getLockToken(), renewal.getStatus(), renewal.getThrowable()); renewal.close(); }); this.sessionManager = null; } ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose, ServiceBusSessionManager sessionManager) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null."); this.managementNodeLocks = new LockContainer<>(cleanupInterval); this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> { logger.info("Closing expired renewal operation. sessionId[{}]. status[{}]. throwable[{}]", renewal.getSessionId(), renewal.getStatus(), renewal.getThrowable()); renewal.close(); }); } /** * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Service Bus namespace that the connection is associated with. 
*/ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Service Bus resource this client interacts with. * * @return The Service Bus resource this client interacts with. */ public String getEntityPath() { return entityPath; } /** * Abandon a {@link ServiceBusReceivedMessage message}. This will make the message available * again for processing. Abandoning a message will increase the delivery count on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the Service Bus abandon operation completes. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. */ public Mono<Void> abandon(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null); } /** * Abandon a {@link ServiceBusReceivedMessage message} updates the message's properties. * This will make the message available again for processing. Abandoning a message will increase the delivery count * on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to abandon the message. You can specify * {@link AbandonOptions * {@code transactionContext} can be set using * {@link AbandonOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the Service Bus operation finishes. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. 
*/ public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'settlementOptions' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.ABANDONED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. */ public Mono<Void> complete(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null); } /** * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the * service. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to complete the message. The {@code transactionContext} can be set using * {@link CompleteOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. 
*/ public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, options.getTransactionContext()); } /** * Defers a {@link ServiceBusReceivedMessage message}. This will move message into the deferred subqueue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the Service Bus defer operation finishes. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * @see <a href="https: */ public Mono<Void> defer(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null); } /** * Defers a {@link ServiceBusReceivedMessage message} with modified message property. This will move message into * the deferred subqueue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to defer the message. You can specify {@link DeferOptions * to modify on the Message. The {@code transactionContext} can be set using * {@link DeferOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the defer operation finishes. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. 
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @see <a href="https: */ public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.DEFERRED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(ServiceBusReceivedMessage message) { return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to deadLetter the message. You can specify * {@link DeadLetterOptions * {@code transactionContext} can be set using * {@link DeadLetterOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. 
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(), options.getDeadLetterErrorDescription(), options.getPropertiesToModify(), options.getTransactionContext()); } /** * Gets the state of the session if this receiver is a session receiver. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ public Mono<byte[]> getSessionState() { return getSessionState(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @return A peeked {@link ServiceBusReceivedMessage}. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessage() { return peekMessage(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. 
     * @throws IllegalStateException if the receiver is disposed.
     */
    Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek")));
        }

        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(channel -> {
                // Peek the message right after the highest sequence number seen so far.
                final long sequence = lastPeekedSequenceNumber.get() + 1;

                logger.verbose("Peek message from sequence number: {}", sequence);

                return channel.peek(sequence, sessionId, getLinkName(sessionId));
            })
            .handle((message, sink) -> {
                // Advance the cursor monotonically so concurrent peeks never move it backwards.
                final long current = lastPeekedSequenceNumber
                    .updateAndGet(value -> Math.max(value, message.getSequenceNumber()));

                logger.verbose("Updating last peeked sequence number: {}", current);

                sink.next(message);
            });
    }

    /**
     * Starting from the given sequence number, reads next the active message without changing the state of the
     * receiver or the message source.
     *
     * @param sequenceNumber The sequence number from where to read the message.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     */
    public Mono<ServiceBusReceivedMessage> peekMessageAt(long sequenceNumber) {
        return peekMessageAt(sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Starting from the given sequence number, reads next the active message without changing the state of the
     * receiver or the message source.
     *
     * @param sequenceNumber The sequence number from where to read the message.
     * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     */
    Mono<ServiceBusReceivedMessage> peekMessageAt(long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return monoError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt")));
        }

        // NOTE: explicit-sequence peeks do not update lastPeekedSequenceNumber.
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId)));
    }

    /**
     * Reads the next batch of active messages without changing the state of the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     *
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     */
    public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) {
        return peekMessages(maxMessages, receiverOptions.getSessionId());
    }

    /**
     * Reads the next batch of active messages without changing the state of the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     */
    Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
        }

        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> {
                final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
                logger.verbose("Peek batch from sequence number: {}", nextSequenceNumber);

                final Flux<ServiceBusReceivedMessage> messages =
                    node.peek(nextSequenceNumber, sessionId, getLinkName(sessionId), maxMessages);

                // Side-channel that advances the peek cursor to the last message in the batch.
                // The switchIfEmpty placeholder keeps the cursor unchanged when no messages return;
                // handle() completes without emitting, so only `messages` items reach the caller.
                final Mono<ServiceBusReceivedMessage> handle = messages
                    .switchIfEmpty(Mono.fromCallable(() -> {
                        ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(BinaryData
                            .fromBytes(new byte[0]));
                        emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                        return emptyMessage;
                    }))
                    .last()
                    .handle((last, sink) -> {
                        final long current = lastPeekedSequenceNumber
                            .updateAndGet(value -> Math.max(value, last.getSequenceNumber()));

                        logger.verbose("Last peeked sequence number in batch: {}", current);
                        sink.complete();
                    });

                return Flux.merge(messages, handle);
            });
    }

    /**
     * Starting from the given sequence number, reads the next batch of active messages without changing the state of
     * the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sequenceNumber The sequence number from where to start reading messages.
     *
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     */
    public Flux<ServiceBusReceivedMessage> peekMessagesAt(int maxMessages, long sequenceNumber) {
        return peekMessagesAt(maxMessages, sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Starting from the given sequence number, reads the next batch of active messages without changing the state of
     * the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sequenceNumber The sequence number from where to start reading messages.
     * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     */
    Flux<ServiceBusReceivedMessage> peekMessagesAt(int maxMessages, long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(logger, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt")));
        }

        // NOTE: explicit-sequence batch peeks do not update lastPeekedSequenceNumber.
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages));
    }

    /**
     * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
     * This Flux continuously receives messages from a Service Bus entity until either:
     *
     * <ul>
     *     <li>The receiver is closed.</li>
     *     <li>The subscription to the Flux is disposed.</li>
     *     <li>A terminal signal from a downstream subscriber is propagated upstream.</li>
     *     <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
     * </ul>
     *
     * @return An <b>infinite</b> stream of messages from the Service Bus entity.
     */
    public Flux<ServiceBusReceivedMessage> receiveMessages() {
        // Unwrap the message from its context; surface any transport error as a Flux error signal.
        return receiveMessagesWithContext()
            .handle((serviceBusMessageContext, sink) -> {
                if (serviceBusMessageContext.hasError()) {
                    sink.error(serviceBusMessageContext.getThrowable());
                    return;
                }
                sink.next(serviceBusMessageContext.getMessage());
            });
    }

    /**
     * Receives an <b>infinite</b> stream of {@link ServiceBusMessageContext message contexts} from the Service Bus
     * entity, layering auto lock renewal and auto-complete on top of the raw message flux as configured in
     * {@code receiverOptions}.
     *
     * @return An <b>infinite</b> stream of message contexts from the Service Bus entity.
     */
    Flux<ServiceBusMessageContext> receiveMessagesWithContext() {
        // Session receivers stream through the session manager; otherwise a single consumer link is used.
        final Flux<ServiceBusMessageContext> messageFlux = sessionManager != null
            ? sessionManager.receive()
            : getOrCreateConsumer().receive().map(ServiceBusMessageContext::new);

        final Flux<ServiceBusMessageContext> withAutoLockRenewal;
        if (receiverOptions.isAutoLockRenewEnabled()) {
            withAutoLockRenewal = new FluxAutoLockRenew(messageFlux, receiverOptions.getMaxLockRenewDuration(),
                renewalContainer, this::renewMessageLock);
        } else {
            withAutoLockRenewal = messageFlux;
        }

        final Flux<ServiceBusMessageContext> withAutoComplete;
        if (receiverOptions.isEnableAutoComplete()) {
            // Completes on successful downstream processing, abandons on failure; completionLock
            // ensures close() waits for an in-flight settlement.
            withAutoComplete = new FluxAutoComplete(withAutoLockRenewal, completionLock,
                context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(),
                context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty());
        } else {
            withAutoComplete = withAutoLockRenewal;
        }

        return withAutoComplete
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Receives a deferred {@link ServiceBusReceivedMessage message}.
Deferred messages can only be received by using * sequence number. * * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * * @return A deferred message with the matching {@code sequenceNumber}. */ public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) { return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId()); } /** * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using * sequence number. * * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * @param sessionId Session id of the deferred message. {@code null} if there is no session. * * @return A deferred message with the matching {@code sequenceNumber}. */ Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last()) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil())); } return receivedMessage; }); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}. 
*/ public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) { return receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId()); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * @param sessionId Session id of the deferred messages. {@code null} if there is no session. * * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}. */ Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), sequenceNumbers)) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil())); } return receivedMessage; }); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. When a message is received in {@link ReceiveMode * this receiver instance for a duration as specified during the entity creation (LockDuration). If processing of * the message requires longer than this duration, the lock needs to be renewed. For each renewal, the lock is reset * to the entity's LockDuration value. 
* * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal. * * @return The new expiration time for the message. * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalStateException if the receiver is a session receiver. * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value. */ public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock"))); } else if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } else if (Objects.isNull(message.getLockToken())) { return monoError(logger, new NullPointerException("'message.getLockToken()' cannot be null.")); } else if (message.getLockToken().isEmpty()) { return monoError(logger, new IllegalArgumentException("'message.getLockToken()' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken()))); } return renewMessageLock(message.getLockToken()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. * * @param lockToken to be renewed. * * @return The new expiration time for the message. 
*/ Mono<OffsetDateTime> renewMessageLock(String lockToken) { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(serviceBusManagementNode -> serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null))) .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime, offsetDateTime)); } /** * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param maxLockRenewalDuration Maximum duration to keep renewing the lock token. * * @return A lock renewal operation for the message. * @throws NullPointerException if {@code message}, {@code message.getLockToken()} or {@code * maxLockRenewalDuration} is null. * @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed. * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value. */ public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock"))); } else if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } else if (Objects.isNull(message.getLockToken())) { return monoError(logger, new NullPointerException("'message.getLockToken()' cannot be null.")); } else if (message.getLockToken().isEmpty()) { return monoError(logger, new IllegalArgumentException("'message.getLockToken()' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken()))); } else if (maxLockRenewalDuration == null) { return monoError(logger, new NullPointerException("'maxLockRenewalDuration' 
cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(logger, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative.")); } final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(), maxLockRenewalDuration, false, ignored -> renewMessageLock(message)); renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return operation.getCompletionOperation() .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Renews the session lock if this receiver is a session receiver. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ public Mono<OffsetDateTime> renewSessionLock() { return renewSessionLock(receiverOptions.getSessionId()); } /** * Starts the auto lock renewal for the session this receiver works for. * * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock. * * @return A lock renewal operation for the message. * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null. * @throws IllegalArgumentException if {@code sessionId} is an empty string. * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed. */ public Mono<Void> renewSessionLock(Duration maxLockRenewalDuration) { return this.renewSessionLock(receiverOptions.getSessionId(), maxLockRenewalDuration); } /** * Sets the state of the session this receiver works for. * * @param sessionState State to set on the session. * * @return A Mono that completes when the session is set * @throws IllegalStateException if the receiver is a non-session receiver. */ public Mono<Void> setSessionState(byte[] sessionState) { return this.setSessionState(receiverOptions.getSessionId(), sessionState); } /** * Starts a new service side transaction. 
The {@link ServiceBusTransactionContext} should be passed to all * operations that needs to be in this transaction. * * <p><strong>Create a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.createTransaction} * * @return The {@link Mono} that finishes this operation on service bus resource. */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())); } /** * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * <p><strong>Commit a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.commitTransaction} * * @param transactionContext to be committed. * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is * null. 
*/ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * <p><strong>Rollback a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.rollbackTransaction} * * @param transactionContext to be rollbacked. * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is * null. 
*/ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Disposes of the consumer by closing the underlying connection to the service. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } try { completionLock.acquire(); } catch (InterruptedException e) { logger.info("Unable to obtain completion lock.", e); } logger.info("Removing receiver links."); final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null); if (disposed != null) { disposed.close(); } if (sessionManager != null) { sessionManager.close(); } onClientClose.run(); } /** * @return receiver options set by user; */ ReceiverOptions getReceiverOptions() { return receiverOptions; } /** * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are * held by the management node when they are received from the management node or management operations are * performed using that {@code lockToken}. * * @param lockToken Lock token to check for. * * @return {@code true} if the management node contains the lock token and false otherwise. 
*/ private boolean isManagementToken(String lockToken) { return managementNodeLocks.containsUnexpired(lockToken); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue()))); } else if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } final String lockToken = message.getLockToken(); final String sessionId = message.getSessionId(); if (receiverOptions.getReceiveMode() != ReceiveMode.PEEK_LOCK) { return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format( "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus)))); } final String sessionIdToUse; if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) { sessionIdToUse = receiverOptions.getSessionId(); } else { sessionIdToUse = sessionId; } logger.verbose("{}: Update started. Disposition: {}. Lock: {}. SessionId: {}.", entityPath, dispositionStatus, lockToken, sessionIdToUse); final Mono<Void> performOnManagement = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext)) .then(Mono.fromRunnable(() -> { logger.info("{}: Management node Update completed. Disposition: {}. 
Lock: {}.", entityPath, dispositionStatus, lockToken); managementNodeLocks.remove(lockToken); renewalContainer.remove(lockToken); })); Mono<Void> updateDispositionOperation; if (sessionManager != null) { updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus, propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext) .flatMap(isSuccess -> { if (isSuccess) { renewalContainer.remove(lockToken); return Mono.empty(); } logger.info("Could not perform on session manger. Performing on management node."); return performOnManagement; }); } else { final ServiceBusAsyncConsumer existingConsumer = consumer.get(); if (isManagementToken(lockToken) || existingConsumer == null) { updateDispositionOperation = performOnManagement; } else { updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext) .then(Mono.fromRunnable(() -> { logger.verbose("{}: Update completed. Disposition: {}. 
Lock: {}.", entityPath, dispositionStatus, lockToken); renewalContainer.remove(lockToken); })); } } return updateDispositionOperation .onErrorMap(throwable -> { if (throwable instanceof ServiceBusReceiverException) { return throwable; } switch (dispositionStatus) { case COMPLETED: return new ServiceBusReceiverException(throwable, ServiceBusErrorSource.COMPLETE); case ABANDONED: return new ServiceBusReceiverException(throwable, ServiceBusErrorSource.ABANDONED); default: return new ServiceBusReceiverException(throwable, ServiceBusErrorSource.UNKNOWN); } }); } private ServiceBusAsyncConsumer getOrCreateConsumer() { final ServiceBusAsyncConsumer existing = consumer.get(); if (existing != null) { return existing; } final String linkName = StringUtil.getRandomString(entityPath); logger.info("{}: Creating consumer for link '{}'", entityPath, linkName); final Flux<ServiceBusReceiveLink> receiveLink = connectionProcessor.flatMap(connection -> { if (receiverOptions.isSessionReceiver()) { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, receiverOptions.getSessionId()); } else { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType); } }) .doOnNext(next -> { final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]" + " sessionEnabled? 
{} transferEntityPath: [{}], entityType: [{}]"; logger.verbose(format, next.getEntityPath(), receiverOptions.getReceiveMode(), CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()), "N/A", entityType); }) .repeat(); final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions()); final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith( new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy, receiverOptions.getReceiveMode())); final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor, messageSerializer, receiverOptions); if (consumer.compareAndSet(null, newConsumer)) { return newConsumer; } else { newConsumer.close(); return consumer.get(); } } /** * If the receiver has not connected via {@link * through the management node. * * @return The name of the receive link, or null of it has not connected via a receive link. */ private String getLinkName(String sessionId) { if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) { return sessionManager.getLinkName(sessionId); } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) { return null; } else { final ServiceBusAsyncConsumer existing = consumer.get(); return existing != null ? existing.getLinkName() : null; } } Mono<OffsetDateTime> renewSessionLock(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot renew session lock on a non-session receiver.")); } final String linkName = sessionManager != null ? 
sessionManager.getLinkName(sessionId) : null; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.renewSessionLock(sessionId, linkName)) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( "Cannot renew session lock on a non-session receiver.")); } else if (maxLockRenewalDuration == null) { return monoError(logger, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(logger, new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } else if (Objects.isNull(sessionId)) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty.")); } final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true, this::renewSessionLock); renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return operation.getCompletionOperation() .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> setSessionState(String sessionId, byte[] sessionState) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot set session state on a non-session receiver.")); } final String linkName 
= sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName)); } Mono<byte[]> getSessionState(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot get session state on a non-session receiver.")); } if (sessionManager != null) { return sessionManager.getSessionState(sessionId); } else { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId))); } } /** * Map the error to {@link ServiceBusReceiverException} */ boolean isConnectionClosed() { return this.connectionProcessor.isChannelClosed(); } }
Done
public AzureMonitorExporterBuilder connectionString(String connectionString) { this.instrumentationKey = extractValueFromConnectionString(connectionString, "InstrumentationKey"); this.endpoint(extractValueFromConnectionString(connectionString, "IngestionEndpoint")); return this; }
this.instrumentationKey = extractValueFromConnectionString(connectionString, "InstrumentationKey");
public AzureMonitorExporterBuilder connectionString(String connectionString) { Map<String, String> keyValues = extractKeyValuesFromConnectionString(connectionString); if (!keyValues.containsKey("InstrumentationKey")) { throw logger.logExceptionAsError( new IllegalArgumentException("InstrumentationKey not found in connectionString")); } this.instrumentationKey = keyValues.get("InstrumentationKey"); String endpoint = keyValues.get("IngestionEndpoint"); if (endpoint != null) { this.endpoint(endpoint); } return this; }
class AzureMonitorExporterBuilder { private final ClientLogger logger = new ClientLogger(AzureMonitorExporterBuilder.class); private final ApplicationInsightsClientImplBuilder restServiceClientBuilder; private String instrumentationKey; /** * Creates an instance of {@link AzureMonitorExporterBuilder}. */ public AzureMonitorExporterBuilder() { restServiceClientBuilder = new ApplicationInsightsClientImplBuilder(); } /** * Sets the service endpoint for the Azure Monitor Exporter. * @param endpoint The URL of the Azure Monitor Exporter endpoint. * @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException if {@code endpoint} is null. * @throws IllegalArgumentException if {@code endpoint} cannot be parsed into a valid URL. */ AzureMonitorExporterBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); try { URL url = new URL(endpoint); restServiceClientBuilder.host(url.getProtocol() + ": } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning( new IllegalArgumentException("'endpoint' must be a valid URL.", ex)); } return this; } /** * Sets the HTTP pipeline to use for the service client. If {@code pipeline} is set, all other settings are * ignored, apart from {@link * * @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder pipeline(HttpPipeline httpPipeline) { restServiceClientBuilder.pipeline(httpPipeline); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpClient(HttpClient client) { restServiceClientBuilder.httpClient(client); return this; } /** * Sets the logging configuration for HTTP requests and responses. 
* * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions logOptions) { restServiceClientBuilder.httpLogOptions(logOptions); return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * <p> * The default retry policy will be used if not provided to build {@link AzureMonitorExporterBuilder} . * @param retryPolicy user's retry policy applied to each request. * * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder retryPolicy(RetryPolicy retryPolicy) { restServiceClientBuilder.retryPolicy(retryPolicy); return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * @param policy The retry policy for service requests. * * @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException If {@code policy} is {@code null}. */ public AzureMonitorExporterBuilder addPolicy(HttpPipelinePolicy policy) { restServiceClientBuilder.addPolicy(Objects.requireNonNull(policy, "'policy' cannot be null.")); return this; } /** * Sets the configuration store that is used during construction of the service client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder configuration(Configuration configuration) { restServiceClientBuilder.configuration(configuration); return this; } /** * The connection string to use for exporting telemetry events to Azure Monitor. * @param connectionString The connection string for the Azure Monitor resource. 
* @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException If the connection string is {@code null}. * @throws IllegalArgumentException If the connection string is invalid. */ private String extractValueFromConnectionString(String connectionString, String key) { Objects.requireNonNull(connectionString); return Arrays.stream(connectionString.split(";")) .filter(keyValue -> { String[] keyValuePair = keyValue.split("="); return keyValuePair.length == 2 && keyValuePair[0].equalsIgnoreCase(key); }) .map(instrumentationKeyValue -> instrumentationKeyValue.split("=")[1]) .filter(iKey -> !CoreUtils.isNullOrEmpty(iKey)) .findFirst() .orElseThrow(() -> new IllegalArgumentException(key + " not found in connectionString")); } /** * Creates a {@link MonitorExporterClient} based on options set in the builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link MonitorExporterClient} is created. * * <p> * If {@link * endpoint} are used to create the {@link MonitorExporterAsyncClient client}. All other builder settings are * ignored. * </p> * @return A {@link MonitorExporterClient} with the options set from the builder. * @throws NullPointerException if {@link */ MonitorExporterClient buildClient() { return new MonitorExporterClient(buildAsyncClient()); } /** * Creates a {@link MonitorExporterAsyncClient} based on options set in the builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link MonitorExporterAsyncClient} is created. * * <p> * If {@link * endpoint} are used to create the {@link MonitorExporterAsyncClient client}. All other builder settings are * ignored. * </p> * @return A {@link MonitorExporterAsyncClient} with the options set from the builder. 
*/ MonitorExporterAsyncClient buildAsyncClient() { final SimpleModule ndjsonModule = new SimpleModule("Ndjson List Serializer"); JacksonAdapter jacksonAdapter = new JacksonAdapter(); jacksonAdapter.serializer().registerModule(ndjsonModule); ndjsonModule.addSerializer(new NdJsonSerializer()); restServiceClientBuilder.serializerAdapter(jacksonAdapter); ApplicationInsightsClientImpl restServiceClient = restServiceClientBuilder.buildClient(); return new MonitorExporterAsyncClient(restServiceClient); } /** * Creates an {@link AzureMonitorExporter} based on the options set in the builder. This exporter is an * implementation of OpenTelemetry {@link SpanExporter}. * * @return An instance of {@link AzureMonitorExporter}. * @throws NullPointerException if the instrumentation key is not set. */ public AzureMonitorExporter buildExporter() { Objects.requireNonNull(instrumentationKey, "'connectionString' cannot be null"); return new AzureMonitorExporter(buildClient(), instrumentationKey); } }
class AzureMonitorExporterBuilder { private final ClientLogger logger = new ClientLogger(AzureMonitorExporterBuilder.class); private final ApplicationInsightsClientImplBuilder restServiceClientBuilder; private String instrumentationKey; /** * Creates an instance of {@link AzureMonitorExporterBuilder}. */ public AzureMonitorExporterBuilder() { restServiceClientBuilder = new ApplicationInsightsClientImplBuilder(); } /** * Sets the service endpoint for the Azure Monitor Exporter. * @param endpoint The URL of the Azure Monitor Exporter endpoint. * @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException if {@code endpoint} is null. * @throws IllegalArgumentException if {@code endpoint} cannot be parsed into a valid URL. */ AzureMonitorExporterBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); try { URL url = new URL(endpoint); restServiceClientBuilder.host(url.getProtocol() + ": } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning( new IllegalArgumentException("'endpoint' must be a valid URL.", ex)); } return this; } /** * Sets the HTTP pipeline to use for the service client. If {@code pipeline} is set, all other settings are * ignored, apart from {@link * * @param httpPipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder pipeline(HttpPipeline httpPipeline) { restServiceClientBuilder.pipeline(httpPipeline); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpClient(HttpClient client) { restServiceClientBuilder.httpClient(client); return this; } /** * Sets the logging configuration for HTTP requests and responses. 
* * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions logOptions) { restServiceClientBuilder.httpLogOptions(logOptions); return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * <p> * The default retry policy will be used if not provided to build {@link AzureMonitorExporterBuilder} . * @param retryPolicy user's retry policy applied to each request. * * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder retryPolicy(RetryPolicy retryPolicy) { restServiceClientBuilder.retryPolicy(retryPolicy); return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * @param policy The retry policy for service requests. * * @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException If {@code policy} is {@code null}. */ public AzureMonitorExporterBuilder addPolicy(HttpPipelinePolicy policy) { restServiceClientBuilder.addPolicy(Objects.requireNonNull(policy, "'policy' cannot be null.")); return this; } /** * Sets the configuration store that is used during construction of the service client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder configuration(Configuration configuration) { restServiceClientBuilder.configuration(configuration); return this; } /** * The connection string to use for exporting telemetry events to Azure Monitor. * @param connectionString The connection string for the Azure Monitor resource. 
* @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException If the connection string is {@code null}. * @throws IllegalArgumentException If the connection string is invalid. */ private Map<String, String> extractKeyValuesFromConnectionString(String connectionString) { Objects.requireNonNull(connectionString); Map<String, String> keyValues = new HashMap<>(); String[] splits = connectionString.split(";"); for (String split : splits) { String[] keyValPair = split.split("="); if (keyValPair.length == 2) { keyValues.put(keyValPair[0], keyValPair[1]); } } return keyValues; } /** * Creates a {@link MonitorExporterClient} based on options set in the builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link MonitorExporterClient} is created. * * <p> * If {@link * endpoint} are used to create the {@link MonitorExporterAsyncClient client}. All other builder settings are * ignored. * </p> * @return A {@link MonitorExporterClient} with the options set from the builder. * @throws NullPointerException if {@link */ MonitorExporterClient buildClient() { return new MonitorExporterClient(buildAsyncClient()); } /** * Creates a {@link MonitorExporterAsyncClient} based on options set in the builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link MonitorExporterAsyncClient} is created. * * <p> * If {@link * endpoint} are used to create the {@link MonitorExporterAsyncClient client}. All other builder settings are * ignored. * </p> * @return A {@link MonitorExporterAsyncClient} with the options set from the builder. 
*/ MonitorExporterAsyncClient buildAsyncClient() { final SimpleModule ndjsonModule = new SimpleModule("Ndjson List Serializer"); JacksonAdapter jacksonAdapter = new JacksonAdapter(); jacksonAdapter.serializer().registerModule(ndjsonModule); ndjsonModule.addSerializer(new NdJsonSerializer()); restServiceClientBuilder.serializerAdapter(jacksonAdapter); ApplicationInsightsClientImpl restServiceClient = restServiceClientBuilder.buildClient(); return new MonitorExporterAsyncClient(restServiceClient); } /** * Creates an {@link AzureMonitorExporter} based on the options set in the builder. This exporter is an * implementation of OpenTelemetry {@link SpanExporter}. * * @return An instance of {@link AzureMonitorExporter}. * @throws NullPointerException if the instrumentation key is not set. */ public AzureMonitorExporter buildExporter() { Objects.requireNonNull(instrumentationKey, "'connectionString' cannot be null"); return new AzureMonitorExporter(buildClient(), instrumentationKey); } }
based on our discussion converting all the exception to SBRE .
private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) { if (!(throwable instanceof AmqpException) && !(throwable instanceof ServiceBusReceiverException)) { return new ServiceBusReceiverException(throwable, errorSource); } return throwable; }
if (!(throwable instanceof AmqpException) && !(throwable instanceof ServiceBusReceiverException)) {
private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) { if (!(throwable instanceof ServiceBusReceiverException)) { return new ServiceBusReceiverException(throwable, errorSource); } return throwable; }
class ServiceBusReceiverAsyncClient implements AutoCloseable { private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions(); private static final String TRANSACTION_LINK_NAME = "coordinator"; private final LockContainer<LockRenewalOperation> renewalContainer; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final LockContainer<OffsetDateTime> managementNodeLocks; private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class); private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager sessionManager; private final Semaphore completionLock = new Semaphore(1); private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1); private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>(); /** * Creates a receiver that listens to a Service Bus resource. * * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource. * @param entityPath The name of the topic or queue. * @param entityType The type of the Service Bus resource. * @param receiverOptions Options when receiving messages. * @param connectionProcessor The AMQP connection to the Service Bus resource. * @param tracerProvider Tracer for telemetry. * @param messageSerializer Serializes and deserializes Service Bus messages. * @param onClientClose Operation to run when the client completes. 
*/ ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.managementNodeLocks = new LockContainer<>(cleanupInterval); this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> { logger.verbose("Closing expired renewal operation. lockToken[{}]. status[{}]. 
throwable[{}].", renewal.getLockToken(), renewal.getStatus(), renewal.getThrowable()); renewal.close(); }); this.sessionManager = null; } ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose, ServiceBusSessionManager sessionManager) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null."); this.managementNodeLocks = new LockContainer<>(cleanupInterval); this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> { logger.info("Closing expired renewal operation. sessionId[{}]. status[{}]. throwable[{}]", renewal.getSessionId(), renewal.getStatus(), renewal.getThrowable()); renewal.close(); }); } /** * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Service Bus namespace that the connection is associated with. 
*/ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Service Bus resource this client interacts with. * * @return The Service Bus resource this client interacts with. */ public String getEntityPath() { return entityPath; } /** * Abandon a {@link ServiceBusReceivedMessage message}. This will make the message available * again for processing. Abandoning a message will increase the delivery count on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the Service Bus abandon operation completes. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. */ public Mono<Void> abandon(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null); } /** * Abandon a {@link ServiceBusReceivedMessage message} updates the message's properties. * This will make the message available again for processing. Abandoning a message will increase the delivery count * on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to abandon the message. You can specify * {@link AbandonOptions * {@code transactionContext} can be set using * {@link AbandonOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the Service Bus operation finishes. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. 
*/ public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'settlementOptions' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.ABANDONED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. */ public Mono<Void> complete(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null); } /** * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the * service. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to complete the message. The {@code transactionContext} can be set using * {@link CompleteOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. 
*/ public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, options.getTransactionContext()); } /** * Defers a {@link ServiceBusReceivedMessage message}. This will move message into the deferred subqueue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the Service Bus defer operation finishes. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * @see <a href="https: */ public Mono<Void> defer(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null); } /** * Defers a {@link ServiceBusReceivedMessage message} with modified message property. This will move message into * the deferred subqueue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to defer the message. You can specify {@link DeferOptions * to modify on the Message. The {@code transactionContext} can be set using * {@link DeferOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the defer operation finishes. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. 
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @see <a href="https: */ public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.DEFERRED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(ServiceBusReceivedMessage message) { return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to deadLetter the message. You can specify * {@link DeadLetterOptions * {@code transactionContext} can be set using * {@link DeadLetterOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. 
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(), options.getDeadLetterErrorDescription(), options.getPropertiesToModify(), options.getTransactionContext()); } /** * Gets the state of the session if this receiver is a session receiver. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ public Mono<byte[]> getSessionState() { return getSessionState(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @return A peeked {@link ServiceBusReceivedMessage}. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessage() { return peekMessage(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. 
* @throws IllegalStateException if the receiver is disposed. * @see <a href="https: */ Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> { final long sequence = lastPeekedSequenceNumber.get() + 1; logger.verbose("Peek message from sequence number: {}", sequence); return channel.peek(sequence, sessionId, getLinkName(sessionId)); }) .handle((message, sink) -> { final long current = lastPeekedSequenceNumber .updateAndGet(value -> Math.max(value, message.getSequenceNumber())); logger.verbose("Updating last peeked sequence number: {}", current); sink.next(message); }); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. * * @return A peeked {@link ServiceBusReceivedMessage}. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessageAt(long sequenceNumber) { return peekMessageAt(sequenceNumber, receiverOptions.getSessionId()); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. 
* @see <a href="https: */ Mono<ServiceBusReceivedMessage> peekMessageAt(long sequenceNumber, String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId))); } /** * Reads the next batch of active messages without changing the state of the receiver or the message source. * * @param maxMessages The number of messages. * * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) { return peekMessages(maxMessages, receiverOptions.getSessionId()); } /** * Reads the next batch of active messages without changing the state of the receiver or the message source. * * @param maxMessages The number of messages. * @param sessionId Session id of the messages to peek from. {@code null} if there is no session. * * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. 
* @see <a href="https: */ Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> { final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1; logger.verbose("Peek batch from sequence number: {}", nextSequenceNumber); final Flux<ServiceBusReceivedMessage> messages = node.peek(nextSequenceNumber, sessionId, getLinkName(sessionId), maxMessages); final Mono<ServiceBusReceivedMessage> handle = messages .switchIfEmpty(Mono.fromCallable(() -> { ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(BinaryData .fromBytes(new byte[0])); emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get()); return emptyMessage; })) .last() .handle((last, sink) -> { final long current = lastPeekedSequenceNumber .updateAndGet(value -> Math.max(value, last.getSequenceNumber())); logger.verbose("Last peeked sequence number in batch: {}", current); sink.complete(); }); return Flux.merge(messages, handle); }); } /** * Starting from the given sequence number, reads the next batch of active messages without changing the state of * the receiver or the message source. * * @param maxMessages The number of messages. * @param sequenceNumber The sequence number from where to start reading messages. * * @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. 
* @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekMessagesAt(int maxMessages, long sequenceNumber) { return peekMessagesAt(maxMessages, sequenceNumber, receiverOptions.getSessionId()); } /** * Starting from the given sequence number, reads the next batch of active messages without changing the state of * the receiver or the message source. * * @param maxMessages The number of messages. * @param sequenceNumber The sequence number from where to start reading messages. * @param sessionId Session id of the messages to peek from. {@code null} if there is no session. * * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @see <a href="https: */ Flux<ServiceBusReceivedMessage> peekMessagesAt(int maxMessages, long sequenceNumber, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages)); } /** * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity. * This Flux continuously receives messages from a Service Bus entity until either: * * <ul> * <li>The receiver is closed.</li> * <li>The subscription to the Flux is disposed.</li> * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux * {@link Flux * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li> * </ul> * * @return An <b>infinite</b> stream of messages from the Service Bus entity. 
*/ public Flux<ServiceBusReceivedMessage> receiveMessages() { return receiveMessagesWithContext() .handle((serviceBusMessageContext, sink) -> { if (serviceBusMessageContext.hasError()) { sink.error(serviceBusMessageContext.getThrowable()); return; } sink.next(serviceBusMessageContext.getMessage()); }); } /** * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity. * This Flux continuously receives messages from a Service Bus entity until either: * * <ul> * <li>The receiver is closed.</li> * <li>The subscription to the Flux is disposed.</li> * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux * {@link Flux * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li> * </ul> * * @return An <b>infinite</b> stream of messages from the Service Bus entity. */ Flux<ServiceBusMessageContext> receiveMessagesWithContext() { final Flux<ServiceBusMessageContext> messageFlux = sessionManager != null ? sessionManager.receive() : getOrCreateConsumer().receive().map(ServiceBusMessageContext::new); final Flux<ServiceBusMessageContext> withAutoLockRenewal; if (receiverOptions.isAutoLockRenewEnabled()) { withAutoLockRenewal = new FluxAutoLockRenew(messageFlux, receiverOptions.getMaxLockRenewDuration(), renewalContainer, this::renewMessageLock); } else { withAutoLockRenewal = messageFlux; } final Flux<ServiceBusMessageContext> withAutoComplete; if (receiverOptions.isEnableAutoComplete()) { withAutoComplete = new FluxAutoComplete(withAutoLockRenewal, completionLock, context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(), context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty()); } else { withAutoComplete = withAutoLockRenewal; } return withAutoComplete .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Receives a deferred {@link ServiceBusReceivedMessage message}. 
Deferred messages can only be received by using * sequence number. * * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * * @return A deferred message with the matching {@code sequenceNumber}. */ public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) { return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId()); } /** * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using * sequence number. * * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * @param sessionId Session id of the deferred message. {@code null} if there is no session. * * @return A deferred message with the matching {@code sequenceNumber}. */ Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last()) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil())); } return receivedMessage; }); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}. 
*/ public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) { return receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId()); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * @param sessionId Session id of the deferred messages. {@code null} if there is no session. * * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}. */ Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), sequenceNumbers)) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil())); } return receivedMessage; }); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. When a message is received in {@link ReceiveMode * this receiver instance for a duration as specified during the entity creation (LockDuration). If processing of * the message requires longer than this duration, the lock needs to be renewed. For each renewal, the lock is reset * to the entity's LockDuration value. 
* * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal. * * @return The new expiration time for the message. * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalStateException if the receiver is a session receiver. * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value. */ public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock"))); } else if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } else if (Objects.isNull(message.getLockToken())) { return monoError(logger, new NullPointerException("'message.getLockToken()' cannot be null.")); } else if (message.getLockToken().isEmpty()) { return monoError(logger, new IllegalArgumentException("'message.getLockToken()' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken()))); } return renewMessageLock(message.getLockToken()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. * * @param lockToken to be renewed. * * @return The new expiration time for the message. 
*/ Mono<OffsetDateTime> renewMessageLock(String lockToken) { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(serviceBusManagementNode -> serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null))) .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime, offsetDateTime)); } /** * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param maxLockRenewalDuration Maximum duration to keep renewing the lock token. * * @return A lock renewal operation for the message. * @throws NullPointerException if {@code message}, {@code message.getLockToken()} or {@code * maxLockRenewalDuration} is null. * @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed. * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value. */ public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock"))); } else if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } else if (Objects.isNull(message.getLockToken())) { return monoError(logger, new NullPointerException("'message.getLockToken()' cannot be null.")); } else if (message.getLockToken().isEmpty()) { return monoError(logger, new IllegalArgumentException("'message.getLockToken()' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken()))); } else if (maxLockRenewalDuration == null) { return monoError(logger, new NullPointerException("'maxLockRenewalDuration' 
cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(logger, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative.")); } final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(), maxLockRenewalDuration, false, ignored -> renewMessageLock(message)); renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return operation.getCompletionOperation() .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Renews the session lock if this receiver is a session receiver. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ public Mono<OffsetDateTime> renewSessionLock() { return renewSessionLock(receiverOptions.getSessionId()); } /** * Starts the auto lock renewal for the session this receiver works for. * * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock. * * @return A lock renewal operation for the message. * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null. * @throws IllegalArgumentException if {@code sessionId} is an empty string. * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed. */ public Mono<Void> renewSessionLock(Duration maxLockRenewalDuration) { return this.renewSessionLock(receiverOptions.getSessionId(), maxLockRenewalDuration); } /** * Sets the state of the session this receiver works for. * * @param sessionState State to set on the session. * * @return A Mono that completes when the session is set * @throws IllegalStateException if the receiver is a non-session receiver. */ public Mono<Void> setSessionState(byte[] sessionState) { return this.setSessionState(receiverOptions.getSessionId(), sessionState); } /** * Starts a new service side transaction. 
The {@link ServiceBusTransactionContext} should be passed to all * operations that needs to be in this transaction. * * <p><strong>Create a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.createTransaction} * * @return The {@link Mono} that finishes this operation on service bus resource. */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())); } /** * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * <p><strong>Commit a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.commitTransaction} * * @param transactionContext to be committed. * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is * null. 
*/
public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
    // Reject operations on a disposed client before touching the connection.
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction")));
    }
    // Guard clauses: both the context and its transaction id are required.
    if (transactionContext == null) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (transactionContext.getTransactionId() == null) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }

    // Commit happens over the dedicated transaction-coordinator session.
    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.commitTransaction(
            new AmqpTransaction(transactionContext.getTransactionId())));
}

/**
 * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
 * <p><strong>Rollback a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.rollbackTransaction}
 *
 * @param transactionContext to be rollbacked.
 *
 * @return The {@link Mono} that finishes this operation on service bus resource.
 * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is
 * null.
*/
public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }

    // Rollback happens over the dedicated transaction-coordinator session.
    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction(
            transactionContext.getTransactionId())));
}

/**
 * Disposes of the consumer by closing the underlying connection to the service.
 */
@Override
public void close() {
    // Idempotent: the first caller flips the flag; later calls return immediately.
    if (isDisposed.getAndSet(true)) {
        return;
    }

    try {
        // Wait for any in-flight settlement (e.g. auto-complete) to finish before tearing down links.
        completionLock.acquire();
    } catch (InterruptedException e) {
        logger.info("Unable to obtain completion lock.", e);
        // Fix: restore the interrupt status instead of swallowing it, so callers up the
        // stack can still observe that this thread was interrupted.
        Thread.currentThread().interrupt();
    }

    logger.info("Removing receiver links.");
    final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null);
    if (disposed != null) {
        disposed.close();
    }

    if (sessionManager != null) {
        sessionManager.close();
    }

    onClientClose.run();
}

/**
 * @return receiver options set by user;
 */
ReceiverOptions getReceiverOptions() {
    return receiverOptions;
}

/**
 * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are
 * held by the management node when they are received from the management node or management operations are
 * performed using that {@code lockToken}.
 *
 * @param lockToken Lock token to check for.
 *
 * @return {@code true} if the management node contains the lock token and false otherwise.
*/ private boolean isManagementToken(String lockToken) { return managementNodeLocks.containsUnexpired(lockToken); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue()))); } else if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } final String lockToken = message.getLockToken(); final String sessionId = message.getSessionId(); if (receiverOptions.getReceiveMode() != ReceiveMode.PEEK_LOCK) { return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format( "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus)))); } final String sessionIdToUse; if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) { sessionIdToUse = receiverOptions.getSessionId(); } else { sessionIdToUse = sessionId; } logger.verbose("{}: Update started. Disposition: {}. Lock: {}. SessionId: {}.", entityPath, dispositionStatus, lockToken, sessionIdToUse); final Mono<Void> performOnManagement = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext)) .then(Mono.fromRunnable(() -> { logger.info("{}: Management node Update completed. Disposition: {}. 
Lock: {}.", entityPath, dispositionStatus, lockToken); managementNodeLocks.remove(lockToken); renewalContainer.remove(lockToken); })); Mono<Void> updateDispositionOperation; if (sessionManager != null) { updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus, propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext) .flatMap(isSuccess -> { if (isSuccess) { renewalContainer.remove(lockToken); return Mono.empty(); } logger.info("Could not perform on session manger. Performing on management node."); return performOnManagement; }); } else { final ServiceBusAsyncConsumer existingConsumer = consumer.get(); if (isManagementToken(lockToken) || existingConsumer == null) { updateDispositionOperation = performOnManagement; } else { updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext) .then(Mono.fromRunnable(() -> { logger.verbose("{}: Update completed. Disposition: {}. 
Lock: {}.", entityPath, dispositionStatus, lockToken); renewalContainer.remove(lockToken); })); } } return updateDispositionOperation .onErrorMap(throwable -> { if (receiverOptions.isEnableAutoComplete() && throwable instanceof AmqpException) { switch (dispositionStatus) { case COMPLETED: return new ServiceBusReceiverException((AmqpException) throwable, ServiceBusErrorSource.COMPLETE); case ABANDONED: return new ServiceBusReceiverException((AmqpException) throwable, ServiceBusErrorSource.ABANDONED); default: } } return throwable; }); } private ServiceBusAsyncConsumer getOrCreateConsumer() { final ServiceBusAsyncConsumer existing = consumer.get(); if (existing != null) { return existing; } final String linkName = StringUtil.getRandomString(entityPath); logger.info("{}: Creating consumer for link '{}'", entityPath, linkName); final Flux<ServiceBusReceiveLink> receiveLink = connectionProcessor.flatMap(connection -> { if (receiverOptions.isSessionReceiver()) { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, receiverOptions.getSessionId()); } else { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType); } }) .doOnNext(next -> { final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]" + " sessionEnabled? 
{} transferEntityPath: [{}], entityType: [{}]"; logger.verbose(format, next.getEntityPath(), receiverOptions.getReceiveMode(), CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()), "N/A", entityType); }) .repeat(); final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions()); final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith( new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy, receiverOptions.getReceiveMode())); final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor, messageSerializer, receiverOptions); if (consumer.compareAndSet(null, newConsumer)) { return newConsumer; } else { newConsumer.close(); return consumer.get(); } } /** * If the receiver has not connected via {@link * through the management node. * * @return The name of the receive link, or null of it has not connected via a receive link. */ private String getLinkName(String sessionId) { if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) { return sessionManager.getLinkName(sessionId); } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) { return null; } else { final ServiceBusAsyncConsumer existing = consumer.get(); return existing != null ? existing.getLinkName() : null; } } Mono<OffsetDateTime> renewSessionLock(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot renew session lock on a non-session receiver.")); } final String linkName = sessionManager != null ? 
sessionManager.getLinkName(sessionId) : null; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.renewSessionLock(sessionId, linkName)) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( "Cannot renew session lock on a non-session receiver.")); } else if (maxLockRenewalDuration == null) { return monoError(logger, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(logger, new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } else if (Objects.isNull(sessionId)) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty.")); } final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true, this::renewSessionLock); renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return operation.getCompletionOperation() .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> setSessionState(String sessionId, byte[] sessionState) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot set session state on a non-session receiver.")); } final String linkName 
= sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName)); } Mono<byte[]> getSessionState(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot get session state on a non-session receiver.")); } if (sessionManager != null) { return sessionManager.getSessionState(sessionId); } else { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId))); } } /** * Map the error to {@link ServiceBusReceiverException} */ boolean isConnectionClosed() { return this.connectionProcessor.isChannelClosed(); } }
class ServiceBusReceiverAsyncClient implements AutoCloseable {
    private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
    // Link name used for the transaction-coordinator session.
    private static final String TRANSACTION_LINK_NAME = "coordinator";

    // Tracks in-progress lock-renewal operations keyed by lock token / session id.
    private final LockContainer<LockRenewalOperation> renewalContainer;
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Lock tokens whose renew/settle operations must go through the management node.
    private final LockContainer<OffsetDateTime> managementNodeLocks;
    private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class);
    private final String fullyQualifiedNamespace;
    private final String entityPath;
    private final MessagingEntityType entityType;
    private final ReceiverOptions receiverOptions;
    private final ServiceBusConnectionProcessor connectionProcessor;
    private final TracerProvider tracerProvider;
    private final MessageSerializer messageSerializer;
    // Callback invoked when this client closes (e.g. so the builder can release shared resources).
    private final Runnable onClientClose;
    // Non-null only for session-enabled receivers; null in the non-session constructor.
    private final ServiceBusSessionManager sessionManager;
    // Held while message settlement is in flight; close() waits on it before tearing down links.
    private final Semaphore completionLock = new Semaphore(1);

    // Highest sequence number seen by peek operations; the next peek starts after it.
    private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
    // Lazily created, single shared consumer — see getOrCreateConsumer().
    private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();

    /**
     * Creates a receiver that listens to a Service Bus resource.
     *
     * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
     * @param entityPath The name of the topic or queue.
     * @param entityType The type of the Service Bus resource.
     * @param receiverOptions Options when receiving messages.
     * @param connectionProcessor The AMQP connection to the Service Bus resource.
     * @param tracerProvider Tracer for telemetry.
     * @param messageSerializer Serializes and deserializes Service Bus messages.
     * @param onClientClose Operation to run when the client completes.
*/ ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.managementNodeLocks = new LockContainer<>(cleanupInterval); this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> { logger.verbose("Closing expired renewal operation. lockToken[{}]. status[{}]. 
throwable[{}].", renewal.getLockToken(), renewal.getStatus(), renewal.getThrowable()); renewal.close(); }); this.sessionManager = null; } ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose, ServiceBusSessionManager sessionManager) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null."); this.managementNodeLocks = new LockContainer<>(cleanupInterval); this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> { logger.info("Closing expired renewal operation. sessionId[{}]. status[{}]. throwable[{}]", renewal.getSessionId(), renewal.getStatus(), renewal.getThrowable()); renewal.close(); }); } /** * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Service Bus namespace that the connection is associated with. 
*/ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Service Bus resource this client interacts with. * * @return The Service Bus resource this client interacts with. */ public String getEntityPath() { return entityPath; } /** * Abandon a {@link ServiceBusReceivedMessage message}. This will make the message available * again for processing. Abandoning a message will increase the delivery count on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the Service Bus abandon operation completes. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. */ public Mono<Void> abandon(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null); } /** * Abandon a {@link ServiceBusReceivedMessage message} updates the message's properties. * This will make the message available again for processing. Abandoning a message will increase the delivery count * on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to abandon the message. You can specify * {@link AbandonOptions * {@code transactionContext} can be set using * {@link AbandonOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the Service Bus operation finishes. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. 
*/
public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) {
    if (Objects.isNull(options)) {
        // Fix: the message previously said "'settlementOptions'", which is not the parameter
        // name; sibling methods (complete/defer/deadLetter) all report "'options'".
        return monoError(logger, new NullPointerException("'options' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(logger, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }

    return updateDisposition(message, DispositionStatus.ABANDONED, null, null,
        options.getPropertiesToModify(), options.getTransactionContext());
}

/**
 * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 *
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 * @throws NullPointerException if {@code message} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
 * mode.
 */
public Mono<Void> complete(ServiceBusReceivedMessage message) {
    return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null);
}

/**
 * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the
 * service.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options to complete the message. The {@code transactionContext} can be set using
 * {@link CompleteOptions
 * created first by {@link ServiceBusReceiverAsyncClient
 * {@link ServiceBusSenderAsyncClient
 *
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
 * mode.
*/ public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, options.getTransactionContext()); } /** * Defers a {@link ServiceBusReceivedMessage message}. This will move message into the deferred subqueue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the Service Bus defer operation finishes. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * @see <a href="https: */ public Mono<Void> defer(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null); } /** * Defers a {@link ServiceBusReceivedMessage message} with modified message property. This will move message into * the deferred subqueue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to defer the message. You can specify {@link DeferOptions * to modify on the Message. The {@code transactionContext} can be set using * {@link DeferOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * * @return A {@link Mono} that completes when the defer operation finishes. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. 
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @see <a href="https: */ public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.DEFERRED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(ServiceBusReceivedMessage message) { return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS); } /** * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options to deadLetter the message. You can specify * {@link DeadLetterOptions * {@code transactionContext} can be set using * {@link DeadLetterOptions * created first by {@link ServiceBusReceiverAsyncClient * {@link ServiceBusSenderAsyncClient * @return A {@link Mono} that completes when the dead letter operation finishes. * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. 
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @see <a href="https: * queues</a> */ public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) { if (Objects.isNull(options)) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(logger, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(), options.getDeadLetterErrorDescription(), options.getPropertiesToModify(), options.getTransactionContext()); } /** * Gets the state of the session if this receiver is a session receiver. * * @return The session state or an empty Mono if there is no state set for the session. * @throws IllegalStateException if the receiver is a non-session receiver. */ public Mono<byte[]> getSessionState() { return getSessionState(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @return A peeked {@link ServiceBusReceivedMessage}. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessage() { return peekMessage(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. 
* @throws IllegalStateException if the receiver is disposed. * @see <a href="https: */ Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> { final long sequence = lastPeekedSequenceNumber.get() + 1; logger.verbose("Peek message from sequence number: {}", sequence); return channel.peek(sequence, sessionId, getLinkName(sessionId)); }) .handle((message, sink) -> { final long current = lastPeekedSequenceNumber .updateAndGet(value -> Math.max(value, message.getSequenceNumber())); logger.verbose("Updating last peeked sequence number: {}", current); sink.next(message); }); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. * * @return A peeked {@link ServiceBusReceivedMessage}. * @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessageAt(long sequenceNumber) { return peekMessageAt(sequenceNumber, receiverOptions.getSessionId()); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. 
* @see <a href="https: */ Mono<ServiceBusReceivedMessage> peekMessageAt(long sequenceNumber, String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId))); } /** * Reads the next batch of active messages without changing the state of the receiver or the message source. * * @param maxMessages The number of messages. * * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) { return peekMessages(maxMessages, receiverOptions.getSessionId()); } /** * Reads the next batch of active messages without changing the state of the receiver or the message source. * * @param maxMessages The number of messages. * @param sessionId Session id of the messages to peek from. {@code null} if there is no session. * * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. 
* @see <a href="https: */ Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> { final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1; logger.verbose("Peek batch from sequence number: {}", nextSequenceNumber); final Flux<ServiceBusReceivedMessage> messages = node.peek(nextSequenceNumber, sessionId, getLinkName(sessionId), maxMessages); final Mono<ServiceBusReceivedMessage> handle = messages .switchIfEmpty(Mono.fromCallable(() -> { ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(BinaryData .fromBytes(new byte[0])); emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get()); return emptyMessage; })) .last() .handle((last, sink) -> { final long current = lastPeekedSequenceNumber .updateAndGet(value -> Math.max(value, last.getSequenceNumber())); logger.verbose("Last peeked sequence number in batch: {}", current); sink.complete(); }); return Flux.merge(messages, handle); }); } /** * Starting from the given sequence number, reads the next batch of active messages without changing the state of * the receiver or the message source. * * @param maxMessages The number of messages. * @param sequenceNumber The sequence number from where to start reading messages. * * @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. 
* @see <a href="https: */ public Flux<ServiceBusReceivedMessage> peekMessagesAt(int maxMessages, long sequenceNumber) { return peekMessagesAt(maxMessages, sequenceNumber, receiverOptions.getSessionId()); } /** * Starting from the given sequence number, reads the next batch of active messages without changing the state of * the receiver or the message source. * * @param maxMessages The number of messages. * @param sequenceNumber The sequence number from where to start reading messages. * @param sessionId Session id of the messages to peek from. {@code null} if there is no session. * * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked. * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer. * @see <a href="https: */ Flux<ServiceBusReceivedMessage> peekMessagesAt(int maxMessages, long sequenceNumber, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages)); } /** * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity. * This Flux continuously receives messages from a Service Bus entity until either: * * <ul> * <li>The receiver is closed.</li> * <li>The subscription to the Flux is disposed.</li> * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux * {@link Flux * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li> * </ul> * * @return An <b>infinite</b> stream of messages from the Service Bus entity. 
*/ public Flux<ServiceBusReceivedMessage> receiveMessages() { return receiveMessagesWithContext() .handle((serviceBusMessageContext, sink) -> { if (serviceBusMessageContext.hasError()) { sink.error(serviceBusMessageContext.getThrowable()); return; } sink.next(serviceBusMessageContext.getMessage()); }); } /** * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity. * This Flux continuously receives messages from a Service Bus entity until either: * * <ul> * <li>The receiver is closed.</li> * <li>The subscription to the Flux is disposed.</li> * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux * {@link Flux * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li> * </ul> * * @return An <b>infinite</b> stream of messages from the Service Bus entity. */ Flux<ServiceBusMessageContext> receiveMessagesWithContext() { final Flux<ServiceBusMessageContext> messageFlux = sessionManager != null ? sessionManager.receive() : getOrCreateConsumer().receive().map(ServiceBusMessageContext::new); final Flux<ServiceBusMessageContext> withAutoLockRenewal; if (receiverOptions.isAutoLockRenewEnabled()) { withAutoLockRenewal = new FluxAutoLockRenew(messageFlux, receiverOptions.getMaxLockRenewDuration(), renewalContainer, this::renewMessageLock); } else { withAutoLockRenewal = messageFlux; } final Flux<ServiceBusMessageContext> withAutoComplete; if (receiverOptions.isEnableAutoComplete()) { withAutoComplete = new FluxAutoComplete(withAutoLockRenewal, completionLock, context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(), context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty()); } else { withAutoComplete = withAutoLockRenewal; } return withAutoComplete .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Receives a deferred {@link ServiceBusReceivedMessage message}. 
Deferred messages can only be received by using * sequence number. * * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * * @return A deferred message with the matching {@code sequenceNumber}. */ public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) { return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId()); } /** * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using * sequence number. * * @param sequenceNumber The {@link ServiceBusReceivedMessage * message. * @param sessionId Session id of the deferred message. {@code null} if there is no session. * * @return A deferred message with the matching {@code sequenceNumber}. */ Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last()) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil())); } return receivedMessage; }); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}. 
*/ public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) { return receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId()); } /** * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received * by using sequence number. * * @param sequenceNumbers The sequence numbers of the deferred messages. * @param sessionId Session id of the deferred messages. {@code null} if there is no session. * * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}. */ Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) { if (isDisposed.get()) { return fluxError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch"))); } return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId, getLinkName(sessionId), sequenceNumbers)) .map(receivedMessage -> { if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) { return receivedMessage; } if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) { receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(), receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil())); } return receivedMessage; }); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. When a message is received in {@link ReceiveMode * this receiver instance for a duration as specified during the entity creation (LockDuration). If processing of * the message requires longer than this duration, the lock needs to be renewed. For each renewal, the lock is reset * to the entity's LockDuration value. 
* * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal. * * @return The new expiration time for the message. * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null. * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode * mode. * @throws IllegalStateException if the receiver is a session receiver. * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value. */ public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock"))); } else if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } else if (Objects.isNull(message.getLockToken())) { return monoError(logger, new NullPointerException("'message.getLockToken()' cannot be null.")); } else if (message.getLockToken().isEmpty()) { return monoError(logger, new IllegalArgumentException("'message.getLockToken()' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken()))); } return renewMessageLock(message.getLockToken()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the * entity. * * @param lockToken to be renewed. * * @return The new expiration time for the message. 
*/ Mono<OffsetDateTime> renewMessageLock(String lockToken) { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(serviceBusManagementNode -> serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null))) .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime, offsetDateTime)); } /** * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param maxLockRenewalDuration Maximum duration to keep renewing the lock token. * * @return A lock renewal operation for the message. * @throws NullPointerException if {@code message}, {@code message.getLockToken()} or {@code * maxLockRenewalDuration} is null. * @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed. * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value. */ public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock"))); } else if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } else if (Objects.isNull(message.getLockToken())) { return monoError(logger, new NullPointerException("'message.getLockToken()' cannot be null.")); } else if (message.getLockToken().isEmpty()) { return monoError(logger, new IllegalArgumentException("'message.getLockToken()' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken()))); } else if (maxLockRenewalDuration == null) { return monoError(logger, new NullPointerException("'maxLockRenewalDuration' 
cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(logger, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative.")); } final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(), maxLockRenewalDuration, false, ignored -> renewMessageLock(message)); renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return operation.getCompletionOperation() .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Renews the session lock if this receiver is a session receiver. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver. */ public Mono<OffsetDateTime> renewSessionLock() { return renewSessionLock(receiverOptions.getSessionId()); } /** * Starts the auto lock renewal for the session this receiver works for. * * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock. * * @return A lock renewal operation for the message. * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null. * @throws IllegalArgumentException if {@code sessionId} is an empty string. * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed. */ public Mono<Void> renewSessionLock(Duration maxLockRenewalDuration) { return this.renewSessionLock(receiverOptions.getSessionId(), maxLockRenewalDuration); } /** * Sets the state of the session this receiver works for. * * @param sessionState State to set on the session. * * @return A Mono that completes when the session is set * @throws IllegalStateException if the receiver is a non-session receiver. */ public Mono<Void> setSessionState(byte[] sessionState) { return this.setSessionState(receiverOptions.getSessionId(), sessionState); } /** * Starts a new service side transaction. 
The {@link ServiceBusTransactionContext} should be passed to all * operations that needs to be in this transaction. * * <p><strong>Create a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.createTransaction} * * @return The {@link Mono} that finishes this operation on service bus resource. */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())); } /** * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * <p><strong>Commit a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.commitTransaction} * * @param transactionContext to be committed. * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is * null. 
*/ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus. * <p><strong>Rollback a transaction</strong></p> * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.rollbackTransaction} * * @param transactionContext to be rollbacked. * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is * null. 
*/ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(logger, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId()))); } /** * Disposes of the consumer by closing the underlying connection to the service. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } try { completionLock.acquire(); } catch (InterruptedException e) { logger.info("Unable to obtain completion lock.", e); } logger.info("Removing receiver links."); final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null); if (disposed != null) { disposed.close(); } if (sessionManager != null) { sessionManager.close(); } onClientClose.run(); } /** * @return receiver options set by user; */ ReceiverOptions getReceiverOptions() { return receiverOptions; } /** * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are * held by the management node when they are received from the management node or management operations are * performed using that {@code lockToken}. * * @param lockToken Lock token to check for. * * @return {@code true} if the management node contains the lock token and false otherwise. 
*/ private boolean isManagementToken(String lockToken) { return managementNodeLocks.containsUnexpired(lockToken); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue()))); } else if (Objects.isNull(message)) { return monoError(logger, new NullPointerException("'message' cannot be null.")); } final String lockToken = message.getLockToken(); final String sessionId = message.getSessionId(); if (receiverOptions.getReceiveMode() != ReceiveMode.PEEK_LOCK) { return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format( "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus)))); } final String sessionIdToUse; if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) { sessionIdToUse = receiverOptions.getSessionId(); } else { sessionIdToUse = sessionId; } logger.verbose("{}: Update started. Disposition: {}. Lock: {}. SessionId: {}.", entityPath, dispositionStatus, lockToken, sessionIdToUse); final Mono<Void> performOnManagement = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext)) .then(Mono.fromRunnable(() -> { logger.info("{}: Management node Update completed. Disposition: {}. 
Lock: {}.", entityPath, dispositionStatus, lockToken); managementNodeLocks.remove(lockToken); renewalContainer.remove(lockToken); })); Mono<Void> updateDispositionOperation; if (sessionManager != null) { updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus, propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext) .flatMap(isSuccess -> { if (isSuccess) { renewalContainer.remove(lockToken); return Mono.empty(); } logger.info("Could not perform on session manger. Performing on management node."); return performOnManagement; }); } else { final ServiceBusAsyncConsumer existingConsumer = consumer.get(); if (isManagementToken(lockToken) || existingConsumer == null) { updateDispositionOperation = performOnManagement; } else { updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext) .then(Mono.fromRunnable(() -> { logger.verbose("{}: Update completed. Disposition: {}. 
Lock: {}.", entityPath, dispositionStatus, lockToken); renewalContainer.remove(lockToken); })); } } return updateDispositionOperation .onErrorMap(throwable -> { if (throwable instanceof ServiceBusReceiverException) { return throwable; } switch (dispositionStatus) { case COMPLETED: return new ServiceBusReceiverException(throwable, ServiceBusErrorSource.COMPLETE); case ABANDONED: return new ServiceBusReceiverException(throwable, ServiceBusErrorSource.ABANDONED); default: return new ServiceBusReceiverException(throwable, ServiceBusErrorSource.UNKNOWN); } }); } private ServiceBusAsyncConsumer getOrCreateConsumer() { final ServiceBusAsyncConsumer existing = consumer.get(); if (existing != null) { return existing; } final String linkName = StringUtil.getRandomString(entityPath); logger.info("{}: Creating consumer for link '{}'", entityPath, linkName); final Flux<ServiceBusReceiveLink> receiveLink = connectionProcessor.flatMap(connection -> { if (receiverOptions.isSessionReceiver()) { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, receiverOptions.getSessionId()); } else { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType); } }) .doOnNext(next -> { final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]" + " sessionEnabled? 
{} transferEntityPath: [{}], entityType: [{}]"; logger.verbose(format, next.getEntityPath(), receiverOptions.getReceiveMode(), CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()), "N/A", entityType); }) .repeat(); final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions()); final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith( new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy, receiverOptions.getReceiveMode())); final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor, messageSerializer, receiverOptions); if (consumer.compareAndSet(null, newConsumer)) { return newConsumer; } else { newConsumer.close(); return consumer.get(); } } /** * If the receiver has not connected via {@link * through the management node. * * @return The name of the receive link, or null of it has not connected via a receive link. */ private String getLinkName(String sessionId) { if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) { return sessionManager.getLinkName(sessionId); } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) { return null; } else { final ServiceBusAsyncConsumer existing = consumer.get(); return existing != null ? existing.getLinkName() : null; } } Mono<OffsetDateTime> renewSessionLock(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot renew session lock on a non-session receiver.")); } final String linkName = sessionManager != null ? 
sessionManager.getLinkName(sessionId) : null; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.renewSessionLock(sessionId, linkName)) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException( "Cannot renew session lock on a non-session receiver.")); } else if (maxLockRenewalDuration == null) { return monoError(logger, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(logger, new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } else if (Objects.isNull(sessionId)) { return monoError(logger, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(logger, new IllegalArgumentException("'sessionId' cannot be empty.")); } final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true, this::renewSessionLock); renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return operation.getCompletionOperation() .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> setSessionState(String sessionId, byte[] sessionState) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot set session state on a non-session receiver.")); } final String linkName 
= sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName)); } Mono<byte[]> getSessionState(String sessionId) { if (isDisposed.get()) { return monoError(logger, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(logger, new IllegalStateException("Cannot get session state on a non-session receiver.")); } if (sessionManager != null) { return sessionManager.getSessionState(sessionId); } else { return connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId))); } } /** * Map the error to {@link ServiceBusReceiverException} */ boolean isConnectionClosed() { return this.connectionProcessor.isChannelClosed(); } }
Need to set one environment here?
public void testWithNoPrefixSet() { final PrefixedSpringCredentialBuilder builder = new PrefixedSpringCredentialBuilder(); assertThrows(IllegalArgumentException.class, builder::build); }
final PrefixedSpringCredentialBuilder builder = new PrefixedSpringCredentialBuilder();
public void testWithNoPrefixSet() { final PrefixedSpringCredentialBuilder builder = new PrefixedSpringCredentialBuilder(); builder.environment(buildEnvironment(new Properties())); assertThrows(IllegalArgumentException.class, builder::build); }
class PrefixedSpringCredentialBuilderTest extends SpringCredentialTestBase { @Test public void testWithNoEnvironmentSet() { final PrefixedSpringCredentialBuilder builder = new PrefixedSpringCredentialBuilder(); assertThrows(IllegalArgumentException.class, builder::build); } @Test @Test public void testBuild() { final PrefixedSpringCredentialBuilderExt builder = new PrefixedSpringCredentialBuilderExt(); final TokenCredential tokenCredential = builder.prefix("test-prefix") .environment(buildEnvironment(new Properties())) .build(); assertTrue(tokenCredential instanceof ManagedIdentityCredential); assertEquals(1, builder.prefixes.size()); assertEquals(Lists.newArrayList("test-prefix"), builder.prefixes); } static class PrefixedSpringCredentialBuilderExt extends PrefixedSpringCredentialBuilder { List<String> prefixes = new ArrayList<>(); @Override protected TokenCredential populateTokenCredential(String prefix) { this.prefixes.add(prefix); return super.populateTokenCredential(prefix); } } }
class PrefixedSpringCredentialBuilderTest extends SpringCredentialTestBase { @Test public void testWithNoEnvironmentSet() { final PrefixedSpringCredentialBuilder builder = new PrefixedSpringCredentialBuilder(); assertThrows(IllegalArgumentException.class, builder::build); } @Test @Test public void testBuild() { final PrefixedSpringCredentialBuilderExt builder = new PrefixedSpringCredentialBuilderExt(); final TokenCredential tokenCredential = builder.prefix("test-prefix") .environment(buildEnvironment(new Properties())) .build(); assertTrue(tokenCredential instanceof ManagedIdentityCredential); assertEquals(1, builder.prefixes.size()); assertEquals(Lists.newArrayList("test-prefix"), builder.prefixes); } static class PrefixedSpringCredentialBuilderExt extends PrefixedSpringCredentialBuilder { List<String> prefixes = new ArrayList<>(); @Override protected TokenCredential populateTokenCredential(String prefix) { this.prefixes.add(prefix); return super.populateTokenCredential(prefix); } } }
If alternativePrefix is not null, then we will get managed identity credential if there is no such prefixed configuration in Environment. The order should be: - Check alternativePrefix client-id, client-secret - Check alternativePrefix client-id, client-certificate-path - If there is only alternativePrefix client-id, try managed identity with client-id - Check default prefix client-id, client-secret - Check default prefix client-id, client-certificate-path - If there is only default prefix client-id, try managed identity with client-id - Use managed identity with no argument -
public TokenCredential build() { if (environment == null) { throw new IllegalArgumentException("To build a spring credential the environment must be set"); } List<TokenCredential> tokenCredentials = new ArrayList<>(); if (alternativePrefix != null) { tokenCredentials.add(populateTokenCredential(alternativePrefix)); } tokenCredentials.add(populateDefaultTokenCredential()); return new ChainedTokenCredentialBuilder().addAll(tokenCredentials).build(); }
tokenCredentials.add(populateTokenCredential(alternativePrefix));
public TokenCredential build() { if (environment == null) { throw new IllegalArgumentException("To build a spring credential the environment must be set."); } List<TokenCredential> tokenCredentials = new ArrayList<>(); if (alternativePrefix != null) { addToChain(tokenCredentials, populateTokenCredentialBasedOnClientId(alternativePrefix)); } addToChain(tokenCredentials, populateTokenCredentialBasedOnClientId(AZURE_CREDENTIAL_PREFIX)); addToChain(tokenCredentials, defaultManagedIdentityCredential()); return new ChainedTokenCredentialBuilder().addAll(tokenCredentials).build(); }
class DefaultSpringCredentialBuilder extends SpringCredentialBuilderBase<DefaultSpringCredentialBuilder> { /** * Defines the AZURE_CREDENTIAL_PREFIX. */ static final String AZURE_CREDENTIAL_PREFIX = "azure.credential."; private String alternativePrefix; public DefaultSpringCredentialBuilder alternativePrfix(String alternative) { if (alternative != null) { this.alternativePrefix = alternative + (alternative.endsWith(".") ? "" : "."); } return this; } private TokenCredential populateDefaultTokenCredential() { return populateTokenCredential(AZURE_CREDENTIAL_PREFIX); } }
class DefaultSpringCredentialBuilder extends SpringCredentialBuilderBase<DefaultSpringCredentialBuilder> { /** * Defines the AZURE_CREDENTIAL_PREFIX. */ static final String AZURE_CREDENTIAL_PREFIX = "azure.credential."; private String alternativePrefix; public DefaultSpringCredentialBuilder alternativePrefix(String alternative) { if (alternative != null) { this.alternativePrefix = alternative + (alternative.endsWith(".") ? "" : "."); } return this; } /** * Build a default Spring token credential, which will be a chained credential. * If an alternative prefix is specified in the builder, the chain of credential * will have three credentials, one with the specified prefix, one with the default * spring credential prefix, and the default managed identity credential without client id * set. Otherwise, the chain will consist the credential with the default prefix and the default * managed identity credential. * * @return the default Spring token credential. * @throws IllegalArgumentException if no environment is set. */ private void addToChain(List<TokenCredential> chain, TokenCredential tokenCredential) { if (tokenCredential != null) { chain.add(tokenCredential); } } }
Is it better we call it xxx.user-group.xxx?
public void validateUserGroupProperties() { if (this.sessionStateless) { if (allowedGroupsConfigured()) { LOGGER.warn("Group names are not supported if you set 'sessionSateless' to 'true'."); } } else if (!allowedGroupsConfigured()) { throw new IllegalArgumentException("One of the User Group Properties must be populated. " + "Please populate azure.activedirectory.user-group.allowed-groups"); } if (!DEFAULT_GROUP_RELATIONSHIP.equalsIgnoreCase(groupRelationship) && !"transitive".equalsIgnoreCase(groupRelationship)) { throw new IllegalArgumentException("Configuration 'azure.activedirectory.group-relationship' " + "should be 'direct' or 'transitive'."); } }
throw new IllegalArgumentException("Configuration 'azure.activedirectory.group-relationship' "
public void validateUserGroupProperties() { if (this.sessionStateless) { if (allowedGroupsConfigured()) { LOGGER.warn("Group names are not supported if you set 'sessionSateless' to 'true'."); } } else if (!allowedGroupsConfigured()) { throw new IllegalArgumentException("One of the User Group Properties must be populated. " + "Please populate azure.activedirectory.user-group.allowed-groups"); } if (!GROUP_RELATIONSHIP_DIRECT.equalsIgnoreCase(userGroup.groupRelationship) && !GROUP_RELATIONSHIP_TRANSITIVE.equalsIgnoreCase(userGroup.groupRelationship)) { throw new IllegalArgumentException("Configuration 'azure.activedirectory.user-group.group-relationship' " + "should be 'direct' or 'transitive'."); } }
class UserGroupProperties { /** * Expected UserGroups that an authority will be granted to if found in the response from the MemeberOf Graph * API Call. */ private List<String> allowedGroups = new ArrayList<>(); /** * Key of the JSON Node to get from the Azure AD response object that will be checked to contain the {@code * azure.activedirectory.user-group.value} to signify that this node is a valid {@code UserGroup}. */ @NotEmpty private String key = "objectType"; /** * Value of the JSON Node identified by the {@code azure.activedirectory.user-group.key} to validate the JSON * Node is a UserGroup. */ @NotEmpty private String value = Membership.OBJECT_TYPE_GROUP; /** * Key of the JSON Node containing the Azure Object ID for the {@code UserGroup}. */ @NotEmpty private String objectIDKey = "objectId"; public List<String> getAllowedGroups() { return allowedGroups; } public void setAllowedGroups(List<String> allowedGroups) { this.allowedGroups = allowedGroups; } public String getKey() { return key; } public void setKey(String key) { this.key = key; } public String getValue() { return value; } public void setValue(String value) { this.value = value; } public String getObjectIDKey() { return objectIDKey; } public void setObjectIDKey(String objectIDKey) { this.objectIDKey = objectIDKey; } @Override public String toString() { return "UserGroupProperties{" + "allowedGroups=" + allowedGroups + ", key='" + key + '\'' + ", value='" + value + '\'' + ", objectIDKey='" + objectIDKey + '\'' + '}'; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } UserGroupProperties that = (UserGroupProperties) o; return Objects.equals(allowedGroups, that.allowedGroups) && Objects.equals(key, that.key) && Objects.equals(value, that.value) && Objects.equals(objectIDKey, that.objectIDKey); } @Override public int hashCode() { return Objects.hash(allowedGroups, key, value, objectIDKey); } }
class UserGroupProperties { /** * Expected UserGroups that an authority will be granted to if found in the response from the MemeberOf Graph * API Call. */ private List<String> allowedGroups = new ArrayList<>(); /** * Key of the JSON Node to get from the Azure AD response object that will be checked to contain the {@code * azure.activedirectory.user-group.value} to signify that this node is a valid {@code UserGroup}. */ @NotEmpty private String key = "objectType"; /** * Value of the JSON Node identified by the {@code azure.activedirectory.user-group.key} to validate the JSON * Node is a UserGroup. */ @NotEmpty private String value = Membership.OBJECT_TYPE_GROUP; /** * Key of the JSON Node containing the Azure Object ID for the {@code UserGroup}. */ @NotEmpty private String objectIDKey = "objectId"; /** * The way to obtain group relationship.<br/> * direct: the default value, get groups that the user is a direct member of;<br/> * transitive: Get groups that the user is a member of, and will also return all * groups the user is a nested member of; */ @NotEmpty private String groupRelationship = GROUP_RELATIONSHIP_DIRECT; public List<String> getAllowedGroups() { return allowedGroups; } public void setAllowedGroups(List<String> allowedGroups) { this.allowedGroups = allowedGroups; } public String getKey() { return key; } public void setKey(String key) { this.key = key; } public String getValue() { return value; } public void setValue(String value) { this.value = value; } public String getObjectIDKey() { return objectIDKey; } public void setObjectIDKey(String objectIDKey) { this.objectIDKey = objectIDKey; } public String getGroupRelationship() { return groupRelationship; } public void setGroupRelationship(String groupRelationship) { this.groupRelationship = groupRelationship; } @Override public String toString() { return "UserGroupProperties{" + "allowedGroups=" + allowedGroups + ", key='" + key + '\'' + ", value='" + value + '\'' + ", objectIDKey='" + objectIDKey + '\'' 
+ ", groupRelationship='" + groupRelationship + '\'' + '}'; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } UserGroupProperties that = (UserGroupProperties) o; return Objects.equals(allowedGroups, that.allowedGroups) && Objects.equals(key, that.key) && Objects.equals(value, that.value) && Objects.equals(objectIDKey, that.objectIDKey) && Objects.equals(groupRelationship, that.groupRelationship); } @Override public int hashCode() { return Objects.hash(allowedGroups, key, value, objectIDKey); } }
Ah yes.
public void testWithNoPrefixSet() { final PrefixedSpringCredentialBuilder builder = new PrefixedSpringCredentialBuilder(); assertThrows(IllegalArgumentException.class, builder::build); }
final PrefixedSpringCredentialBuilder builder = new PrefixedSpringCredentialBuilder();
public void testWithNoPrefixSet() { final PrefixedSpringCredentialBuilder builder = new PrefixedSpringCredentialBuilder(); builder.environment(buildEnvironment(new Properties())); assertThrows(IllegalArgumentException.class, builder::build); }
class PrefixedSpringCredentialBuilderTest extends SpringCredentialTestBase { @Test public void testWithNoEnvironmentSet() { final PrefixedSpringCredentialBuilder builder = new PrefixedSpringCredentialBuilder(); assertThrows(IllegalArgumentException.class, builder::build); } @Test @Test public void testBuild() { final PrefixedSpringCredentialBuilderExt builder = new PrefixedSpringCredentialBuilderExt(); final TokenCredential tokenCredential = builder.prefix("test-prefix") .environment(buildEnvironment(new Properties())) .build(); assertTrue(tokenCredential instanceof ManagedIdentityCredential); assertEquals(1, builder.prefixes.size()); assertEquals(Lists.newArrayList("test-prefix"), builder.prefixes); } static class PrefixedSpringCredentialBuilderExt extends PrefixedSpringCredentialBuilder { List<String> prefixes = new ArrayList<>(); @Override protected TokenCredential populateTokenCredential(String prefix) { this.prefixes.add(prefix); return super.populateTokenCredential(prefix); } } }
class PrefixedSpringCredentialBuilderTest extends SpringCredentialTestBase { @Test public void testWithNoEnvironmentSet() { final PrefixedSpringCredentialBuilder builder = new PrefixedSpringCredentialBuilder(); assertThrows(IllegalArgumentException.class, builder::build); } @Test @Test public void testBuild() { final PrefixedSpringCredentialBuilderExt builder = new PrefixedSpringCredentialBuilderExt(); final TokenCredential tokenCredential = builder.prefix("test-prefix") .environment(buildEnvironment(new Properties())) .build(); assertTrue(tokenCredential instanceof ManagedIdentityCredential); assertEquals(1, builder.prefixes.size()); assertEquals(Lists.newArrayList("test-prefix"), builder.prefixes); } static class PrefixedSpringCredentialBuilderExt extends PrefixedSpringCredentialBuilder { List<String> prefixes = new ArrayList<>(); @Override protected TokenCredential populateTokenCredential(String prefix) { this.prefixes.add(prefix); return super.populateTokenCredential(prefix); } } }
Changed.
public TokenCredential build() { if (environment == null) { throw new IllegalArgumentException("To build a spring credential the environment must be set"); } List<TokenCredential> tokenCredentials = new ArrayList<>(); if (alternativePrefix != null) { tokenCredentials.add(populateTokenCredential(alternativePrefix)); } tokenCredentials.add(populateDefaultTokenCredential()); return new ChainedTokenCredentialBuilder().addAll(tokenCredentials).build(); }
tokenCredentials.add(populateTokenCredential(alternativePrefix));
public TokenCredential build() { if (environment == null) { throw new IllegalArgumentException("To build a spring credential the environment must be set."); } List<TokenCredential> tokenCredentials = new ArrayList<>(); if (alternativePrefix != null) { addToChain(tokenCredentials, populateTokenCredentialBasedOnClientId(alternativePrefix)); } addToChain(tokenCredentials, populateTokenCredentialBasedOnClientId(AZURE_CREDENTIAL_PREFIX)); addToChain(tokenCredentials, defaultManagedIdentityCredential()); return new ChainedTokenCredentialBuilder().addAll(tokenCredentials).build(); }
class DefaultSpringCredentialBuilder extends SpringCredentialBuilderBase<DefaultSpringCredentialBuilder> { /** * Defines the AZURE_CREDENTIAL_PREFIX. */ static final String AZURE_CREDENTIAL_PREFIX = "azure.credential."; private String alternativePrefix; public DefaultSpringCredentialBuilder alternativePrfix(String alternative) { if (alternative != null) { this.alternativePrefix = alternative + (alternative.endsWith(".") ? "" : "."); } return this; } private TokenCredential populateDefaultTokenCredential() { return populateTokenCredential(AZURE_CREDENTIAL_PREFIX); } }
class DefaultSpringCredentialBuilder extends SpringCredentialBuilderBase<DefaultSpringCredentialBuilder> { /** * Defines the AZURE_CREDENTIAL_PREFIX. */ static final String AZURE_CREDENTIAL_PREFIX = "azure.credential."; private String alternativePrefix; public DefaultSpringCredentialBuilder alternativePrefix(String alternative) { if (alternative != null) { this.alternativePrefix = alternative + (alternative.endsWith(".") ? "" : "."); } return this; } /** * Build a default Spring token credential, which will be a chained credential. * If an alternative prefix is specified in the builder, the chain of credential * will have three credentials, one with the specified prefix, one with the default * spring credential prefix, and the default managed identity credential without client id * set. Otherwise, the chain will consist the credential with the default prefix and the default * managed identity credential. * * @return the default Spring token credential. * @throws IllegalArgumentException if no environment is set. */ private void addToChain(List<TokenCredential> chain, TokenCredential tokenCredential) { if (tokenCredential != null) { chain.add(tokenCredential); } } }